def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    with ui.configoverride({('ui', 'quiet'): True}):
        hg.update(repo, wctx.node())
        files = []
        files.extend(shelvectx.files())
        files.extend(shelvectx.parents()[0].files())

        # revert will overwrite unknown files, so move them out of the way
        for file in repo.status(unknown=True).unknown:
            if file in files:
                util.rename(file, scmutil.origpath(ui, repo, file))
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                       *pathtofiles(repo, files),
                       **{r'no_backup': True})
        ui.popbuffer()

def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    with ui.configoverride({('ui', 'quiet'): True}):
        hg.update(repo, wctx.node())
        files = []
        files.extend(shelvectx.files())
        files.extend(shelvectx.parents()[0].files())

        # revert will overwrite unknown files, so move them out of the way
        for file in repo.status(unknown=True).unknown:
            if file in files:
                util.rename(file, scmutil.origpath(ui, repo, file))
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                       *pathtofiles(repo, files),
                       **{'no_backup': True})
        ui.popbuffer()

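# The two mergefiles() variants above differ only in the r'' prefix on the
# 'no_backup' keyword (likely a Python 2/3 string-handling detail); both rely
# on scmutil.origpath() to pick a backup location before an unknown file is
# overwritten by revert.  The helper below is a minimal, self-contained sketch
# of that backup-path computation; origpath_sketch, backup_dir, and the
# default ".orig" suffix are illustrative assumptions, not the actual
# scmutil implementation.
import os


def origpath_sketch(repo_root, filepath, backup_dir=None):
    """Return the path where a backup of ``filepath`` would be written.

    Assumption: mirrors the default behaviour (``<file>.orig`` next to the
    file) and an ``ui.origbackuppath``-style behaviour (mirror the
    repo-relative path under a backup directory).
    """
    if not backup_dir:
        # default: the backup lives next to the file with a ".orig" suffix
        return filepath + '.orig'
    # otherwise mirror the repo-relative layout under the backup directory
    rel = os.path.relpath(filepath, start=repo_root)
    return os.path.join(repo_root, backup_dir, rel)

# usage (illustrative paths):
#   origpath_sketch('/repo', '/repo/a/b.txt')                    -> '/repo/a/b.txt.orig'
#   origpath_sketch('/repo', '/repo/a/b.txt', '.hg/origbackups') -> '/repo/.hg/origbackups/a/b.txt'
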
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)

    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        dropped = set()
        updated, removed = 0, 0
        wvfs = repo.wvfs
        wctx = repo[None]
        for lfile in lfiles:
            rellfile = lfile
            rellfileorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                start=repo.root)
            relstandin = lfutil.standin(lfile)
            relstandinorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                start=repo.root)
            if wvfs.exists(relstandin):
                if (wvfs.exists(relstandinorig) and
                        wvfs.exists(rellfile)):
                    shutil.copyfile(wvfs.join(rellfile),
                                    wvfs.join(rellfileorig))
                    wvfs.unlinkpath(relstandinorig)
                expecthash = lfutil.readasstandin(wctx[relstandin])
                if expecthash != '':
                    if lfile not in wctx:  # not switched to normal file
                        if repo.dirstate[relstandin] != '?':
                            wvfs.unlinkpath(rellfile, ignoremissing=True)
                        else:
                            dropped.add(rellfile)

                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (wvfs.exists(rellfile) and
                        repo.dirstate.normalize(lfile) not in wctx):
                    wvfs.unlinkpath(rellfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            lfiles = [f for f in lfiles if f not in dropped]

            for f in dropped:
                repo.wvfs.unlinkpath(lfutil.standin(f))
                # This needs to happen for dropped files, otherwise they stay
                # in the M state.
                lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)

            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

            for lfile in lfiles:
                update1 = 0

                expecthash = update.get(lfile)
                if expecthash:
                    if not lfutil.copyfromcache(repo, expecthash, lfile):
                        # failed ... but already removed and set to
                        # normallookup
                        continue
                    # Synchronize largefile dirstate to the last modified
                    # time of the file
                    lfdirstate.normal(lfile)
                    update1 = 1

                # copy the exec mode of the largefile standin from the
                # repository's dirstate to its state in the lfdirstate.
                rellfile = lfile
                relstandin = lfutil.standin(lfile)
                if wvfs.exists(relstandin):
                    # exec is decided by the user's permissions using
                    # mask 0o100
                    standinexec = wvfs.stat(relstandin).st_mode & 0o100
                    st = wvfs.stat(rellfile)
                    mode = st.st_mode
                    if standinexec != mode & 0o100:
                        # first remove all X bits, then shift all R bits to X
                        mode &= ~0o111
                        if standinexec:
                            mode |= (mode >> 2) & 0o111 & ~util.umask
                        wvfs.chmod(rellfile, mode)
                        update1 = 1

                updated += update1

                lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n')
                         % (updated, removed))

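# A minimal, runnable sketch of the exec-bit propagation in the loop above:
# the standin's 0o100 bit decides whether the largefile should be executable;
# if it should, every readable bit is shifted into the matching executable
# bit and filtered through the umask.  _sync_exec_bit and the 0o022 default
# are illustrative assumptions, not part of lfutil.
def _sync_exec_bit(mode, standin_is_exec, umask=0o022):
    mode &= ~0o111                               # drop any existing x bits
    if standin_is_exec:
        mode |= (mode >> 2) & 0o111 & ~umask     # shift r bits into x bits
    return mode

assert _sync_exec_bit(0o644, True) == 0o755      # rw-r--r-- gains exec bits
assert _sync_exec_bit(0o755, False) == 0o644     # rwxr-xr-x loses exec bits
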
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)

    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            abslfileorig = scmutil.origpath(ui, repo, abslfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            absstandinorig = scmutil.origpath(ui, repo, absstandin)
            if os.path.exists(absstandin):
                if (os.path.exists(absstandinorig) and
                        os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfileorig)
                util.unlinkpath(absstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]:  # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                        repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

            for lfile in lfiles:
                update1 = 0

                expecthash = update.get(lfile)
                if expecthash:
                    if not lfutil.copyfromcache(repo, expecthash, lfile):
                        # failed ... but already removed and set to
                        # normallookup
                        continue
                    # Synchronize largefile dirstate to the last modified
                    # time of the file
                    lfdirstate.normal(lfile)
                    update1 = 1

                # copy the state of largefile standin from the repository's
                # dirstate to its state in the lfdirstate.
                abslfile = repo.wjoin(lfile)
                absstandin = repo.wjoin(lfutil.standin(lfile))
                if os.path.exists(absstandin):
                    mode = os.stat(absstandin).st_mode
                    if mode != os.stat(abslfile).st_mode:
                        os.chmod(abslfile, mode)
                        update1 = 1

                updated += update1

                lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n')
                         % (updated, removed))

def _summarize(repo, workingfilectx, otherctx, basectx):
    origfile = (None if workingfilectx.isabsent()
                else scmutil.origpath(repo.ui, repo,
                                      repo.wjoin(workingfilectx.path())))

    def flags(context):
        if isinstance(context, absentfilectx):
            return {
                'contents': None,
                'exists': False,
                'isexec': None,
                'issymlink': None,
            }
        return {
            'contents': context.data(),
            'exists': True,
            'isexec': context.isexec(),
            'issymlink': context.islink(),
        }

    output = flags(workingfilectx)

    filestat = (util.filestat.frompath(origfile)
                if origfile is not None else None)
    if origfile and filestat.stat:
        # Since you can start a merge with a dirty working copy (either via
        # `up` or `merge -f`), "local" must reflect that, not the underlying
        # changeset. Those contents are available in the .orig version, so we
        # look there and mock up the schema to look like the other contexts.
        #
        # Test cases affected in test-merge-conflict-cornercases.t: #0
        local = {
            'contents': util.readfile(origfile),
            'exists': True,
            'isexec': util.isexec(origfile),
            'issymlink': util.statislink(filestat.stat),
        }
    else:
        # No backup file. This happens whenever the merge was esoteric enough
        # that we didn't launch a merge tool*, and instead prompted the user
        # to "use (c)hanged version, (d)elete, or leave (u)nresolved".
        #
        # The only way to exit that prompt with a conflict is to choose "u",
        # which leaves the local version in the working copy (with all its
        # pre-merge properties including any local changes), so we can reuse
        # that.
        #
        # Affected test cases: #0b, #1, #6, #11, and #12.
        #
        # Another alternative might be to use repo['.'][path] but that
        # wouldn't have any dirty pre-merge changes.
        #
        # *If we had, we would have overwritten the working copy, made a
        # backup and hit the above case.
        #
        # Copy, so the addition of the `path` key below does not affect both
        # versions.
        local = copy.copy(output)

    output['path'] = repo.wjoin(workingfilectx.path())

    return {
        'base': flags(basectx),
        'local': local,
        'other': flags(otherctx),
        'output': output,
        'path': workingfilectx.path(),
    }

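# Hedged example of the schema _summarize() produces for an ordinary text
# conflict.  Every value below is invented for illustration; only the key
# layout mirrors the function above.
example_summary = {
    'base':   {'contents': b'a\n', 'exists': True, 'isexec': False,
               'issymlink': False},
    'local':  {'contents': b'b\n', 'exists': True, 'isexec': False,
               'issymlink': False},
    'other':  {'contents': b'c\n', 'exists': True, 'isexec': False,
               'issymlink': False},
    'output': {'contents': b'<<<<<<< working copy\nb\n=======\nc\n'
                           b'>>>>>>> merge rev\n',
               'exists': True, 'isexec': False, 'issymlink': False,
               'path': '/repo/foo.txt'},
    'path': 'foo.txt',
}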