def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        # preserve the user's working copy before we clobber it below
        if os.path.exists(absstandin + '.orig') and os.path.exists(abslfile):
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        # only fetch when the working copy is absent or its content hash
        # no longer matches the standin
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                if lfile not in repo[None]: # not switched to normal file
                    util.unlinkpath(abslfile, ignoremissing=True)
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            else:
                # Synchronize largefile dirstate to the last modified time of
                # the file
                lfdirstate.normal(lfile)
            ret = 1
        # mirror the standin's permission bits onto the largefile
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if (os.path.exists(abslfile) and
            repo.dirstate.normalize(lfile) not in repo[None]):
            util.unlinkpath(abslfile)
            ret = -1
    # copy the standin's dirstate state ('n'/'r'/'a'/'?') into lfdirstate
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted.  But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        # preserve the user's working copy before we clobber it below
        if os.path.exists(absstandin + '.orig') and os.path.exists(abslfile):
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        # only fetch when the working copy is absent or its content hash
        # no longer matches the standin
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                if lfile not in repo[None]: # not switched to normal file
                    util.unlinkpath(abslfile, ignoremissing=True)
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            else:
                # Synchronize largefile dirstate to the last modified time of
                # the file
                lfdirstate.normal(lfile)
            ret = 1
        # mirror the standin's permission bits onto the largefile
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if (os.path.exists(abslfile) and
            repo.dirstate.normalize(lfile) not in repo[None]):
            util.unlinkpath(abslfile)
            ret = -1
    # copy the standin's dirstate state ('n'/'r'/'a'/'?') into lfdirstate
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted.  But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret
def remove_largefiles(ui, repo, *pats, **opts):
    '''Remove largefiles matching pats from the working directory and mark
    them as removed in both the repository and the largefiles dirstate.

    With --after, only schedules already-deleted files for removal; otherwise
    also removes clean files from disk.  Warns (and does not remove) files
    whose state makes removal unsafe.'''
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only files that actually have a standin, i.e. real largefiles
    # NOTE(review): 'list' shadows the builtin here; kept as-is
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, reason):
        # tell the user why each file is being skipped
        for f in files:
            ui.warn(_('not removing %s: %s (use forget to undo)\n')
                    % (m.rel(f), reason))

    if after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('file still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('file is modified'))
        warn(added, _('file has been marked for add'))

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    ui.status(_('removing %s\n') % f)
                if os.path.exists(repo.wjoin(f)):
                    util.unlinkpath(repo.wjoin(f))
            lfdirstate.remove(f)
        lfdirstate.write()
        # from here on we operate on the standins, not the largefiles
        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        lfutil.repo_forget(repo, forget)
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            lfutil.repo_remove(repo, remove, unlink=True)
    finally:
        wlock.release()
def _opendb(self):
    '''Open the database and make sure the table is created on demand.

    Handles on-disk version detection: refuses to open maps newer than
    this code, migrates a v1 (RevMap) file when found, and rebuilds the
    sqlite database whenever the recorded version does not match.
    '''
    version = None
    try:
        # the map file starts with its version number; read at most 2 chars
        version = int(open(self._filepath).read(2))
    except (ValueError, IOError):
        pass
    if version and version not in [RevMap.VERSION, self.VERSION]:
        raise error.Abort('revmap too new -- please upgrade')

    if self._db:
        self._db.close()

    # if version mismatch, the database is considered invalid
    if version != self.VERSION:
        hgutil.unlinkpath(self._dbpath, ignoremissing=True)

    self._db = sqlite3.connect(self._dbpath)
    self._db.text_factory = bytes

    # cache size affects random accessing (e.g. index building)
    # performance greatly. default is 2MB (2000 KB), we want to have
    # a big enough cache that can hold the entire map.
    cachesize = 2000
    for path, ratio in [(self._filepath, 1.7), (self._dbpath, 1)]:
        if os.path.exists(path):
            cachesize += os.stat(path).st_size * ratio // 1000

    # disable auto-commit. everything is inside a transaction
    self._db.isolation_level = 'DEFERRED'

    with self._transaction('EXCLUSIVE'):
        # negative cache_size means "size in KB" to sqlite
        self._db.execute('PRAGMA cache_size=%d' % (-cachesize))

        # PRAGMA statements provided by the user
        for pragma in (self._sqlitepragmas or []):
            # drop malicious ones
            if re.match(r'\A\w+=\w+\Z', pragma):
                self._db.execute('PRAGMA %s' % pragma)

        # NOTE(review): map() is eager in Python 2, so these execute
        # immediately; this code is not Python 3 compatible as written
        map(self._db.execute, self.TABLESCHEMA)
        if version == RevMap.VERSION:
            # old flat-file revmap on disk: import it into sqlite
            self.rowcount = 0
            self._importrevmapv1()
        elif not self.rowcount:
            self.rowcount = self._db.execute(
                'SELECT COUNT(1) FROM revmap').fetchone()[0]

        # "bulk insert; then create index" is about 2.4x as fast as
        # "create index; then bulk insert" on a large repo
        map(self._db.execute, self.INDEXSCHEMA)

    # write a dummy rev map file with just the revision number
    if version != self.VERSION:
        f = open(self._filepath, 'w')
        f.write('%s\n' % self.VERSION)
        f.close()
def remove_largefiles(ui, repo, *pats, **opts):
    '''Remove largefiles matching pats from the working directory and mark
    them as removed in both the repository and the largefiles dirstate.

    With --after, only schedules already-deleted files for removal; otherwise
    also removes clean files from disk.  Warns (and does not remove) files
    whose state makes removal unsafe.'''
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only files that actually have a standin, i.e. real largefiles
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, reason):
        # tell the user why each file is being skipped
        for f in files:
            ui.warn(_('not removing %s: %s (use forget to undo)\n')
                    % (m.rel(f), reason))

    if after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('file still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('file is modified'))
        warn(added, _('file has been marked for add'))

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    # BUGFIX: translate the literal format string first, then
                    # interpolate -- _('removing %s\n' % f) formatted before
                    # the gettext lookup, so the catalog entry was never found
                    ui.status(_('removing %s\n') % f)
                if os.path.exists(repo.wjoin(f)):
                    util.unlinkpath(repo.wjoin(f))
            lfdirstate.remove(f)
        lfdirstate.write()
        # from here on we operate on the standins, not the largefiles
        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        lfutil.repo_forget(repo, forget)
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            lfutil.repo_remove(repo, remove, unlink=True)
    finally:
        wlock.release()
def cleanup(self, ledger):
    """Delete this pack's index and data files once every key it contains
    has had its history repacked elsewhere, unless the ledger records this
    pack as newly created."""
    repacked = set()
    for entry in ledger.sources.get(self, []):
        if entry.historyrepacked:
            repacked.add((entry.filename, entry.node))

    remaining = set(self) - repacked
    if not remaining and self.path not in ledger.created:
        util.unlinkpath(self.indexpath, ignoremissing=True)
        util.unlinkpath(self.packpath, ignoremissing=True)
def cleanup(self, ledger):
    # Delete this pack (index + data file) once every (filename, node) key
    # it holds has been repacked elsewhere -- but never a pack the ledger
    # says was just created during this repack run.
    entries = ledger.sources.get(self, [])
    allkeys = set(self)
    repackedkeys = set((e.filename, e.node) for e in entries
                       if e.historyrepacked)

    if len(allkeys - repackedkeys) == 0:
        if self.path not in ledger.created:
            util.unlinkpath(self.indexpath, ignoremissing=True)
            util.unlinkpath(self.packpath, ignoremissing=True)
def _buildmeta(ui, repo, args, partial=False, skipuuid=False):
    # Rebuild hgsubversion metadata (revmap, branch info) for *repo*.
    # With partial=True an incremental rebuild from cached state is
    # attempted first, falling back to a full rebuild when the cached
    # metadata is missing or unreadable.

    if repo is None:
        raise error.RepoError("There is no Mercurial repository"
                              " here (.hg not found)")

    dest = None
    validateuuid = False
    if len(args) == 1:
        dest = args[0]
        validateuuid = True
    elif len(args) > 1:
        raise error.Abort('rebuildmeta takes 1 or no arguments')
    # fall back through the configured push/pull paths for the svn URL
    url = repo.ui.expandpath(dest or repo.ui.config('paths', 'default-push') or
                             repo.ui.config('paths', 'default') or '')

    meta = svnmeta.SVNMeta(repo, skiperrorcheck=True)

    svn = None
    if meta.subdir is None:
        # only contact the remote when the subdir is not already known
        svn = svnrepo.svnremoterepo(ui, url).svn
        meta.subdir = svn.subdir

    youngest = 0
    startrev = 0
    branchinfo = {}

    if not partial:
        # a full rebuild starts from an empty revmap
        hgutil.unlinkpath(meta.revmap_file, ignoremissing=True)

    revmap = meta.revmap
    if partial:
        try:
            # we can't use meta.lastpulled here because we are bootstraping the
            # lastpulled and want to keep the cached value on disk during a
            # partial rebuild
            foundpartialinfo = False
            youngestpath = os.path.join(meta.metapath, 'lastpulled')
            if os.path.exists(youngestpath):
                youngest = util.load(youngestpath)
                lasthash = revmap.lasthash
                if len(revmap) > 0 and lasthash:
                    startrev = repo[lasthash].rev() + 1
                    branchinfo = util.load(meta.branch_info_file)
                    foundpartialinfo = True
            if not foundpartialinfo:
                ui.status('missing some metadata -- doing a full rebuild\n')
                partial = False
        except IOError, err:
            # a missing file just means no partial state; anything else is real
            if err.errno != errno.ENOENT:
                raise
            ui.status('missing some metadata -- doing a full rebuild\n')
        except AttributeError:
            ui.status('no metadata available -- doing a full rebuild\n')
def remove(list, unlink): wlock = repo.wlock() try: if unlink: for f in list: try: util.unlinkpath(repo.wjoin(f)) except OSError, inst: if inst.errno != errno.ENOENT: raise repo[None].forget(list)
def unshelvecleanup(ui, repo, name, opts):
    """Remove the on-disk files belonging to a completed unshelve.

    Unless ``--keep`` was given, every shelf file for *name* is moved to
    the backup area and stale backups are pruned.
    """
    if not opts.get('keep'):
        for ext in shelvefileextensions:
            candidate = shelvedfile(repo, name, ext)
            if candidate.exists():
                candidate.movetobackup()
        cleanupoldbackups(repo)
    # rebase currently incorrectly leaves rebasestate behind even
    # in successful cases, see D4696578 for details.
    util.unlinkpath(repo.vfs.join('rebasestate'), ignoremissing=True)
def remove(list, unlink): wlock = repo.wlock() try: if unlink: for f in list: try: util.unlinkpath(repo.wjoin(f)) except OSError, inst: if inst.errno != errno.ENOENT: raise repo[None].forget(list)
def overrideforget(orig, ui, repo, *pats, **opts):
    # Wrapper for 'hg forget' that first lets the original command handle
    # normal files, then forgets the matched largefiles by dropping them
    # from the largefiles dirstate and removing their standins.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    # restrict to files that actually have a standin in the manifest
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            # 'a' == added but never committed: drop entirely
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()

    return result
def overrideforget(orig, ui, repo, *pats, **opts):
    # Wrapper for 'hg forget' that first lets the original command handle
    # normal files, then forgets the matched largefiles by dropping them
    # from the largefiles dirstate and removing their standins.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    # restrict to files that actually have a standin in the manifest
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            # 'a' == added but never committed: drop entirely
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()

    return result
def remove(self, source):
    """\
    This method removes files.

    source - the list of files to remove. string or list of strings.
    """
    filtered = self._filter_paths(self._source_check(source))
    remove, forget = [], []

    m = scmutil.match(self._repo[None], filtered, {})
    s = self._repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    # assume forced, and purge
    remove, forget = modified + deleted + clean + added, added

    for f in remove:
        try:
            util.unlinkpath(self._repo.wjoin(f))
        except OSError, inst:
            # best-effort deletion: the file may already be gone
            # NOTE(review): swallows all OSErrors, not just ENOENT -- confirm
            pass
def clear(self):
    """Delete every on-disk artifact of this revmap and reset the cached
    in-memory state so the object behaves like a fresh, empty map."""
    for path in (self._filepath, self._dbpath, self._rowcountpath):
        hgutil.unlinkpath(path, ignoremissing=True)
    self._db = None
    self._hashescache = {}
    self._firstpull = None
    self._lastpull = None
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False, checked=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # phase 1: decide per file what needs updating; defer fetches so
        # they can be batched by cachelfiles() below
        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                # preserve the user's copy before it gets overwritten
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (checked or
                     not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        # phase 2: materialize the updates decided above
        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n')
                         % (updated, removed))
    finally:
        wlock.release()
def clear(cls, repo):
    """Remove this class's state file from the repository, ignoring the
    case where it does not exist."""
    statepath = repo.join(cls._filename)
    util.unlinkpath(statepath, ignoremissing=True)
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # Wrapper for 'hg copy'/'hg rename' that runs the original command twice:
    # once restricted to normal files, then again restricted to largefile
    # standins (with util.copyfile monkeypatched so the largefiles themselves
    # are copied/renamed alongside their standins).

    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]
    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        try:
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # "no files to copy" just means no normal files matched;
            # the largefile pass below may still succeed
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                # only match standins whose largefile counterpart matched
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    # record every copy so the largefiles themselves can be
                    # copied/renamed after orig() has handled the standins
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')),
                                             '')
                    destlfiledir = os.path.dirname(
                        repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        util.unlinkpath(repo.wjoin(srclfile), True)
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # only abort when *neither* pass found anything to copy
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
class RevMap(dict):
    # Flat-file mapping of (svn revnum, branch) -> hg node hash, stored as
    # "revnum hexhash branch" lines.  Provides the same interface as
    # SqliteRevMap; iteration is deliberately restricted (see __init__).

    # on-disk file format version written as the first line of the map file
    VERSION = 1

    lastpulled = util.fileproperty('_lastpulled', lambda x: x._lastpulled_file,
                                   default=0, deserializer=int)

    def __init__(self, revmap_path, lastpulled_path):
        dict.__init__(self)
        self._filepath = revmap_path
        self._lastpulled_file = lastpulled_path
        # lazy reverse index, built on first hashes() call
        self._hashes = None
        # disable iteration to have a consistent interface with SqliteRevMap
        # it's less about performance since RevMap needs iteration internally
        self._allowiter = False

        self.firstpulled = 0
        if os.path.isfile(self._filepath):
            self._load()
        else:
            self._write()

    def hashes(self):
        # reverse mapping: hg hash -> (revnum, branch); cached after first use
        if self._hashes is None:
            self._hashes = dict((v, k) for (k, v) in self._origiteritems())
        return self._hashes

    def branchedits(self, branch, revnum):
        # all entries on *branch* strictly older than *revnum*, newest first
        check = lambda x: x[0][1] == branch and x[0][0] < revnum
        return sorted(filter(check, self._origiteritems()), reverse=True)

    def branchmaxrevnum(self, branch, maxrevnum):
        # highest revnum on *branch* that does not exceed *maxrevnum*
        result = 0
        for num, br in self._origiterkeys():
            if br == branch and num <= maxrevnum and num > result:
                result = num
        return result

    @property
    def lasthash(self):
        # binary hash of the last line in the map file, or None if empty
        lines = list(self._readmapfile())
        if not lines:
            return None
        return bin(lines[-1].split(' ', 2)[1])

    def revhashes(self, revnum):
        # yield every hash recorded for *revnum*, across all branches
        for key, value in self._origiteritems():
            if key[0] == revnum:
                yield value

    def clear(self):
        self._write()
        dict.clear(self)
        self._hashes = None

    def batchset(self, items, lastpulled):
        '''Set items in batches

        items is an array of (rev num, branch, binary hash)

        For performance reason, internal in-memory state is not updated.
        To get an up-to-date RevMap, reconstruct the object.
        '''
        with open(self._filepath, 'a') as f:
            f.write(''.join('%s %s %s\n' % (revnum, hex(binhash), br or '')
                            for revnum, br, binhash in items))
        self.lastpulled = lastpulled

    def _readmapfile(self):
        # Return an iterator of raw map lines (minus the version header),
        # transparently converting a SqliteRevMap database back to the v1
        # flat-file format when one is found on disk.
        path = self._filepath
        try:
            f = open(path)
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            return iter([])

        ver = int(f.readline())
        if ver == SqliteRevMap.VERSION:
            # downgrade: export the sqlite map to a v1 file and retry
            revmap = SqliteRevMap(self._filepath, self._lastpulled_file)
            tmppath = '%s.tmp' % self._filepath
            revmap.exportrevmapv1(tmppath)
            os.rename(tmppath, self._filepath)
            hgutil.unlinkpath(revmap._dbpath)
            hgutil.unlinkpath(revmap._rowcountpath, ignoremissing=True)
            return self._readmapfile()
        if ver != self.VERSION:
            raise error.Abort('revmap too new -- please upgrade')
        return f
def clearstatus(repo):
    """Remove the rebase status files from the repository."""
    _clearrebasesetvisibiliy(repo)
    statepath = repo.join("rebasestate")
    util.unlinkpath(statepath, ignoremissing=True)
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print one config section in hgrc syntax
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    # the whole demo runs against a throwaway repository in a temp dir
    fn = 'demo.txt'
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(repo.baseui, tmpdir, True)
    ui.setconfig('keyword', fn, '', 'keyword')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn, 'keyword')

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.vfs('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v, 'keyword')
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        if uikwmaps:
            kwmaps = dict(uikwmaps)
        else:
            kwmaps = _defaultkwmaps(ui)

    # activate the extension in the demo repo and show the effective config
    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # write a file containing one line per keyword, then commit so the
    # keywords get expanded
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wvfs.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    wlock = repo.wlock()
    try:
        repo.dirstate.setbranch('demobranch')
    finally:
        wlock.release()
    # disable commit hooks so the demo commit is side-effect free
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '', 'keyword')
    msg = _('hg keyword configuration and expansion example')
    ui.note(("hg ci -m '%s'\n" % msg))
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    # clean up the temporary working files
    for root, dirs, files in os.walk(tmpdir):
        for f in files:
            util.unlinkpath(repo.vfs.reljoin(root, f))
def overriderevert(orig, ui, repo, *pats, **opts):
    '''Wrapper for 'hg revert' that reverts largefiles alongside their
    standins.

    Temporarily rewrites the matcher so the underlying revert operates on
    standins, tracks which largefiles were matched (via repo._lfilestoupdate),
    then re-synchronizes the largefiles and the largefiles dirstate.'''
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        try:
            ctx = scmutil.revsingle(repo, opts.get('rev'))
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    # map each file to the standin that should be reverted,
                    # or None to drop it from the match
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if origmatchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return origmatchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(overridematch)
            # BUGFIX: removed a stray no-op expression statement
            # ('scmutil.match' on its own line) that had no effect
            matches = overridematch(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
        lfileslist = getattr(repo, '_lfilestoupdate', [])
        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                printmessage=False)

        # empty out the largefiles list so we start fresh next time
        repo._lfilestoupdate = []
        for lfile in modified:
            if lfile in lfileslist:
                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                        in repo['.']:
                    lfutil.writestandin(repo, lfutil.standin(lfile),
                        repo['.'][lfile].data().strip(),
                        'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for lfile in added:
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # Wrapper for 'hg copy'/'hg rename' that runs the original command twice:
    # once restricted to normal files, then again restricted to largefile
    # standins (with util.copyfile monkeypatched so the largefiles themselves
    # are copied/renamed alongside their standins).

    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]
    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        try:
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # "no files to copy" just means no normal files matched;
            # the largefile pass below may still succeed
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                # only match standins whose largefile counterpart matched
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    # record every copy so the largefiles themselves can be
                    # copied/renamed after orig() has handled the standins
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')),
                                             '')
                    destlfiledir = os.path.dirname(
                        repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        util.unlinkpath(repo.wjoin(srclfile), True)
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # only abort when *neither* pass found anything to copy
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # consider both committed largefiles and ones tracked only in the
        # largefiles dirstate
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # first pass: decide per largefile whether content must be fetched;
        # ``update`` maps lfile -> expected content hash from its standin
        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    # a standin backup exists: keep a backup of the current
                    # largefile content as well before it is overwritten
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()
        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        # second pass: materialize contents from the cache and propagate the
        # standin's dirstate entry to the largefiles dirstate
        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n')
                         % (updated, removed))
    finally:
        wlock.release()
def clearstatus(repo):
    """Delete the on-disk rebase state file.

    A missing file is ignored, so this is safe to call whether or not a
    rebase is in progress.
    """
    statefile = repo.join("rebasestate")
    util.unlinkpath(statefile, ignoremissing=True)
def updatelfiles(ui, repo, filelist=None, printmessage=True,
                 normallookup=False):
    '''Update working-copy largefiles to match their standins.

    ``filelist`` (optional) restricts processing to the named largefiles;
    ``printmessage`` toggles status output; ``normallookup`` is forwarded
    to lfutil.synclfdirstate when syncing dirstate entries.
    '''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # consider both committed largefiles and ones tracked only in the
        # largefiles dirstate
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # first pass: decide per largefile whether content must be fetched;
        # ``update`` maps lfile -> expected content hash from its standin
        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    # a standin backup exists: keep a backup of the current
                    # largefile content as well before it is overwritten
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()
        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        # second pass: materialize contents from the cache and propagate the
        # standin's dirstate entry to the largefiles dirstate
        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        if filelist is not None:
            # If "local largefile" is chosen at file merging, it is
            # not listed in "filelist" (= dirstate syncing is
            # omitted), because the standin file is not changed before and
            # after merging.
            # But the status of such files may have to be changed by
            # merging. For example, locally modified ("M") largefile
            # has to become re-added("A"), if it is "normal" file in
            # the target revision of linear-merging.
            for lfile in lfdirstate:
                if lfile not in filelist:
                    lfutil.synclfdirstate(repo, lfdirstate, lfile, True)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n')
                      % (updated, removed))
    finally:
        wlock.release()
def clear(cls, repo):
    """Remove this class's state file from the repository, if present.

    Absence of the file is not an error.
    """
    statepath = repo.join(cls._filename)
    util.unlinkpath(statepath, ignoremissing=True)
def shelve(ui, repo, *pats, **opts): '''interactively select changes to set aside If a list of files is omitted, all changes reported by :hg:` status` will be candidates for shelving. You will be prompted for whether to shelve changes to each modified file, and for files with multiple changes, for each change to use. The shelve command works with the Color extension to display diffs in color. On each prompt, the following responses are possible:: y - shelve this change n - skip this change s - skip remaining changes to this file f - shelve remaining changes to this file d - done, skip remaining changes and files a - shelve all changes to all remaining files q - quit, shelving no changes ? - display help ''' if not ui.interactive() and not (opts['all'] or opts['list']): raise util.Abort(_('shelve can only be run interactively')) # List all the active shelves by name and return ' if opts['list']: listshelves(ui, repo) return forced = opts['force'] or opts['append'] # Shelf name and path shelfname = opts.get('name') shelfpath = getshelfpath(repo, shelfname) if os.path.exists(repo.join(shelfpath)) and not forced: raise util.Abort(_('shelve data already exists')) def shelvefunc(ui, repo, message, match, opts): parents = repo.dirstate.parents() changes = repo.status(match=match)[:3] modified, added, removed = changes diffopts = patch.diffopts(ui, opts={'git': True, 'nodates': True}) chunks = patch.diff(repo, changes=changes, opts=diffopts) fp = cStringIO.StringIO(''.join(chunks)) try: ac = parsepatch(fp) except patch.PatchError, err: raise util.Abort(_('error parsing patch: %s') % err) del fp # 1. 
filter patch, so we have intending-to apply subset of it chunks = filterpatch(ui, ac, not opts['all']) rc = refilterpatch(ac, chunks) # set of files to be processed contenders = set() for h in chunks: try: contenders.update(set(h.files())) except AttributeError: pass # exclude sources of copies that are otherwise untouched changed = modified + added + removed newfiles = set(f for f in changed if f in contenders) if not newfiles: ui.status(_('no changes to shelve\n')) return 0 # 2. backup changed files, so we can restore them in case of error backupdir = repo.join('shelve-backups') try: backups = makebackup(ui, repo, backupdir, newfiles) # patch to shelve sp = cStringIO.StringIO() for c in chunks: c.write(sp) # patch to apply to shelved files fp = cStringIO.StringIO() for c in rc: # skip files not selected for shelving if c.filename() in newfiles: c.write(fp) dopatch = fp.tell() fp.seek(0) try: # 3a. apply filtered patch to clean repo (clean) opts['no_backup'] = True cmdutil.revert(ui, repo, repo['.'], parents, *[repo.wjoin(f) for f in newfiles], **opts) for f in added: if f in newfiles: util.unlinkpath(repo.wjoin(f)) # 3b. (apply) if dopatch: try: ui.debug('applying patch\n') ui.debug(fp.getvalue()) patch.internalpatch(ui, repo, fp, 1, eolmode=None) except patch.PatchError, err: raise util.Abort(str(err)) del fp # 4. We prepared working directory according to filtered # patch. Now is the time to save the shelved changes! 
ui.debug("saving patch to shelve\n") if opts['append']: sp.write(repo.opener(shelfpath).read()) sp.seek(0) f = repo.opener(shelfpath, "w") f.write(sp.getvalue()) del f, sp except: ui.warn("shelving failed: %s\n" % sys.exc_info()[1]) try: # re-schedule remove matchremoved = scmutil.matchfiles(repo, removed) cmdutil.forget(ui, repo, matchremoved, "", True) for f in removed: if f in newfiles and os.path.isfile(repo.wjoin(f)): os.unlink(repo.wjoin(f)) # copy back backups for realname, tmpname in backups.iteritems(): ui.debug('restoring %r to %r\n' % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) # Our calls to copystat() here and above are a # hack to trick any editors that have f open that # we haven't modified them. # # Also note that this racy as an editor could # notice the file's mtime before we've finished # writing it. shutil.copystat(tmpname, repo.wjoin(realname)) # re-schedule add matchadded = scmutil.matchfiles(repo, added) cmdutil.add(ui, repo, matchadded, False, False, "", True) ui.debug('removing shelve file\n') if os.path.isfile(repo.join(shelfpath)): os.unlink(repo.join(shelfpath)) except OSError, err: ui.warn("restoring backup failed: %s\n" % err)
def shelvefunc(ui, repo, message, match, opts): parents = repo.dirstate.parents() changes = repo.status(match=match)[:5] modified, added, removed = changes[:3] files = modified + added + removed diffopts = mdiff.diffopts(git=True, nodates=True) patch_diff = "".join(patch.diff(repo, parents[0], match=match, changes=changes, opts=diffopts)) fp = cStringIO.StringIO(patch_diff) ac = parsepatch(fp) fp.close() chunks = filterpatch(ui, ac, not opts["all"]) rc = refilterpatch(ac, chunks) # set of files to be processed contenders = {} for h in chunks: try: contenders.update(dict.fromkeys(h.files())) except AttributeError: pass # exclude sources of copies that are otherwise untouched newfiles = set(f for f in files if f in contenders) if not newfiles: ui.status(_("no changes to shelve\n")) return 0 backupdir = repo.join("shelve-backups") try: backups = makebackup(ui, repo, backupdir, newfiles) # patch to shelve sp = cStringIO.StringIO() for c in chunks: c.write(sp) # patch to apply to shelved files fp = cStringIO.StringIO() for c in rc: # skip files not selected for shelving if c.filename() in newfiles: c.write(fp) dopatch = fp.tell() fp.seek(0) try: # 3a. apply filtered patch to clean repo (clean) opts["no_backup"] = True cmdutil.revert(ui, repo, repo["."], parents, *[os.path.join(repo.root, f) for f in newfiles], **opts) for f in added: if f in newfiles: util.unlinkpath(repo.wjoin(f)) # 3b. apply filtered patch to clean repo (apply) if dopatch: ui.debug("applying patch\n") ui.debug(fp.getvalue()) patch.internalpatch(ui, repo, fp, 1) del fp # 3c. 
apply filtered patch to clean repo (shelve) ui.debug("saving patch to shelve\n") if opts["append"]: sp.write(repo.opener(shelfpath).read()) sp.seek(0) f = repo.opener(shelfpath, "w") f.write(sp.getvalue()) del f, sp except: ui.warn("shelving failed: %s\n" % sys.exc_info()[1]) try: # re-schedule remove matchremoved = scmutil.matchfiles(repo, removed) cmdutil.forget(ui, repo, matchremoved, "", True) for f in removed: if f in newfiles and os.path.isfile(f): os.unlink(f) # copy back backups for realname, tmpname in backups.iteritems(): ui.debug("restoring %r to %r\n" % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) # re-schedule add matchadded = scmutil.matchfiles(repo, added) cmdutil.add(ui, repo, matchadded, False, False, "", True) ui.debug("removing shelve file\n") if os.path.isfile(repo.wjoin(shelfpath)): os.unlink(repo.join(shelfpath)) except OSError, err: ui.warn("restoring backup failed: %s\n" % err) return 0
def shelvefunc(ui, repo, message, match, opts): parents = repo.dirstate.parents() changes = repo.status(match=match)[:5] modified, added, removed = changes[:3] files = modified + added + removed diffopts = mdiff.diffopts(git=True, nodates=True) patch_diff = ''.join( patch.diff(repo, parents[0], match=match, changes=changes, opts=diffopts)) fp = cStringIO.StringIO(patch_diff) ac = parsepatch(fp) fp.close() chunks = filterpatch(ui, ac, not opts['all']) rc = refilterpatch(ac, chunks) # set of files to be processed contenders = {} for h in chunks: try: contenders.update(dict.fromkeys(h.files())) except AttributeError: pass # exclude sources of copies that are otherwise untouched newfiles = set(f for f in files if f in contenders) if not newfiles: ui.status(_('no changes to shelve\n')) return 0 backupdir = repo.join('shelve-backups') try: backups = makebackup(ui, repo, backupdir, newfiles) # patch to shelve sp = cStringIO.StringIO() for c in chunks: c.write(sp) # patch to apply to shelved files fp = cStringIO.StringIO() for c in rc: # skip files not selected for shelving if c.filename() in newfiles: c.write(fp) dopatch = fp.tell() fp.seek(0) try: # 3a. apply filtered patch to clean repo (clean) opts['no_backup'] = True cmdutil.revert(ui, repo, repo['.'], parents, *[os.path.join(repo.root, f) for f in newfiles], **opts) for f in added: if f in newfiles: util.unlinkpath(repo.wjoin(f)) # 3b. apply filtered patch to clean repo (apply) if dopatch: ui.debug('applying patch\n') ui.debug(fp.getvalue()) patch.internalpatch(ui, repo, fp, 1) del fp # 3c. 
apply filtered patch to clean repo (shelve) ui.debug("saving patch to shelve\n") if opts['append']: sp.write(repo.opener(shelfpath).read()) sp.seek(0) f = repo.opener(shelfpath, "w") f.write(sp.getvalue()) del f, sp except: ui.warn("shelving failed: %s\n" % sys.exc_info()[1]) try: # re-schedule remove matchremoved = scmutil.matchfiles(repo, removed) cmdutil.forget(ui, repo, matchremoved, "", True) for f in removed: if f in newfiles and os.path.isfile(f): os.unlink(f) # copy back backups for realname, tmpname in backups.iteritems(): ui.debug('restoring %r to %r\n' % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) # re-schedule add matchadded = scmutil.matchfiles(repo, added) cmdutil.add(ui, repo, matchadded, False, False, "", True) ui.debug('removing shelve file\n') if os.path.isfile(repo.wjoin(shelfpath)): os.unlink(repo.join(shelfpath)) except OSError, err: ui.warn("restoring backup failed: %s\n" % err) return 0
def updatelfiles(ui, repo, filelist=None, printmessage=True,
                 normallookup=False):
    '''Update working-copy largefiles to match their standins.

    ``filelist`` (optional) restricts processing to the named largefiles;
    ``printmessage`` toggles status output; ``normallookup`` forces the
    largefiles dirstate entries into the "needs lookup" state.
    '''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # consider both committed largefiles and ones tracked only in the
        # largefiles dirstate
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            # convert to a set: membership is tested once per largefile
            # below (the sibling updatelfiles variants do the same)
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # first pass: decide per largefile whether content must be fetched;
        # ``update`` maps lfile -> expected content hash from its standin
        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    # a standin backup exists: keep a backup of the current
                    # largefile content as well before it is overwritten
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()
        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        # second pass: materialize contents from the cache and copy the
        # standin's dirstate state into the largefiles dirstate
        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            standin = lfutil.standin(lfile)
            if standin in repo.dirstate:
                # raw dirstate tuple: index 0 is the state character,
                # index 3 the recorded mtime -- assumed layout of
                # dirstate._map entries; TODO confirm against dirstate
                entry = repo.dirstate._map[standin]
                state, mtime = entry[0], entry[3]
            else:
                state, mtime = '?', -1
            if state == 'n':
                if normallookup or mtime < 0:
                    # state 'n' doesn't ensure 'clean' in this case
                    lfdirstate.normallookup(lfile)
                else:
                    lfdirstate.normal(lfile)
            elif state == 'm':
                lfdirstate.normallookup(lfile)
            elif state == 'r':
                lfdirstate.remove(lfile)
            elif state == 'a':
                lfdirstate.add(lfile)
            elif state == '?':
                lfdirstate.drop(lfile)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n')
                      % (updated, removed))
    finally:
        wlock.release()
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the tipmost head of the current named branch as the
    destination. (The destination changeset is not modified by
    rebasing, but new changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the latest (tipmost) head of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    # state maps source rev -> rebased rev (-1 while not yet rebased)
    state = {}
    skipped = set()
    targetancestors = set()

    editor = None
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # resuming/aborting an interrupted rebase: most selection
            # options are mutually exclusive with these modes
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(
                    _('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))
            if detachf:
                if not srcf:
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))

            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = repo[destf]

            if revf:
                revgen = repo.set('%lr', revf)
            elif srcf:
                revgen = repo.set('(%r)::', srcf)
            else:
                base = basef or '.'
                revgen = repo.set('(children(ancestor(%r, %d)) and ::(%r))::',
                                  base, dest, base)

            rebaseset = [c.rev() for c in revgen]

            if not rebaseset:
                repo.ui.debug('base is ancestor of destination')
                result = None
            elif not keepf and list(repo.set('first(children(%ld) - %ld)',
                                             rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))
            else:
                result = buildstate(repo, dest, rebaseset, detachf)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            assert not extrafn, 'cannot use both keepbranches and extrafn'
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                    if len(branches) > 1:
                        raise util.Abort(_('cannot collapse multiple named '
                            'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            if state[rev] == -1:
                # -1 marks revisions not rebased yet (relevant when
                # resuming an interrupted run)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                # persist state before each step so --continue/--abort work
                storestatus(repo, originalwd, target, state, collapsef,
                            keepf, keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge',
                                     opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state)
                        if stats and stats[3] > 0:
                            raise util.Abort(_('unresolved conflicts (see hg '
                                        'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                cmdutil.duplicatecopies(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                          editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # NOTE(review): ``rev`` here is the loop variable left over
            # from the iteration above (the last rebased revision).
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] != nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v != nullmerge:
                    nstate[repo[k].node()] = repo[v].node()

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        if currentbookmarks:
            updatebookmarks(repo, nstate, currentbookmarks, **opts)

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlinkpath(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
            newincludes, newexcludes, force):
    '''Shrink the repository to the new narrowspec.

    Strips local-only changesets that touch newly-excluded paths,
    deletes the store files that fall outside the new matcher, and
    records the new includes/excludes. Aborts (unless ``force``) when
    visible local changesets would be affected.
    '''
    oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # This is essentially doing "hg outgoing" to find all local-only
    # commits. We will then check that the local-only commits don't
    # have any changes to files that will be untracked.
    unfi = repo.unfiltered()
    outgoing = discovery.findcommonoutgoing(unfi, remote,
                                            commoninc=commoninc)
    ui.status(_('looking for local changes to affected paths\n'))
    localnodes = []
    for n in itertools.chain(outgoing.missing, outgoing.excluded):
        if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
            localnodes.append(n)
    revstostrip = unfi.revs('descendants(%ln)', localnodes)
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    visibletostrip = list(repo.changelog.node(r)
                          for r in (revstostrip - hiddenrevs))
    if visibletostrip:
        ui.status(_('The following changeset(s) or their ancestors have '
                    'local changes not on the remote:\n'))
        # cap the listing unless --verbose is given
        maxnodes = 10
        if ui.verbose or len(visibletostrip) <= maxnodes:
            for n in visibletostrip:
                ui.status('%s\n' % node.short(n))
        else:
            for n in visibletostrip[:maxnodes]:
                ui.status('%s\n' % node.short(n))
            ui.status(_('...and %d more, use --verbose to list all\n') %
                      (len(visibletostrip) - maxnodes))
        if not force:
            raise error.Abort(_('local changes found'),
                              hint=_('use --force-delete-local-changes to '
                                     'ignore'))

    with ui.uninterruptable():
        if revstostrip:
            tostrip = [unfi.changelog.node(r) for r in revstostrip]
            if repo['.'].node() in tostrip:
                # stripping working copy, so move to a different commit first
                urev = max(repo.revs('(::%n) - %ln + null',
                                     repo['.'].node(), visibletostrip))
                hg.clean(repo, urev)
            overrides = {('devel', 'strip-obsmarkers'): False}
            with ui.configoverride(overrides, 'narrow'):
                repair.strip(ui, unfi, tostrip, topic='narrow')

        # collect store files that the new matcher no longer covers
        todelete = []
        for f, f2, size in repo.store.datafiles():
            if f.startswith('data/'):
                # strip 'data/' prefix and the 2-char revlog suffix
                # (presumably '.i'/'.d' -- TODO confirm)
                file = f[5:-2]
                if not newmatch(file):
                    todelete.append(f)
            elif f.startswith('meta/'):
                # strip 'meta/' prefix and the 13-char trailer
                # (presumably '/00manifest.i' -- TODO confirm)
                dir = f[5:-13]
                dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
                include = True
                for d in dirs:
                    visit = newmatch.visitdir(d)
                    if not visit:
                        include = False
                        break
                    if visit == 'all':
                        break
                if not include:
                    todelete.append(f)

        repo.destroying()

        with repo.transaction("narrowing"):
            for f in todelete:
                ui.status(_('deleting %s\n') % f)
                util.unlinkpath(repo.svfs.join(f))
                repo.store.markremoved(f)

            _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes,
                               newexcludes, oldmatch, newmatch)
            repo.setnarrowpats(newincludes, newexcludes)

        repo.destroyed()
def hg_copy(ui, repo, pats, opts, rename=False):
    '''Copy or rename working-copy files and record the copies in dirstate.

    NOTE(review): this chunk appears truncated -- only the helper
    definitions (``walkpat``, ``copyfile``) are present; the driver code
    that walks ``pats`` and invokes them is not visible here.
    '''
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one pattern into (abs, rel, exact) source tuples,
        # warning about unmanaged/removed files that were named exactly
        srcs = []
        badstates = after and '?' or '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') %
                            rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # copy (or record) a single file and update dirstate; returns
        # True on a reported failure, None otherwise
        abstarget = scmutil.canonpath(repo.root, cwd, otarget)
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after: only record the copy, never touch the filesystem
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n')
                            % (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n')
                            % (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    In its default configuration, Mercurial will prevent you from
    rebasing published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch with two
    heads. You need to explicitly specify source and/or destination
    (or ``update`` to the other head, if it's the head of the intended
    source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # state maps source rev -> rebased rev (or a sentinel < 0)
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see 'hg help histedit')")
            raise util.Abort(msg)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # --continue / --abort: reload state saved by a previous,
            # interrupted run instead of computing a new rebase set
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise util.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = scmutil.revsingle(repo, destf)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return 1
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return 1
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return 1
                commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable to
                    # lists. This should be improved to honor lazyness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both "base"'
                                        ' and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working directory '
                                        'parent is also destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') % dest)
                    else: # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base), dest))
                    return 1

            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            result = buildstate(repo, dest, rebaseset, collapsef)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise util.Abort(_("can't rebase immutable changeset %s")
                                 % repo[root],
                                 hint=_('see hg help phases for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise util.Abort(_('cannot collapse multiple named '
                        'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target], inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._bookmarkcurrent
        if activebookmark:
            bookmarks.unsetcurrent(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                # persist progress before merging so an interrupted run
                # can be continued or aborted
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, state, collapsef,
                                           target)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = repo[p2].rev() != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform,
                                                     **opts)
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                          editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newrev
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)
            if activebookmark not in repo._bookmarks:
                # active bookmark was divergent one and has been deleted
                activebookmark = None

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['.'].node() == repo._bookmarks[activebookmark]):
                bookmarks.setcurrent(repo, activebookmark)

    finally:
        release(lock, wlock)
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''update largefiles in the working directory from their standins.

    First pass records which largefiles need new content (by standin
    hash) or removal; the second pass fetches content from the cache
    and synchronizes the largefiles dirstate with the repository
    dirstate.  If filelist is not None, only those largefiles are
    considered.
    '''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        # update maps lfile -> expected hash for files whose content
        # must be (re)fetched from the cache in the second pass
        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    # preserve the current largefile content alongside
                    # the standin's backup
                    shutil.copyfile(abslfile, abslfile + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate entry in largefiles
                    # dirstate, because lack of it misleads
                    # lfilesrepo.status() into recognition that such cache
                    # missing files are REMOVED.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

            for lfile in lfiles:
                update1 = 0

                expecthash = update.get(lfile)
                if expecthash:
                    if not lfutil.copyfromcache(repo, expecthash, lfile):
                        # failed ... but already removed and set to normallookup
                        continue
                    # Synchronize largefile dirstate to the last modified
                    # time of the file
                    lfdirstate.normal(lfile)
                    update1 = 1

                # copy the state of largefile standin from the repository's
                # dirstate to its state in the lfdirstate.
                abslfile = repo.wjoin(lfile)
                absstandin = repo.wjoin(lfutil.standin(lfile))
                if os.path.exists(absstandin):
                    mode = os.stat(absstandin).st_mode
                    if mode != os.stat(abslfile).st_mode:
                        os.chmod(abslfile, mode)
                        update1 = 1

                updated += update1

                state = repo.dirstate[lfutil.standin(lfile)]
                if state == 'n':
                    # When rebasing, we need to synchronize the standin and the
                    # largefile, because otherwise the largefile will get
                    # reverted.  But for commit's sake, we have to mark the
                    # file as unclean.
                    if getattr(repo, "_isrebasing", False):
                        lfdirstate.normallookup(lfile)
                    else:
                        lfdirstate.normal(lfile)
                elif state == 'r':
                    lfdirstate.remove(lfile)
                elif state == 'a':
                    lfdirstate.add(lfile)
                elif state == '?':
                    lfdirstate.drop(lfile)

            lfdirstate.write()
            if printmessage and lfiles:
                ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                    removed))
    finally:
        wlock.release()
def shelve(ui, repo, *pats, **opts):
    '''interactively select changes to set aside

    If a list of files is omitted, all changes reported by :hg:`status`
    will be candidates for shelving.

    You will be prompted for whether to shelve changes to each
    modified file, and for files with multiple changes, for each
    change to use.

    The shelve command works with the Color extension to display
    diffs in color.

    On each prompt, the following responses are possible::

      y - shelve this change
      n - skip this change

      s - skip remaining changes to this file
      f - shelve remaining changes to this file

      d - done, skip remaining changes and files
      a - shelve all changes to all remaining files
      q - quit, shelving no changes

      ? - display help
    '''

    if not ui.interactive():
        raise util.Abort(_('shelve can only be run interactively'))

    # List all the active shelves by name and return
    if opts['list']:
        listshelves(ui, repo)
        return

    forced = opts['force'] or opts['append']

    # Shelf name and path
    shelfname = opts.get('name')
    shelfpath = getshelfpath(repo, shelfname)

    if os.path.exists(repo.join(shelfpath)) and not forced:
        raise util.Abort(_('shelve data already exists'))

    def shelvefunc(ui, repo, message, match, opts):
        # build a patch from the selected hunks, apply its inverse to
        # the working directory, and save the patch as the shelf
        parents = repo.dirstate.parents()
        changes = repo.status(match=match)[:3]
        modified, added, removed = changes
        diffopts = patch.diffopts(ui, opts={'git': True, 'nodates': True})
        chunks = patch.diff(repo, changes=changes, opts=diffopts)
        fp = cStringIO.StringIO(''.join(chunks))

        try:
            ac = parsepatch(fp)
        except patch.PatchError, err:
            raise util.Abort(_('error parsing patch: %s') % err)

        del fp

        # 1. filter patch, so we have intending-to apply subset of it
        chunks = filterpatch(ui, ac, not opts['all'])
        rc = refilterpatch(ac, chunks)

        # set of files to be processed
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers without files() are skipped
                pass

        # exclude sources of copies that are otherwise untouched
        changed = modified + added + removed
        newfiles = set(f for f in changed if f in contenders)

        if not newfiles:
            ui.status(_('no changes to shelve\n'))
            return 0

        # 2. backup changed files, so we can restore them in case of error
        backupdir = repo.join('shelve-backups')

        # NOTE(review): the cleanup (e.g. a finally clause removing
        # backupdir) for this outer try is not visible in this chunk --
        # confirm against the full source.
        try:
            backups = makebackup(ui, repo, backupdir, newfiles)

            # patch to shelve
            sp = cStringIO.StringIO()
            for c in chunks:
                c.write(sp)

            # patch to apply to shelved files
            fp = cStringIO.StringIO()
            for c in rc:
                # skip files not selected for shelving
                if c.filename() in newfiles:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            try:
                # 3a. apply filtered patch to clean repo (clean)
                opts['no_backup'] = True
                cmdutil.revert(ui, repo, repo['.'], parents,
                               *[os.path.join(repo.root, f)
                                 for f in newfiles],
                               **opts)
                for f in added:
                    if f in newfiles:
                        util.unlinkpath(repo.wjoin(f))

                # 3b. (apply)
                if dopatch:
                    try:
                        ui.debug('applying patch\n')
                        ui.debug(fp.getvalue())
                        patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                    except patch.PatchError, err:
                        raise util.Abort(str(err))
                del fp

                # 4. We prepared working directory according to filtered
                #    patch. Now is the time to save the shelved changes!
                ui.debug("saving patch to shelve\n")
                if opts['append']:
                    sp.write(repo.opener(shelfpath).read())
                sp.seek(0)
                f = repo.opener(shelfpath, "w")
                f.write(sp.getvalue())
                del f, sp
            except:
                ui.warn("shelving failed: %s\n" % sys.exc_info()[1])
                try:
                    # re-schedule remove
                    matchremoved = scmutil.matchfiles(repo, removed)
                    cmdutil.forget(ui, repo, matchremoved, "", True)
                    for f in removed:
                        if f in newfiles and os.path.isfile(f):
                            os.unlink(f)
                    # copy back backups
                    for realname, tmpname in backups.iteritems():
                        ui.debug('restoring %r to %r\n' % (tmpname, realname))
                        util.copyfile(tmpname, repo.wjoin(realname))
                        # Our calls to copystat() here and above are a
                        # hack to trick any editors that have f open that
                        # we haven't modified them.
                        #
                        # Also note that this racy as an editor could
                        # notice the file's mtime before we've finished
                        # writing it.
                        shutil.copystat(tmpname, repo.wjoin(realname))
                    # re-schedule add
                    matchadded = scmutil.matchfiles(repo, added)
                    cmdutil.add(ui, repo, matchadded, False, False, "", True)

                    ui.debug('removing shelve file\n')
                    if os.path.isfile(repo.wjoin(shelfpath)):
                        os.unlink(repo.join(shelfpath))
                except OSError, err:
                    ui.warn("restoring backup failed: %s\n" % err)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    In its default configuration, Mercurial will prevent you from
    rebasing published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets. Some
    changesets in the source branch (e.g. merges from the destination
    branch) may be dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch with two
    heads. You need to explicitly specify source and/or destination
    (or ``update`` to the other head, if it's the head of the intended
    source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev
    # state maps source rev -> rebased rev (or a sentinel < 0)
    state = {}
    skipped = set()
    targetancestors = set()

    editor = None
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # --continue / --abort: reload state saved by a previous,
            # interrupted run instead of computing a new rebase set
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    # fix: hint said "borken"; spell it "broken" like the
                    # rest of the messages in this command
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise util.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'source and a base'))
            if revf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if revf and srcf:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if not destf:
                # Destination defaults to the latest revision in the
                # current branch
                branch = repo[None].branch()
                dest = repo[branch]
            else:
                dest = scmutil.revsingle(repo, destf)

            if revf:
                rebaseset = scmutil.revrange(repo, revf)
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                rebaseset = repo.revs('(%ld)::', src)
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                rebaseset = repo.revs(
                    '(children(ancestor(%ld, %d)) and ::(%ld))::',
                    base, dest, base)
            if rebaseset:
                root = min(rebaseset)
            else:
                root = None

            if not rebaseset:
                repo.ui.debug('base is ancestor of destination\n')
                result = None
            elif (not (keepf or obsolete._enabled)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise util.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))
            else:
                result = buildstate(repo, dest, rebaseset, collapsef)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            elif not keepf and not repo[root].mutable():
                raise util.Abort(_("can't rebase immutable changeset %s")
                                 % repo[root],
                                 hint=_('see hg help phases for details'))
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = repo.changelog.ancestors([target],
                                                               inclusive=True)
                    external = externalparent(repo, state, targetancestors)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise util.Abort(_('cannot collapse multiple named '
                        'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target],
                                                       inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._bookmarkcurrent
        if activebookmark:
            bookmarks.unsetcurrent(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                # persist progress before merging so an interrupted run
                # can be continued or aborted
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state, collapsef)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                cmdutil.duplicatecopies(repo, rev, target)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                          editor=editor)
                else:
                    # Skip commit if we are collapsing
                    repo.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn, editor=editor)
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newrev
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['.'].node() == repo._bookmarks[activebookmark]):
                bookmarks.setcurrent(repo, activebookmark)

    finally:
        release(lock, wlock)
def overriderevert(orig, ui, repo, *pats, **opts):
    '''wrap the revert command so that largefiles are reverted via
    their standins.

    Temporarily installs a match function that translates largefile
    names to standin names (recording which largefiles were matched on
    ``repo._lfilestoupdate``), runs the original revert, then brings
    the largefiles themselves back in sync with the reverted standins.
    '''
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        try:
            ctx = scmutil.revsingle(repo, opts.get('rev'))
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    # map a largefile name to its standin; drop files
                    # whose standin only exists in the working copy
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if origmatchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return origmatchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(overridematch)
            # fix: dropped a stray no-op expression statement
            # (`scmutil.match` with its result discarded) that had no
            # effect here
            matches = overridematch(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
        lfileslist = getattr(repo, '_lfilestoupdate', [])
        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                printmessage=False)

        # empty out the largefiles list so we start fresh next time
        repo._lfilestoupdate = []
        for lfile in modified:
            if lfile in lfileslist:
                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                        in repo['.']:
                    lfutil.writestandin(repo, lfutil.standin(lfile),
                        repo['.'][lfile].data().strip(),
                        'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for lfile in added:
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()
def updatelfiles(ui, repo, filelist=None, printmessage=True,
                 normallookup=False):
    '''Update largefiles in the working directory to match their standins.

    For every largefile known to the repo or the largefiles dirstate
    (optionally restricted to ``filelist``), copy the expected content
    out of the largefiles cache when the file on disk is missing or has
    the wrong hash, propagate the standin's mode bits, remove largefiles
    whose standins disappeared, and synchronize the largefiles dirstate.
    Prints progress/summary messages unless ``printmessage`` is False.
    ``normallookup`` is forwarded to lfutil.synclfdirstate for each
    processed file.  Runs under the repo wlock.
    '''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # union of files tracked as largefiles and files the lfdirstate
        # already knows about
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # lfile -> expected hash, for files that need to be fetched from cache
        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                if (os.path.exists(absstandin + '.orig') and
                    os.path.exists(abslfile)):
                    # the standin was backed up (e.g. by a merge); keep a
                    # matching backup of the largefile itself
                    shutil.copyfile(abslfile, abslfile + '.orig')
                    util.unlinkpath(absstandin + '.orig')
                expecthash = lfutil.readstandin(repo, lfile)
                if (expecthash != '' and
                    (not os.path.exists(abslfile) or
                     expecthash != lfutil.hashfile(abslfile))):
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                        # use normallookup() to allocate entry in largefiles
                        # dirstate, because lack of it misleads
                        # lfilesrepo.status() into recognition that such cache
                        # missing files are REMOVED.
                        lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            if printmessage:
                ui.status(_('getting changed largefiles\n'))
            # make sure everything we are about to copy is in the local cache
            cachelfiles(ui, repo, None, lfiles)

            for lfile in lfiles:
                update1 = 0

                expecthash = update.get(lfile)
                if expecthash:
                    if not lfutil.copyfromcache(repo, expecthash, lfile):
                        # failed ... but already removed and set to normallookup
                        continue
                    # Synchronize largefile dirstate to the last modified
                    # time of the file
                    lfdirstate.normal(lfile)
                    update1 = 1

                # copy the state of largefile standin from the repository's
                # dirstate to its state in the lfdirstate.
                abslfile = repo.wjoin(lfile)
                absstandin = repo.wjoin(lfutil.standin(lfile))
                if os.path.exists(absstandin):
                    mode = os.stat(absstandin).st_mode
                    if mode != os.stat(abslfile).st_mode:
                        os.chmod(abslfile, mode)
                        update1 = 1

                updated += update1

                lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        if filelist is not None:
            # If "local largefile" is chosen at file merging, it is
            # not listed in "filelist" (= dirstate syncing is
            # omitted), because the standin file is not changed before and
            # after merging.
            # But the status of such files may have to be changed by
            # merging. For example, locally modified ("M") largefile
            # has to become re-added("A"), if it is "normal" file in
            # the target revision of linear-merging.
            for lfile in lfdirstate:
                if lfile not in filelist:
                    lfutil.synclfdirstate(repo, lfdirstate, lfile, True)

        lfdirstate.write()
        if printmessage and lfiles:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
def clearstatus(repo):
    '''Remove the rebase status file, if present.

    Passing ignoremissing=True avoids the race between a separate
    os.path.exists() check and the unlink: another process could remove
    the file in between, turning the unlink into an error.
    '''
    util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
def _narrow(
    ui,
    repo,
    remote,
    commoninc,
    oldincludes,
    oldexcludes,
    newincludes,
    newexcludes,
    force,
    backup,
):
    """Shrink the repository to the new narrowspec.

    Strips local-only changesets that touch newly-excluded paths (refusing
    unless ``force`` when any are visible), deletes store revlogs that fall
    outside the new matcher, records the new narrowspec, and updates the
    working copy.  ``backup`` is forwarded to repair.strip.
    """
    oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # This is essentially doing "hg outgoing" to find all local-only
    # commits. We will then check that the local-only commits don't
    # have any changes to files that will be untracked.
    unfi = repo.unfiltered()
    outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
    ui.status(_(b'looking for local changes to affected paths\n'))
    progress = ui.makeprogress(
        topic=_(b'changesets'),
        unit=_(b'changesets'),
        total=len(outgoing.missing) + len(outgoing.excluded),
    )
    localnodes = []
    with progress:
        for n in itertools.chain(outgoing.missing, outgoing.excluded):
            progress.increment()
            # local-only commit touching a path that will become untracked
            if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
                localnodes.append(n)
    revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
    hiddenrevs = repoview.filterrevs(repo, b'visible')
    visibletostrip = list(
        repo.changelog.node(r) for r in (revstostrip - hiddenrevs)
    )
    if visibletostrip:
        ui.status(
            _(
                b'The following changeset(s) or their ancestors have '
                b'local changes not on the remote:\n'
            )
        )
        maxnodes = 10
        if ui.verbose or len(visibletostrip) <= maxnodes:
            for n in visibletostrip:
                ui.status(b'%s\n' % short(n))
        else:
            for n in visibletostrip[:maxnodes]:
                ui.status(b'%s\n' % short(n))
            ui.status(
                _(b'...and %d more, use --verbose to list all\n')
                % (len(visibletostrip) - maxnodes)
            )
        if not force:
            raise error.StateError(
                _(b'local changes found'),
                hint=_(b'use --force-delete-local-changes to ignore'),
            )

    with ui.uninterruptible():
        if revstostrip:
            tostrip = [unfi.changelog.node(r) for r in revstostrip]
            if repo[b'.'].node() in tostrip:
                # stripping working copy, so move to a different commit first
                urev = max(
                    repo.revs(
                        b'(::%n) - %ln + null',
                        repo[b'.'].node(),
                        visibletostrip,
                    )
                )
                hg.clean(repo, urev)
            overrides = {(b'devel', b'strip-obsmarkers'): False}
            if backup:
                ui.status(_(b'moving unwanted changesets to backup\n'))
            else:
                ui.status(_(b'deleting unwanted changesets\n'))
            with ui.configoverride(overrides, b'narrow'):
                repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)

        # Collect store files (filelogs and tree-manifest revlogs) that the
        # new matcher no longer covers.
        todelete = []
        for t, f, f2, size in repo.store.datafiles():
            if f.startswith(b'data/'):
                file = f[5:-2]
                if not newmatch(file):
                    todelete.append(f)
            elif f.startswith(b'meta/'):
                dir = f[5:-13]
                dirs = sorted(pathutil.dirs({dir})) + [dir]
                include = True
                for d in dirs:
                    visit = newmatch.visitdir(d)
                    if not visit:
                        include = False
                        break
                    if visit == b'all':
                        break
                if not include:
                    todelete.append(f)

        repo.destroying()

        with repo.transaction(b'narrowing'):
            # Update narrowspec before removing revlogs, so repo won't be
            # corrupt in case of crash
            repo.setnarrowpats(newincludes, newexcludes)

            for f in todelete:
                ui.status(_(b'deleting %s\n') % f)
                util.unlinkpath(repo.svfs.join(f))
                repo.store.markremoved(f)

            ui.status(_(b'deleting unwanted files from working copy\n'))
            with repo.dirstate.parentchange():
                narrowspec.updateworkingcopy(repo, assumeclean=True)
                narrowspec.copytoworkingcopy(repo)

        repo.destroyed()
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    In its default configuration, Mercurial will prevent you from
    rebasing published changes. See :hg:`help phases` for details.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the current branch tip as the destination. (The
    destination changeset is not modified by rebasing, but new
    changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    For advanced usage, a third way is available through the ``--rev``
    option. It allows you to specify an arbitrary set of changesets to
    rebase. Descendants of revs you specify with this option are not
    automatically included in the rebase.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets.

    Some changesets in the source branch (e.g. merges from the
    destination branch) may be dropped if they no longer contribute
    any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the branch tip of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.
    """
    originalwd = target = None
    activebookmark = None
    external = nullrev

    # Mapping between the old revision id and either what is the new rebased
    # revision or what needs to be done with the old revision. The state dict
    # will be what contains most of the rebase progress state.
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        extrafns = [_savegraft]
        if e:
            extrafns = [e]
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if opts.get('interactive'):
            # --interactive is handled by histedit; point the user there,
            # adding --config only if histedit is not already enabled
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if collapsemsg and not collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            # restore the in-progress rebase state written by storestatus()
            try:
                (originalwd, target, state, skipped, collapsef, keepf,
                 keepbranchesf, external, activebookmark) = restorestatus(repo)
            except error.RepoLookupError:
                if abortf:
                    clearstatus(repo)
                    repo.ui.warn(_('rebase aborted (no revision is removed,'
                                   ' only broken state is cleared)\n'))
                    return 0
                else:
                    msg = _('cannot continue inconsistent rebase')
                    hint = _('use "hg rebase --abort" to clear broken state')
                    raise error.Abort(msg, hint=hint)
            if abortf:
                return abort(repo, originalwd, target, state,
                             activebookmark=activebookmark)
        else:
            if srcf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'source and a base'))
            if revf and basef:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a base'))
            if revf and srcf:
                raise error.Abort(_('cannot specify both a '
                                    'revision and a source'))

            cmdutil.checkunfinished(repo)
            cmdutil.bailifchanged(repo)

            if destf:
                dest = scmutil.revsingle(repo, destf)
            else:
                dest = repo[_destrebase(repo)]
                destf = str(dest)

            # compute the set of revisions to rebase from --rev/--source/--base
            if revf:
                rebaseset = scmutil.revrange(repo, revf)
                if not rebaseset:
                    ui.status(_('empty "rev" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
            elif srcf:
                src = scmutil.revrange(repo, [srcf])
                if not src:
                    ui.status(_('empty "source" revision set - '
                                'nothing to rebase\n'))
                    return _nothingtorebase()
                rebaseset = repo.revs('(%ld)::', src)
                assert rebaseset
            else:
                base = scmutil.revrange(repo, [basef or '.'])
                if not base:
                    ui.status(_('empty "base" revision set - '
                                "can't compute rebase set\n"))
                    return _nothingtorebase()
                commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first()
                if commonanc is not None:
                    rebaseset = repo.revs('(%d::(%ld) - %d)::',
                                          commonanc, base, commonanc)
                else:
                    rebaseset = []

                if not rebaseset:
                    # transform to list because smartsets are not comparable to
                    # lists. This should be improved to honor laziness of
                    # smartset.
                    if list(base) == [dest.rev()]:
                        if basef:
                            ui.status(_('nothing to rebase - %s is both "base"'
                                        ' and destination\n') % dest)
                        else:
                            ui.status(_('nothing to rebase - working directory '
                                        'parent is also destination\n'))
                    elif not repo.revs('%ld - ::%d', base, dest):
                        if basef:
                            ui.status(_('nothing to rebase - "base" %s is '
                                        'already an ancestor of destination '
                                        '%s\n') %
                                      ('+'.join(str(repo[r]) for r in base),
                                       dest))
                        else:
                            ui.status(_('nothing to rebase - working '
                                        'directory parent is already an '
                                        'ancestor of destination %s\n') % dest)
                    else: # can it happen?
                        ui.status(_('nothing to rebase from %s to %s\n') %
                                  ('+'.join(str(repo[r]) for r in base), dest))
                    return _nothingtorebase()

            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if (not (keepf or allowunstable)
                  and repo.revs('first(children(%ld) - %ld)',
                                rebaseset, rebaseset)):
                raise error.Abort(
                    _("can't remove original changesets with"
                      " unrebased descendants"),
                    hint=_('use --keep to keep original changesets'))

            obsoletenotrebased = {}
            if ui.configbool('experimental', 'rebaseskipobsolete'):
                rebasesetrevs = set(rebaseset)
                obsoletenotrebased = _computeobsoletenotrebased(repo,
                                                                rebasesetrevs,
                                                                dest)

                # - plain prune (no successor) changesets are rebased
                # - split changesets are not rebased if at least one of the
                # changeset resulting from the split is an ancestor of dest
                rebaseset = rebasesetrevs - set(obsoletenotrebased)
            result = buildstate(repo, dest, rebaseset, collapsef,
                                obsoletenotrebased)

            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return _nothingtorebase()

            root = min(rebaseset)
            if not keepf and not repo[root].mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % repo[root],
                                  hint=_('see "hg help phases" for details'))

            originalwd, target, state = result
            if collapsef:
                targetancestors = repo.changelog.ancestors([target],
                                                           inclusive=True)
                external = externalparent(repo, state, targetancestors)

            if dest.closesbranch() and not keepbranchesf:
                ui.status(_('reopening closed branch head %s\n') % dest)

        if keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            extrafns.insert(0, _savebranch)
            if collapsef:
                branches = set()
                for rev in state:
                    branches.add(repo[rev].branch())
                    if len(branches) > 1:
                        raise error.Abort(_('cannot collapse multiple named '
                            'branches'))

        # Rebase
        if not targetancestors:
            targetancestors = repo.changelog.ancestors([target], inclusive=True)

        # Keep track of the current bookmarks in order to reset them later
        currentbookmarks = repo._bookmarks.copy()
        activebookmark = activebookmark or repo._activebookmark
        if activebookmark:
            bookmarks.deactivate(repo)

        extrafn = _makeextrafn(extrafns)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            ctx = repo[rev]
            desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                                   ctx.description().split('\n', 1)[0])
            names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
            if names:
                desc += ' (%s)' % ' '.join(names)
            pos += 1
            if state[rev] == revtodo:
                ui.status(_('rebasing %s\n') % desc)
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
                            _('changesets'), total)
                p1, p2, base = defineparents(repo, rev, target, state,
                                             targetancestors)
                # persist progress before merging so --continue/--abort work
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external, activebookmark)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                                     'rebase')
                        stats = rebasenode(repo, rev, p1, base, state,
                                           collapsef, target)
                        if stats and stats[3] > 0:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '', 'rebase')
                if not collapsef:
                    merging = p2 != nullrev
                    editform = cmdutil.mergeeditform(merging, 'rebase')
                    editor = cmdutil.getcommiteditor(editform=editform, **opts)
                    newnode = concludenode(repo, rev, p1, p2, extrafn=extrafn,
                                           editor=editor,
                                           keepbranches=keepbranchesf)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.beginparentchange()
                    repo.setparents(repo[p1].node())
                    repo.dirstate.endparentchange()
                    newnode = None
                # Update the state
                if newnode is not None:
                    state[rev] = repo[newnode].rev()
                    ui.debug('rebased as %s\n' % short(newnode))
                else:
                    if not collapsef:
                        ui.warn(_('note: rebase of %d:%s created no changes '
                                  'to commit\n') % (rev, ctx))
                        skipped.add(rev)
                    state[rev] = p1
                    ui.debug('next revision set to %s\n' % p1)
            elif state[rev] == nullmerge:
                ui.debug('ignoring null merge rebase of %s\n' % rev)
            elif state[rev] == revignored:
                ui.status(_('not rebasing ignored %s\n') % desc)
            elif state[rev] == revprecursor:
                targetctx = repo[obsoletenotrebased[rev]]
                desctarget = '%d:%s "%s"' % (targetctx.rev(), targetctx,
                             targetctx.description().split('\n', 1)[0])
                msg = _('note: not rebasing %s, already in destination as %s\n')
                ui.status(msg % (desc, desctarget))
            else:
                ui.status(_('already rebased %s as %s\n') %
                          (desc, repo[state[rev]]))

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # create the single collapsed changeset now that all revisions
            # have been merged into the working directory
            p1, p2, _base = defineparents(repo, min(state), target,
                                          state, targetancestors)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            newnode = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                   extrafn=extrafn, editor=editor,
                                   keepbranches=keepbranchesf)
            if newnode is None:
                newrev = target
            else:
                newrev = repo[newnode].rev()
            for oldrev in state.iterkeys():
                if state[oldrev] > nullmerge:
                    state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in state.iteritems():
                if v > nullmerge:
                    nstate[repo[k].node()] = repo[v].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            targetnode = repo[target].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = state.get(originalwd, originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if not keepf:
            collapsedas = None
            if collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, state, skipped, collapsedas)

        if currentbookmarks:
            updatebookmarks(repo, targetnode, nstate, currentbookmarks)
            if activebookmark not in repo._bookmarks:
                # active bookmark was divergent one and has been deleted
                activebookmark = None

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))

        if (activebookmark and
            repo['.'].node() == repo._bookmarks[activebookmark]):
                bookmarks.activate(repo, activebookmark)

    finally:
        release(lock, wlock)
def clearstatus(repo):
    'Remove the status files'
    statepath = repo.join("rebasestate")
    if os.path.exists(statepath):
        util.unlinkpath(statepath)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``),
    rebase uses the tipmost head of the current named branch as the
    destination. (The destination changeset is not modified by
    rebasing, but new changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand
    for a topologically related set of changesets (the "source
    branch"). If you specify source (``-s/--source``), rebase will
    rebase that changeset and all of its descendants onto dest. If you
    specify base (``-b/--base``), rebase will select ancestors of base
    back to but not including the common ancestor with dest. Thus,
    ``-b`` is less precise but more convenient than ``-s``: you can
    specify any changeset in the source branch, and rebase will select
    the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
    uses the parent of the working directory as the base.

    By default, rebase recreates the changesets in the source branch
    as descendants of dest and then destroys the originals. Use
    ``--keep`` to preserve the original source changesets.

    Some changesets in the source branch (e.g. merges from the
    destination branch) may be dropped if they no longer contribute
    any change.

    One result of the rules for selecting the destination changeset
    and source branch is that, unlike ``merge``, rebase will do
    nothing if you are at the latest (tipmost) head of a named branch
    with two heads. You need to explicitly specify source and/or
    destination (or ``update`` to the other head, if it's the head of
    the intended source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    # maps old revision -> new rebased revision (or -1 for "still to do")
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        collapsemsg = cmdutil.logmessage(ui, opts)
        extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if collapsemsg and not collapsef:
            raise util.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(_('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))

            # reload the rebase state written by storestatus()
            (originalwd, target, state, skipped, collapsef, keepf,
                                keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))

            cmdutil.bailifchanged(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            # --keepbranches is implemented as an implicit extrafn, so the
            # two options are mutually exclusive
            assert not extrafn, 'cannot use both keepbranches and extrafn'
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                # persist progress before merging so --continue/--abort work
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                                                    keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                                        targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    try:
                        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                        stats = rebasenode(repo, rev, p1, state)
                        if stats and stats[3] > 0:
                            raise util.Abort(_('unresolved conflicts (see hg '
                                        'resolve, then hg rebase --continue)'))
                    finally:
                        ui.setconfig('ui', 'forcemerge', '')
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            # create the single collapsed changeset
            p1, p2 = defineparents(repo, min(state), target,
                                                        state, targetancestors)
            if collapsemsg:
                commitmsg = collapsemsg
            else:
                commitmsg = 'Collapsed revision'
                for rebased in state:
                    if rebased not in skipped and state[rebased] != nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
                                  extrafn=extrafn)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlinkpath(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # print one hgrc-style [section] with its key = value pairs
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    # rebind 'repo' to a throwaway repository so the demo commit never
    # touches the user's real repo
    repo = localrepo.localrepository(repo.baseui, tmpdir, True)
    ui.setconfig('keyword', fn, '', 'keyword')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn, 'keyword')

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.vfs('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v, 'keyword')
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        if uikwmaps:
            kwmaps = dict(uikwmaps)
        else:
            kwmaps = _defaultkwmaps(ui)

    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # write a file containing one '$kw$' placeholder per configured keyword
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wvfs.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    wlock = repo.wlock()
    try:
        repo.dirstate.setbranch('demobranch')
    finally:
        wlock.release()
    # disable any commit hooks so the demo commit cannot be intercepted
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '', 'keyword')
    msg = _('hg keyword configuration and expansion example')
    ui.note(("hg ci -m '%s'\n" % msg))
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    # NOTE(review): this unlinks only the files under tmpdir, leaving the
    # (now empty) directory tree behind — presumably intentional best-effort
    # cleanup, but confirm whether tmpdir should be removed as well.
    for root, dirs, files in os.walk(tmpdir):
        for f in files:
            util.unlinkpath(repo.vfs.reljoin(root, f))