def _apply(self, filename):
    try:
        util.unlink(filename)
    except OSError as inst:
        if inst.errno != errno.ENOENT:
            repo.ui.warn(_("update failed to remove %s: %s!\n")
                         % (filename, inst.strerror))
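# The "unlink, but tolerate a file that is already gone" idiom above recurs
# throughout this section.  A minimal standalone sketch of the same pattern
# using only the standard library (plain os/errno rather than Mercurial's
# util module); on Python 3 the body could equally be written with
# contextlib.suppress(FileNotFoundError):
import errno
import os

def unlink_if_exists(path):
    """Remove path, ignoring the error raised when it does not exist."""
    try:
        os.unlink(path)
    except OSError as inst:
        if inst.errno != errno.ENOENT:  # re-raise anything but "no such file"
            raise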
def cleanup(self, repo, pats, opts):
    '''removes all changes from the working copy and makes it so
    there isn't a patch applied'''
    # find added files in the user's chosen set
    m = cmdutil.match(repo, pats, opts)
    added = repo.status(match=m)[1]

    revertopts = {
        'include': opts.get('include'),
        'exclude': opts.get('exclude'),
        'date': None,
        'all': True,
        'rev': '.',
        'no_backup': True,
    }
    self.ui.pushbuffer()  # silence revert
    try:
        commands.revert(self.ui, repo, *pats, **revertopts)
        # finish the job of reverting added files (safe because they are
        # saved in the attic patch)
        for fn in added:
            self.ui.status(_('removing %s\n') % fn)
            util.unlink(fn)
    finally:
        self.ui.popbuffer()
    self.applied = ''
    self.persiststate()
def _gethash(self, filename, hash):
    """Get file with the provided hash and store it in the local repo's
    store and in the usercache.
    filename is for informational messages only.
    """
    util.makedirs(lfutil.storepath(self.repo, ''))
    storefilename = lfutil.storepath(self.repo, hash)

    tmpname = storefilename + '.tmp'
    tmpfile = util.atomictempfile(tmpname,
                                  createmode=self.repo.store.createmode)

    try:
        gothash = self._getfile(tmpfile, filename, hash)
    except StoreError as err:
        self.ui.warn(err.longmessage())
        gothash = ""
    tmpfile.close()

    if gothash != hash:
        if gothash != "":
            self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                         % (filename, hash, gothash))
        util.unlink(tmpname)
        return False

    util.rename(tmpname, storefilename)
    lfutil.linktousercache(self.repo, hash)
    return True
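# _gethash above shows the download-verify-rename pattern: write to a
# temporary file beside the destination, check the content hash, and only
# then rename into place so readers never observe a partial file.  A
# self-contained sketch of that pattern with the standard library; fetch is
# a hypothetical callable (not from the source) that writes the payload to
# the open file object:
import hashlib
import os
import tempfile

def fetch_verified(fetch, dest, expected_sha1):
    fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(dest) or '.',
                                   suffix='.tmp')
    try:
        with os.fdopen(fd, 'wb') as tmpfile:
            fetch(tmpfile)  # hypothetical download callable
        with open(tmpname, 'rb') as fp:
            gothash = hashlib.sha1(fp.read()).hexdigest()
        if gothash != expected_sha1:
            raise IOError('data corruption (expected %s, got %s)'
                          % (expected_sha1, gothash))
        os.rename(tmpname, dest)  # atomic within one filesystem on POSIX
    except Exception:
        os.unlink(tmpname)
        raise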
def accept(self):
    cmdopts = {}
    if hasattr(self, 'chk'):
        if self.command == 'revert':
            cmdopts['no_backup'] = self.chk.isChecked()
        elif self.command == 'remove':
            cmdopts['force'] = self.chk.isChecked()
    files = self.stwidget.getChecked()
    if not files:
        qtlib.WarningMsgBox(_('No files selected'),
                            _('No operation to perform'),
                            parent=self)
        return
    self.repo.bfstatus = True
    self.repo.lfstatus = True
    repostate = self.repo.status()
    self.repo.bfstatus = False
    self.repo.lfstatus = False
    if self.command == 'remove':
        if not self.chk.isChecked():
            modified = repostate[0]
            selmodified = []
            for wfile in files:
                if wfile in modified:
                    selmodified.append(wfile)
            if selmodified:
                prompt = qtlib.CustomPrompt(
                    _('Confirm Remove'),
                    _('You have selected one or more files that have been '
                      'modified. By default, these files will not be '
                      'removed. What would you like to do?'),
                    self,
                    (_('Remove &Unmodified Files'),
                     _('Remove &All Selected Files'),
                     _('Cancel')),
                    0, 2, selmodified)
                ret = prompt.run()
                if ret == 1:
                    cmdopts['force'] = True
                elif ret == 2:
                    return
        unknown, ignored = repostate[4:6]
        for wfile in list(files):  # iterate over a copy; files is mutated
            if wfile in unknown or wfile in ignored:
                try:
                    util.unlink(wfile)
                except EnvironmentError:
                    pass
                files.remove(wfile)
    elif self.command == 'add':
        if 'largefiles' in self.repo.extensions():
            self.addWithPrompt(files)
            return
    if files:
        cmdline = hglib.buildcmdargs(self.command, *files, **cmdopts)
        self.files = files
        self.cmd.run(cmdline)
    else:
        self.reject()
def storeuntracked(repo, untracked):
    if not untracked:
        return
    os.mkdir(repo.join('tasks/untrackedbackup'))
    for f in untracked:
        shaname = util.sha1(f).hexdigest()
        util.copyfile(util.pathto(repo.root, None, f),
                      repo.join('tasks/untrackedbackup/%s' % shaname))
        util.unlink(util.pathto(repo.root, None, f))
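# storeuntracked names each backup after the SHA-1 of the file's path, so
# the original path cannot be recovered from the backup directory alone; a
# restore step has to be driven by the same list of untracked files.  A
# hypothetical inverse (restoreuntracked is not part of the source), built
# from the same helpers used above:
def restoreuntracked(repo, untracked):
    for f in untracked:
        shaname = util.sha1(f).hexdigest()
        util.copyfile(repo.join('tasks/untrackedbackup/%s' % shaname),
                      util.pathto(repo.root, None, f))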
def unsetcurrent(repo):
    wlock = repo.wlock()
    try:
        try:
            util.unlink(repo.join('bookmarks.current'))
            repo._bookmarkcurrent = None
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise
    finally:
        wlock.release()
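# unsetcurrent releases the working-directory lock with the explicit
# try/finally idiom.  Later snippets in this section (the merge_update
# variants) use the lock as a context manager instead; assuming a Mercurial
# version whose lock objects support the with-statement, the same function
# can be written as:
def unsetcurrent_ctxmgr(repo):
    with repo.wlock():
        try:
            util.unlink(repo.join('bookmarks.current'))
            repo._bookmarkcurrent = None
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise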
def _remove_many(repo, filenames):
    '''remove multiple files in repo's working dir, ignoring
    "no such file" errors'''
    # copied from mercurial.context.workingctx.remove() in Mercurial
    # 1.8, since that method was removed in 1.9
    for fn in filenames:
        try:
            util.unlink(repo.wjoin(fn))
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise
def get(self, files):
    '''Get the specified largefiles from the store and write to local
    files under repo.root.

    files is a list of (filename, hash) tuples.
    Return (success, missing), lists of files successfully downloaded and
    those not found in the store.

    success is a list of (filename, hash) tuples;
    missing is a list of filenames that we could not get.  (The detailed
    error message will already have been presented to the user, so
    missing is just supplied as a summary.)'''
    success = []
    missing = []
    ui = self.ui

    util.makedirs(lfutil.storepath(self.repo, ''))

    at = 0
    available = self.exists(set(hash for (_filename, hash) in files))
    for filename, hash in files:
        ui.progress(_('getting largefiles'), at, unit='lfile',
                    total=len(files))
        at += 1
        ui.note(_('getting %s:%s\n') % (filename, hash))

        if not available.get(hash):
            ui.warn(_('%s: largefile %s not available from %s\n')
                    % (filename, hash, self.url))
            missing.append(filename)
            continue

        storefilename = lfutil.storepath(self.repo, hash)
        tmpfile = util.atomictempfile(storefilename + '.tmp',
                                      createmode=self.repo.store.createmode)

        try:
            hhash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            ui.warn(err.longmessage())
            hhash = ""
        tmpfile.close()

        if hhash != hash:
            if hhash != "":
                ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                        % (filename, hash, hhash))
            util.unlink(storefilename + '.tmp')
            missing.append(filename)
            continue

        util.rename(storefilename + '.tmp', storefilename)
        lfutil.linktousercache(self.repo, hash)
        success.append((filename, hhash))
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen: this is
    meant to be called only after ensuring that the needed largefile
    exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    dest = repo.wjoin(filename)
    with open(path, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        util.unlink(dest)
        return False
    return True
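# copyfromcache depends on a copyandhash helper that hashes the stream while
# copying it, so corruption is detected without a second pass over the data.
# A minimal standalone sketch of that idea (plain hashlib; the real helper
# lives in the largefiles extension and may differ in detail):
import hashlib

def copyandhash_sketch(srcfd, destfd, chunksize=128 * 1024):
    """Copy srcfd to destfd in chunks and return the hex SHA-1 of the data."""
    hasher = hashlib.sha1()
    while True:
        chunk = srcfd.read(chunksize)
        if not chunk:
            break
        hasher.update(chunk)
        destfd.write(chunk)
    return hasher.hexdigest()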
def bigupdate(ui, repo, *pats, **opts):
    '''fetch files from versions directory as recorded in '.bigfiles'.
    Also complain about necessary files missing in the version directory'''
    ds = read_bigfiledirstate(ui, repo)
    bigfiles = parse_bigfiles(repo)
    tracked_gotbig, added_big, modified, removed, gotsmall, \
        missinginrepo = _bigstatus(ui, repo, pats, opts, ds, bigfiles)
    brepo = bigfiles_repo(ui)
    tocopy = removed
    if opts['clean']:
        tocopy = tocopy + modified
    for file in tocopy:
        f = repo.wjoin(file)
        hash = bigfiles[file]
        rf = "%s/%s.%s" % (brepo, file, hash)
        ui.write(_("fetching %s\n") % rf)
        if not opts['dry_run']:
            util.makedirs(os.path.dirname(f))
            if os.path.exists(f):
                util.unlink(f)
            if os.path.exists(rf):
                util.copyfile(rf, f)
            else:
                fo = open(f, 'wb')
                rfo = gzip.open(rf + '.gz', 'rb')
                def read10Mb():
                    return rfo.read(1024 * 1024 * 10)
                for chunk in iter(read10Mb, ''):
                    fo.write(chunk)
                fo.close()
                rfo.close()
    if missinginrepo:
        ui.write(_("\nNeeded files missing in bigrepo %s:\n") % brepo)
        for file in missinginrepo:
            hash = bigfiles[file]
            ui.write("%s.%s\n" % (file, hash))
    write_bigfiledirstate(ui, repo, ds)
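# The download loop above relies on iter()'s two-argument form:
# iter(read10Mb, '') keeps calling read10Mb until it returns the sentinel
# '', expressing a 10 MB-at-a-time copy without an explicit while loop.
# The '' sentinel is Python 2-specific (gzip yields bytes on Python 3); a
# self-contained equivalent using functools.partial and a bytes sentinel:
import functools
import gzip

def fetch_gzipped(rf, f):
    # Decompress rf + '.gz' into f, 10 MB at a time.
    with open(f, 'wb') as fo, gzip.open(rf + '.gz', 'rb') as rfo:
        for chunk in iter(functools.partial(rfo.read, 10 * 1024 * 1024), b''):
            fo.write(chunk)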
def _apply(self, filename):
    try:
        util.unlink(filename)
    except OSError:
        pass
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history onto another. This can be useful for linearizing local
    changes relative to a master development tree.

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.
    """
    originalwd = target = None
    external = nullrev
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)

        if contf or abortf:
            if contf and abortf:
                raise error.ParseError('rebase',
                                       _('cannot use both abort and continue'))
            if collapsef:
                raise error.ParseError(
                    'rebase', _('cannot use collapse with continue or abort'))
            if detachf:
                raise error.ParseError(
                    'rebase', _('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise error.ParseError('rebase',
                    _('abort and continue do not allow specifying revisions'))

            (originalwd, target, state, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                abort(repo, originalwd, target, state)
                return
        else:
            if srcf and basef:
                raise error.ParseError('rebase', _('cannot specify both a '
                                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise error.ParseError(
                        'rebase', _('detach requires a revision to be specified'))
                if basef:
                    raise error.ParseError(
                        'rebase', _('cannot specify a base with detach'))

            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            if extrafn:
                raise error.ParseError(
                    'rebase', _('cannot use both keepbranches and extrafn'))
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        for rev in sorted(state):
            if state[rev] == -1:
                ui.debug("rebasing %d:%s\n" % (rev, repo[rev]))
                storestatus(repo, originalwd, target, state, collapsef,
                            keepf, keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    stats = rebasenode(repo, rev, p1, p2, state)
                    if stats and stats[3] > 0:
                        raise util.Abort(_('fix unresolved conflicts with hg '
                                           'resolve then run hg rebase --continue'))
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    extra = {'rebase_source': repo[rev].hex()}
                    if extrafn:
                        extrafn(repo[rev], extra)
                    newrev = concludenode(repo, rev, p1, p2, extra=extra)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.note(_('rebase merging completed\n'))

        if collapsef:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            commitmsg = 'Collapsed revision'
            for rebased in state:
                if rebased not in skipped and state[rebased] != nullmerge:
                    commitmsg += '\n* %s' % repo[rebased].description()
            commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg, extra=extrafn)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    repair.strip(ui, repo, repo[min(rebased)].node(), "strip")

        clearstatus(repo)
        ui.status(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def applytomirrors(repo, status, sourcepath, mirrors, action):
    """Applies the changes that are in the sourcepath to all the mirrors."""
    mirroredfiles = set()

    # Detect which mirror this file comes from
    sourcemirror = None
    for mirror in mirrors:
        if sourcepath.startswith(mirror):
            sourcemirror = mirror
            break
    if not sourcemirror:
        raise error.Abort(_("unable to detect source mirror of '%s'")
                          % (sourcepath,))

    relpath = sourcepath[len(sourcemirror):]

    # Apply the change to each mirror one by one
    allchanges = set(status.modified + status.removed + status.added)
    for mirror in mirrors:
        if mirror == sourcemirror:
            continue

        mirrorpath = mirror + relpath
        mirroredfiles.add(mirrorpath)
        if mirrorpath in allchanges:
            wctx = repo[None]
            if (sourcepath not in wctx and mirrorpath not in wctx
                    and sourcepath in status.removed
                    and mirrorpath in status.removed):
                if repo.ui.verbose:
                    repo.ui.status(_("not mirroring remove of '%s' to '%s';"
                                     " it is already removed\n")
                                   % (sourcepath, mirrorpath))
                continue

            if wctx[sourcepath].data() == wctx[mirrorpath].data():
                if repo.ui.verbose:
                    repo.ui.status(_("not mirroring '%s' to '%s'; it already "
                                     "matches\n") % (sourcepath, mirrorpath))
                continue

            raise error.Abort(_("path '%s' needs to be mirrored to '%s', but "
                                "the target already has pending changes")
                              % (sourcepath, mirrorpath))

        fullsource = repo.wjoin(sourcepath)
        fulltarget = repo.wjoin(mirrorpath)

        dirstate = repo.dirstate
        if action == 'm' or action == 'a':
            mirrorpathdir, unused = util.split(mirrorpath)
            util.makedirs(repo.wjoin(mirrorpathdir))

            util.copyfile(fullsource, fulltarget)
            if dirstate[mirrorpath] in '?r':
                dirstate.add(mirrorpath)

            if action == 'a':
                # For adds, detect copy data as well
                copysource = dirstate.copied(sourcepath)
                if copysource and copysource.startswith(sourcemirror):
                    mirrorcopysource = mirror + copysource[len(sourcemirror):]
                    dirstate.copy(mirrorcopysource, mirrorpath)
                    repo.ui.status(_("mirrored copy '%s -> %s' to '%s -> %s'\n")
                                   % (copysource, sourcepath,
                                      mirrorcopysource, mirrorpath))
                else:
                    repo.ui.status(_("mirrored adding '%s' to '%s'\n")
                                   % (sourcepath, mirrorpath))
            else:
                repo.ui.status(_("mirrored changes in '%s' to '%s'\n")
                               % (sourcepath, mirrorpath))
        elif action == 'r':
            try:
                util.unlink(fulltarget)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    repo.ui.status(_("not mirroring remove of '%s' to '%s'; it "
                                     "is already removed\n")
                                   % (sourcepath, mirrorpath))
                else:
                    raise
            else:
                dirstate.remove(mirrorpath)
                repo.ui.status(_("mirrored remove of '%s' to '%s'\n")
                               % (sourcepath, mirrorpath))

    return mirroredfiles
def merge_update(orig, repo, node, branchmerge, force, ancestor=None,
                 mergeancestor=False, labels=None, matcher=None,
                 mergeforce=False, updatecheck=None):
    assert node is not None

    if not util.safehasattr(repo.dirstate, 'eden_client'):
        # This is not an eden repository
        useeden = False
    elif matcher is not None and not matcher.always():
        # We don't support doing a partial update through eden yet.
        useeden = False
    elif branchmerge or ancestor is not None:
        useeden = False
    else:
        # TODO: We probably also need to set useeden = False if there are
        # subrepositories.  (Personally I might vote for just not supporting
        # subrepos in eden.)
        useeden = True

    if not useeden:
        repo.ui.debug("falling back to non-eden update code path")
        return orig(repo, node, branchmerge, force, ancestor=ancestor,
                    mergeancestor=mergeancestor, labels=labels,
                    matcher=matcher, mergeforce=mergeforce)

    with repo.wlock():
        wctx = repo[None]
        parents = wctx.parents()
        p1ctx = parents[0]
        destctx = repo[node]
        deststr = str(destctx)

        if not force:
            # Make sure there isn't an outstanding merge or unresolved files.
            if len(parents) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergemod.mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))

            # The vanilla merge code disallows updating between two unrelated
            # branches if the working directory is dirty.  I don't really see
            # a good reason to disallow this; it should be treated the same as
            # if we committed the changes, checked out the other branch then
            # tried to graft the changes here.

        # Invoke the preupdate hook
        repo.hook('preupdate', throw=True, parent1=deststr, parent2='')
        # note that we're in the middle of an update
        repo.vfs.write('updatestate', destctx.hex())

        # Ask eden to perform the checkout
        if force or p1ctx != destctx:
            conflicts = repo.dirstate.eden_client.checkout(destctx.node(),
                                                           force=force)
        else:
            conflicts = None

        # Handle any conflicts
        # The stats returned are numbers of files affected:
        # (updated, merged, removed, unresolved)
        # The updated and removed file counts will always be 0 in our case.
        if conflicts and not force:
            stats = _handleupdateconflicts(repo, wctx, p1ctx, destctx, labels,
                                           conflicts)
        else:
            stats = 0, 0, 0, 0

        # Clear the update state
        util.unlink(repo.vfs.join('updatestate'))

    # Invoke the update hook
    repo.hook('update', parent1=deststr, parent2='', error=stats[3])

    return stats
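# Both merge_update variants in this section bracket the eden checkout with
# an 'updatestate' marker: written before the checkout starts, removed only
# after it completes, so an interrupted update leaves the marker behind for
# later detection.  That bracket can be phrased as a context manager; a
# sketch assuming a vfs object with the write()/join() methods used above
# (deliberately no try/finally, to preserve the marker on failure):
import contextlib

@contextlib.contextmanager
def updatestate_marker(vfs, hexnode):
    vfs.write('updatestate', hexnode)
    yield
    util.unlink(vfs.join('updatestate'))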
def cmddebugconvertobsolete(ui, repo):
    """import markers from an .hg/obsolete-relations file"""
    cnt = 0
    err = 0
    l = repo.lock()
    some = False
    try:
        unlink = []
        tr = repo.transaction('convert-obsolete')
        try:
            repo._importoldobsolete = True
            store = repo.obsstore
            ### very first format
            try:
                f = repo.opener('obsolete-relations')
                try:
                    some = True
                    for line in f:
                        subhex, objhex = line.split()
                        suc = bin(subhex)
                        prec = bin(objhex)
                        sucs = (suc == nullid) and [] or [suc]
                        meta = {
                            'date': '%i %i' % util.makedate(),
                            'user': ui.username(),
                        }
                        try:
                            store.create(tr, prec, sucs, 0, metadata=meta)
                            cnt += 1
                        except ValueError:
                            repo.ui.write_err("invalid old marker line: %s"
                                              % (line))
                            err += 1
                finally:
                    f.close()
                unlink.append(repo.join('obsolete-relations'))
            except IOError:
                pass
            ### second (json) format
            data = repo.svfs.tryread('obsoletemarkers')
            if data:
                some = True
                for oldmark in json.loads(data):
                    del oldmark['id']  # dropped for now
                    del oldmark['reason']  # unused until then
                    oldobject = str(oldmark.pop('object'))
                    oldsubjects = [str(s) for s in oldmark.pop('subjects', [])]
                    LOOKUP_ERRORS = (error.RepoLookupError, error.LookupError)
                    if len(oldobject) != 40:
                        try:
                            oldobject = repo[oldobject].node()
                        except LOOKUP_ERRORS:
                            pass
                    if any(len(s) != 40 for s in oldsubjects):
                        try:
                            oldsubjects = [repo[s].node()
                                           for s in oldsubjects]
                        except LOOKUP_ERRORS:
                            pass

                    oldmark['date'] = '%i %i' % tuple(oldmark['date'])
                    meta = dict((k.encode('utf-8'), v.encode('utf-8'))
                                for k, v in oldmark.iteritems())
                    try:
                        succs = [bin(n) for n in oldsubjects]
                        succs = [n for n in succs if n != nullid]
                        store.create(tr, bin(oldobject), succs,
                                     0, metadata=meta)
                        cnt += 1
                    except ValueError:
                        repo.ui.write_err("invalid marker %s -> %s\n"
                                          % (oldobject, oldsubjects))
                        err += 1
                unlink.append(repo.sjoin('obsoletemarkers'))
            tr.close()
            for path in unlink:
                util.unlink(path)
        finally:
            tr.release()
    finally:
        del repo._importoldobsolete
        l.release()
    if not some:
        ui.warn(_('nothing to do\n'))
    ui.status('%i obsolete markers converted\n' % cnt)
    if err:
        ui.write_err('%i conversions failed. check your graph!\n' % err)
def clearstatus(repo):
    'Remove the status files'
    if os.path.exists(repo.join("rebasestate")):
        util.unlink(repo.join("rebasestate"))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(repo.baseui, tmpdir, True)
    ui.setconfig('keyword', fn, '', 'keyword')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn, 'keyword')

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v, 'keyword')
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        if uikwmaps:
            kwmaps = dict(uikwmaps)
        else:
            kwmaps = _defaultkwmaps(ui)

    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    wlock = repo.wlock()
    try:
        repo.dirstate.setbranch('demobranch')
    finally:
        wlock.release()
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '', 'keyword')
    msg = _('hg keyword configuration and expansion example')
    ui.note(("hg ci -m '%s'\n" % msg))
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    for root, dirs, files in os.walk(tmpdir, topdown=False):
        for f in files:
            util.unlink(os.path.join(root, f))
        for d in dirs:
            os.rmdir(os.path.join(root, d))
    os.rmdir(tmpdir)
# (truncated fragment: the opening of the qtlib.CustomPrompt(...) call and
# the enclosing accept() method header are missing from the source)
                    _('You have selected one or more files that have been '
                      'modified. By default, these files will not be '
                      'removed. What would you like to do?'),
                    self,
                    (_('Remove &Unmodified Files'),
                     _('Remove &All Selected Files'),
                     _('Cancel')),
                    0, 2, selmodified)
                ret = prompt.run()
                if ret == 1:
                    cmdopts['force'] = True
                elif ret == 2:
                    return
        unknown, ignored = repostate[4:6]
        for wfile in list(files):  # iterate over a copy; files is mutated
            if wfile in unknown or wfile in ignored:
                try:
                    util.unlink(wfile)
                except EnvironmentError:
                    pass
                files.remove(wfile)
    elif self.command == 'add':
        if 'largefiles' in self.repo.extensions():
            self.addWithPrompt(files)
            return
    if files:
        self._runCommand(files, [], cmdopts)
    else:
        self.reject()

def reject(self):
    if not self._cmdsession.isFinished():
        self._cmdsession.abort()
def unlink(self):
    util.unlink(self.filename())
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history onto another. This can be useful for linearizing local
    changes relative to a master development tree.

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.
    """
    originalwd = target = None
    external = nullrev
    state = {}
    skipped = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)

        if contf or abortf:
            if contf and abortf:
                raise error.ParseError('rebase',
                                       _('cannot use both abort and continue'))
            if collapsef:
                raise error.ParseError(
                    'rebase', _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.ParseError('rebase',
                    _('abort and continue do not allow specifying revisions'))

            (originalwd, target, state, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                abort(repo, originalwd, target, state)
                return
        else:
            if srcf and basef:
                raise error.ParseError('rebase', _('cannot specify both a '
                                                   'revision and a base'))
            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, collapsef)
            if result:
                originalwd, target, state, external = result
            else:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return

        if keepbranchesf:
            if extrafn:
                raise error.ParseError(
                    'rebase', _('cannot use both keepbranches and extrafn'))
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        targetancestors = list(repo.changelog.ancestors(target))
        targetancestors.append(target)

        for rev in sorted(state):
            if state[rev] == -1:
                storestatus(repo, originalwd, target, state, collapsef,
                            keepf, keepbranchesf, external)
                rebasenode(repo, rev, target, state, skipped,
                           targetancestors, collapsef, extrafn)

        ui.note(_('rebase merging completed\n'))

        if collapsef:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            concludenode(repo, rev, p1, external, state, collapsef,
                         last=True, skipped=skipped, extrafn=extrafn)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            if set(repo.changelog.descendants(min(state))) - set(state):
                ui.warn(_("warning: new changesets detected on source branch, "
                          "not stripping\n"))
            else:
                repair.strip(ui, repo, repo[min(state)].node(), "strip")

        clearstatus(repo)
        ui.status(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def my_update(orig, ui, repo, *args, **opts):
    bigfiles0 = parse_bigfiles(repo)
    res = orig(ui, repo, *args, **opts)
    if opts.get('clean'):
        return res
    bigfiles1 = parse_bigfiles(repo)

    m1 = None
    m2 = None
    for file in bigfiles0.keys():
        if file not in bigfiles1:
            if not m1:
                parent1, parent2 = repo.dirstate.parents()
                m1 = repo[parent1].manifest()
                m2 = repo[parent2].manifest()
            if file in m1 or file not in m2:
                continue
            try:
                f = repo.wjoin(file)
                os.lstat(f)
                ui.write(_("unlinking %s.%s\n") % (file, bigfiles0[file]))
                util.unlink(f)
            except OSError:
                pass

    tofetch = {}
    for file, hash in bigfiles1.iteritems():
        if file not in bigfiles0 or bigfiles0[file] != hash:
            tofetch[file] = hash
    for file in bigfiles0:
        if file not in bigfiles1 and file not in repo.dirstate and \
           os.path.exists(file):
            ui.write(_("unlinking %s.%s\n") % (file, bigfiles0[file]))
            util.unlink(file)

    if tofetch:
        brepo = bigfiles_repo(ui)
        missing = {}
        for file, hash in tofetch.iteritems():
            f = repo.wjoin(file)
            rf = "%s/%s.%s" % (brepo, file, hash)
            util.makedirs(os.path.dirname(f))
            if os.path.exists(f):
                util.unlink(f)
            if os.path.exists(rf):
                util.copyfile(rf, f)
            else:
                rf = rf + '.gz'
                if os.path.exists(rf):
                    ui.write(_("fetching %s.%s\n") % (file, hash))
                    fo = open(f, 'wb')
                    rfo = gzip.open(rf, 'rb')
                    def read10Mb():
                        return rfo.read(1024 * 1024 * 10)
                    for chunk in iter(read10Mb, ''):
                        fo.write(chunk)
                    fo.close()
                    rfo.close()
                else:
                    missing[file] = hash
        if missing:
            ui.write(_("\nNeeded files missing in bigrepo %s:\n") % brepo)
            for file, hash in missing.iteritems():
                ui.write("%s.%s\n" % (file, hash))
    return res
def delfile(self, f):
    try:
        util.unlink(self.repo.wjoin(f))
        # self.repo.remove([f])
    except OSError:
        pass
def merge_update(
    orig,
    repo,
    node,
    branchmerge,
    force,
    ancestor=None,
    mergeancestor=False,
    labels=None,
    matcher=None,
    mergeforce=False,
    updatecheck=None,
    wc=None,
):
    """Apparently node can be a 20-byte hash or an integer referencing a
    revision number.
    """
    assert node is not None

    if not util.safehasattr(repo.dirstate, "eden_client"):
        why_not_eden = "This is not an eden repository."
    elif matcher is not None and not matcher.always():
        why_not_eden = ("We don't support doing a partial update through "
                        "eden yet.")
    elif branchmerge:
        # TODO: We potentially should support handling this scenario ourself
        # in the future.  For now we simply haven't investigated what the
        # correct semantics are in this case.
        why_not_eden = 'branchmerge is "truthy:" %s.' % branchmerge
    elif ancestor is not None:
        # TODO: We potentially should support handling this scenario ourself
        # in the future.  For now we simply haven't investigated what the
        # correct semantics are in this case.
        why_not_eden = "ancestor is not None: %s." % ancestor
    elif wc is not None and wc.isinmemory():
        # In memory merges do not operate on the working directory,
        # so we don't need to ask eden to change the working directory state
        # at all, and can use the vanilla merge logic in this case.
        why_not_eden = "merge is in-memory"
    else:
        # TODO: We probably also need to set why_not_eden if there are
        # subrepositories.  (Personally I might vote for just not supporting
        # subrepos in eden.)
        why_not_eden = None

    if why_not_eden:
        repo.ui.debug("falling back to non-eden update code path: %s\n"
                      % why_not_eden)
        return orig(
            repo,
            node,
            branchmerge,
            force,
            ancestor=ancestor,
            mergeancestor=mergeancestor,
            labels=labels,
            matcher=matcher,
            mergeforce=mergeforce,
            updatecheck=updatecheck,
            wc=wc,
        )
    else:
        repo.ui.debug("using eden update code path\n")

    with repo.wlock():
        wctx = repo[None]
        parents = wctx.parents()

        p1ctx = parents[0]
        destctx = repo[node]
        deststr = str(destctx)

        if not force:
            # Make sure there isn't an outstanding merge or unresolved files.
            if len(parents) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergemod.mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))

            # The vanilla merge code disallows updating between two unrelated
            # branches if the working directory is dirty.  I don't really see
            # a good reason to disallow this; it should be treated the same as
            # if we committed the changes, checked out the other branch then
            # tried to graft the changes here.

        if p1ctx == destctx:
            # No update to perform.
            # Just invoke the hooks and return.
            repo.hook("preupdate", throw=True, parent1=deststr, parent2="")
            repo.hook("update", parent1=deststr, parent2="", error=0)
            return 0, 0, 0, 0

        # If we are in noconflict mode, then we must do a DRY_RUN first to
        # see if there are any conflicts that should prevent us from
        # attempting the update.
        if updatecheck == "noconflict":
            conflicts = repo.dirstate.eden_client.checkout(
                destctx.node(), CheckoutMode.DRY_RUN
            )
            if conflicts:
                actions = _determine_actions_for_conflicts(repo, p1ctx,
                                                           conflicts)
                _check_actions_and_raise_if_there_are_conflicts(actions)

        # Invoke the preupdate hook
        repo.hook("preupdate", throw=True, parent1=deststr, parent2="")
        # Record that we're in the middle of an update
        try:
            vfs = repo.localvfs
        except AttributeError:
            vfs = repo.vfs
        vfs.write("updatestate", destctx.hex())

        # Ask eden to perform the checkout
        if force:
            # eden_client.checkout() returns the list of conflicts here,
            # but since this is a force update it will have already replaced
            # the conflicts with the destination file state, so we don't have
            # to do anything with them here.
            conflicts = repo.dirstate.eden_client.checkout(
                destctx.node(), CheckoutMode.FORCE
            )
            # We do still need to make sure to update the merge state though.
            # In the non-force code path the merge state is updated in
            # _handle_update_conflicts().
            ms = mergemod.mergestate.clean(repo, p1ctx.node(),
                                           destctx.node(), labels)
            ms.commit()

            stats = 0, 0, 0, 0
            actions = {}
        else:
            conflicts = repo.dirstate.eden_client.checkout(
                destctx.node(), CheckoutMode.NORMAL
            )
            # TODO(mbolin): Add a warning if we did a DRY_RUN and the
            # conflicts we get here do not match.  Only in the event of a race
            # would we expect them to differ from when the DRY_RUN was done
            # (or if we decide that DIRECTORY_NOT_EMPTY conflicts do not need
            # to be reported during a DRY_RUN).
            stats, actions = _handle_update_conflicts(
                repo, wctx, p1ctx, destctx, labels, conflicts, force
            )

        with repo.dirstate.parentchange():
            if force:
                # If the user has done an `update --clean`, then we should
                # remove all entries from the dirstate.  Note this call to
                # clear() will also remove the parents, but we set them on
                # the next line, so we'll be OK.
                repo.dirstate.clear()
            # TODO(mbolin): Set the second parent, if appropriate.
            repo.setparents(destctx.node())
            mergemod.recordupdates(repo, actions, branchmerge)

        # Clear the update state
        util.unlink(vfs.join("updatestate"))

    # Invoke the update hook
    repo.hook("update", parent1=deststr, parent2="", error=stats[3])

    return stats
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing local changes relative to a master
    development tree.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    uses the tipmost head of the current named branch as the
    destination. (The destination changeset is not modified by rebasing,
    but new changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand for
    a topologically related set of changesets (the "source branch"). If
    you specify source (``-s/--source``), rebase will rebase that
    changeset and all of its descendants onto dest. If you specify base
    (``-b/--base``), rebase will select ancestors of base back to but
    not including the common ancestor with dest. Thus, ``-b`` is less
    precise but more convenient than ``-s``: you can specify any
    changeset in the source branch, and rebase will select the whole
    branch. If you specify neither ``-s`` nor ``-b``, rebase uses the
    parent of the working directory as the base.

    By default, rebase recreates the changesets in the source branch as
    descendants of dest and then destroys the originals. Use ``--keep``
    to preserve the original source changesets. Some changesets in the
    source branch (e.g. merges from the destination branch) may be
    dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset and
    source branch is that, unlike ``merge``, rebase will do nothing if
    you are at the latest (tipmost) head of a named branch with two
    heads. You need to explicitly specify source and/or destination (or
    ``update`` to the other head, if it's the head of the intended
    source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.
    """
    originalwd = target = None
    external = nullrev
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)

        if contf or abortf:
            if contf and abortf:
                raise error.ParseError('rebase',
                                       _('cannot use both abort and continue'))
            if collapsef:
                raise error.ParseError(
                    'rebase', _('cannot use collapse with continue or abort'))
            if detachf:
                raise error.ParseError(
                    'rebase', _('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise error.ParseError('rebase',
                    _('abort and continue do not allow specifying revisions'))

            (originalwd, target, state, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                abort(repo, originalwd, target, state)
                return
        else:
            if srcf and basef:
                raise error.ParseError('rebase', _('cannot specify both a '
                                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise error.ParseError(
                        'rebase', _('detach requires a revision to be specified'))
                if basef:
                    raise error.ParseError(
                        'rebase', _('cannot specify a base with detach'))

            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            if extrafn:
                raise error.ParseError(
                    'rebase', _('cannot use both keepbranches and extrafn'))
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        for rev in sorted(state):
            if state[rev] == -1:
                ui.debug("rebasing %d:%s\n" % (rev, repo[rev]))
                storestatus(repo, originalwd, target, state, collapsef,
                            keepf, keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    stats = rebasenode(repo, rev, p1, p2, state)
                    if stats and stats[3] > 0:
                        raise util.Abort(_('fix unresolved conflicts with hg '
                                           'resolve then run hg rebase --continue'))
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.note(_('rebase merging completed\n'))

        if collapsef:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            commitmsg = 'Collapsed revision'
            for rebased in state:
                if rebased not in skipped and state[rebased] != nullmerge:
                    commitmsg += '\n* %s' % repo[rebased].description()
            commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg, extrafn=extrafn)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    repair.strip(ui, repo, repo[min(rebased)].node(), "strip")

        clearstatus(repo)
        ui.status(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def uncrustify(ui, repo, *patterns, **options):
    """Run uncrustify on the specified files or directories.

    If no files are specified, operates on the whole working
    directory.

    Note: Files that don't have a .cc or .h suffix are always ignored,
    even if specified on the command line explicitly.

    By default, prints a list of files that are not clean according to
    uncrustify, using a similar output format as with hg status.  No
    changes are made to the working directory.

    With the --diff option, prints the changes suggested by uncrustify
    in unified diff format.  No changes are made to the working
    directory.

    With the --modify option, actually performs the changes suggested
    by uncrustify.  The original (dirty) files are backed up with a
    .crusty suffix.  Existing files with such a suffix are silently
    overwritten.  To disable these backups, use --no-backup.

    This command always operates on the working directory, not on
    arbitrary repository revisions.

    Returns 0 on success.
    """
    if options["diff"] and options["modify"]:
        raise util.Abort("cannot specify --diff and --modify at the same time")

    if options["diff"]:
        mode = "diff"
    elif options["modify"]:
        mode = "modify"
    else:
        mode = "status"

    no_backup = options["no_backup"]
    show_clean = options["show_clean"]

    paths = [path for path in _get_files(repo, patterns, options)
             if path.endswith((".cc", ".h"))]

    uncrustify_cfg = repo.pathto(".uncrustify.cfg")
    relpaths = [repo.pathto(path) for path in paths]
    if not os.path.exists(uncrustify_cfg):
        raise util.Abort("could not find .uncrustify.cfg in repository root")
    _run_uncrustify(uncrustify_cfg, relpaths)

    ctx = repo[None]
    for path in paths:
        relpath = repo.pathto(path)
        uncr_path = path + SUFFIX
        uncr_relpath = relpath + SUFFIX
        have_changes = (ctx[path].data() != ctx[uncr_path].data())

        if have_changes:
            if mode == "status":
                ui.write("M %s\n" % relpath, label="status.modified")
                util.unlink(uncr_relpath)
            elif mode == "diff":
                _run_diff(relpath, uncr_relpath)
                util.unlink(uncr_relpath)
            elif mode == "modify":
                if not no_backup:
                    util.rename(relpath, relpath + ".crusty")
                util.rename(uncr_relpath, relpath)
                if not ui.quiet:
                    ui.write("%s uncrustified\n" % relpath)
        else:
            if show_clean:
                if mode == "status":
                    ui.write("C %s\n" % relpath, label="status.clean")
                elif mode == "modify":
                    ui.write("%s is clean\n" % relpath)
            util.unlink(uncr_relpath)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    You should not rebase changesets that have already been shared
    with others. Doing so will force everybody else to perform the
    same rebase or they will end up with duplicated changesets after
    pulling in your rebased changesets.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    uses the tipmost head of the current named branch as the
    destination. (The destination changeset is not modified by rebasing,
    but new changesets are added as its descendants.)

    You can specify which changesets to rebase in two ways: as a
    "source" changeset or as a "base" changeset. Both are shorthand for
    a topologically related set of changesets (the "source branch"). If
    you specify source (``-s/--source``), rebase will rebase that
    changeset and all of its descendants onto dest. If you specify base
    (``-b/--base``), rebase will select ancestors of base back to but
    not including the common ancestor with dest. Thus, ``-b`` is less
    precise but more convenient than ``-s``: you can specify any
    changeset in the source branch, and rebase will select the whole
    branch. If you specify neither ``-s`` nor ``-b``, rebase uses the
    parent of the working directory as the base.

    By default, rebase recreates the changesets in the source branch as
    descendants of dest and then destroys the originals. Use ``--keep``
    to preserve the original source changesets. Some changesets in the
    source branch (e.g. merges from the destination branch) may be
    dropped if they no longer contribute any change.

    One result of the rules for selecting the destination changeset and
    source branch is that, unlike ``merge``, rebase will do nothing if
    you are at the latest (tipmost) head of a named branch with two
    heads. You need to explicitly specify source and/or destination (or
    ``update`` to the other head, if it's the head of the intended
    source branch).

    If a rebase is interrupted to manually resolve a merge, it can be
    continued with --continue/-c or aborted with --abort/-a.

    Returns 0 on success, 1 if nothing to rebase.
    """
    originalwd = target = None
    external = nullrev
    state = {}
    skipped = set()
    targetancestors = set()

    lock = wlock = None
    try:
        lock = repo.lock()
        wlock = repo.wlock()

        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        contf = opts.get('continue')
        abortf = opts.get('abort')
        collapsef = opts.get('collapse', False)
        extrafn = opts.get('extrafn')
        keepf = opts.get('keep', False)
        keepbranchesf = opts.get('keepbranches', False)
        detachf = opts.get('detach', False)
        # keepopen is not meant for use on the command line, but by
        # other extensions
        keepopen = opts.get('keepopen', False)

        if contf or abortf:
            if contf and abortf:
                raise util.Abort(_('cannot use both abort and continue'))
            if collapsef:
                raise util.Abort(
                    _('cannot use collapse with continue or abort'))
            if detachf:
                raise util.Abort(_('cannot use detach with continue or abort'))
            if srcf or basef or destf:
                raise util.Abort(
                    _('abort and continue do not allow specifying revisions'))

            (originalwd, target, state, skipped, collapsef, keepf,
             keepbranchesf, external) = restorestatus(repo)
            if abortf:
                return abort(repo, originalwd, target, state)
        else:
            if srcf and basef:
                raise util.Abort(_('cannot specify both a '
                                   'revision and a base'))
            if detachf:
                if not srcf:
                    raise util.Abort(
                        _('detach requires a revision to be specified'))
                if basef:
                    raise util.Abort(_('cannot specify a base with detach'))

            cmdutil.bail_if_changed(repo)
            result = buildstate(repo, destf, srcf, basef, detachf)
            if not result:
                # Empty state built, nothing to rebase
                ui.status(_('nothing to rebase\n'))
                return 1
            else:
                originalwd, target, state = result
                if collapsef:
                    targetancestors = set(repo.changelog.ancestors(target))
                    external = checkexternal(repo, state, targetancestors)

        if keepbranchesf:
            if extrafn:
                raise util.Abort(_('cannot use both keepbranches and extrafn'))
            def extrafn(ctx, extra):
                extra['branch'] = ctx.branch()

        # Rebase
        if not targetancestors:
            targetancestors = set(repo.changelog.ancestors(target))
            targetancestors.add(target)

        sortedstate = sorted(state)
        total = len(sortedstate)
        pos = 0
        for rev in sortedstate:
            pos += 1
            if state[rev] == -1:
                ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
                            _('changesets'), total)
                storestatus(repo, originalwd, target, state, collapsef, keepf,
                            keepbranchesf, external)
                p1, p2 = defineparents(repo, rev, target, state,
                                       targetancestors)
                if len(repo.parents()) == 2:
                    repo.ui.debug('resuming interrupted rebase\n')
                else:
                    stats = rebasenode(repo, rev, p1, p2, state)
                    if stats and stats[3] > 0:
                        raise util.Abort(_('unresolved conflicts (see hg '
                                           'resolve, then hg rebase --continue)'))
                updatedirstate(repo, rev, target, p2)
                if not collapsef:
                    newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn)
                else:
                    # Skip commit if we are collapsing
                    repo.dirstate.setparents(repo[p1].node())
                    newrev = None
                # Update the state
                if newrev is not None:
                    state[rev] = repo[newrev].rev()
                else:
                    if not collapsef:
                        ui.note(_('no changes, revision %d skipped\n') % rev)
                        ui.debug('next revision set to %s\n' % p1)
                        skipped.add(rev)
                    state[rev] = p1

        ui.progress(_('rebasing'), None)
        ui.note(_('rebase merging completed\n'))

        if collapsef and not keepopen:
            p1, p2 = defineparents(repo, min(state), target,
                                   state, targetancestors)
            commitmsg = 'Collapsed revision'
            for rebased in state:
                if rebased not in skipped and state[rebased] != nullmerge:
                    commitmsg += '\n* %s' % repo[rebased].description()
            commitmsg = ui.edit(commitmsg, repo.ui.username())
            newrev = concludenode(repo, rev, p1, external,
                                  commitmsg=commitmsg, extrafn=extrafn)

        if 'qtip' in repo.tags():
            updatemq(repo, state, skipped, **opts)

        if not keepf:
            # Remove no more useful revisions
            rebased = [rev for rev in state if state[rev] != nullmerge]
            if rebased:
                if set(repo.changelog.descendants(min(rebased))) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    # backup the old csets by default
                    repair.strip(ui, repo, repo[min(rebased)].node(), "all")

        clearstatus(repo)
        ui.note(_("rebase completed\n"))
        if os.path.exists(repo.sjoin('undo')):
            util.unlink(repo.sjoin('undo'))
        if skipped:
            ui.note(_("%d revisions have been skipped\n") % len(skipped))
    finally:
        release(lock, wlock)
def cmddebugconvertobsolete(ui, repo):
    """import markers from an .hg/obsolete-relations file"""
    cnt = 0
    err = 0
    l = repo.lock()
    some = False
    try:
        unlink = []
        tr = repo.transaction('convert-obsolete')
        try:
            repo._importoldobsolete = True
            store = repo.obsstore
            ### very first format
            try:
                f = repo.opener('obsolete-relations')
                try:
                    some = True
                    for line in f:
                        subhex, objhex = line.split()
                        suc = bin(subhex)
                        prec = bin(objhex)
                        sucs = (suc == nullid) and [] or [suc]
                        meta = {
                            'date': '%i %i' % util.makedate(),
                            'user': ui.username(),
                        }
                        try:
                            store.create(tr, prec, sucs, 0, meta)
                            cnt += 1
                        except ValueError:
                            repo.ui.write_err("invalid old marker line: %s"
                                              % (line))
                            err += 1
                finally:
                    f.close()
                unlink.append(repo.join('obsolete-relations'))
            except IOError:
                pass
            ### second (json) format
            data = repo.sopener.tryread('obsoletemarkers')
            if data:
                some = True
                for oldmark in json.loads(data):
                    del oldmark['id']  # dropped for now
                    del oldmark['reason']  # unused until then
                    oldobject = str(oldmark.pop('object'))
                    oldsubjects = [str(s) for s in oldmark.pop('subjects', [])]
                    LOOKUP_ERRORS = (error.RepoLookupError, error.LookupError)
                    if len(oldobject) != 40:
                        try:
                            oldobject = repo[oldobject].node()
                        except LOOKUP_ERRORS:
                            pass
                    if any(len(s) != 40 for s in oldsubjects):
                        try:
                            oldsubjects = [repo[s].node()
                                           for s in oldsubjects]
                        except LOOKUP_ERRORS:
                            pass

                    oldmark['date'] = '%i %i' % tuple(oldmark['date'])
                    meta = dict((k.encode('utf-8'), v.encode('utf-8'))
                                for k, v in oldmark.iteritems())
                    try:
                        succs = [bin(n) for n in oldsubjects]
                        succs = [n for n in succs if n != nullid]
                        store.create(tr, bin(oldobject), succs, 0, meta)
                        cnt += 1
                    except ValueError:
                        repo.ui.write_err("invalid marker %s -> %s\n"
                                          % (oldobject, oldsubjects))
                        err += 1
                unlink.append(repo.sjoin('obsoletemarkers'))
            tr.close()
            for path in unlink:
                util.unlink(path)
        finally:
            tr.release()
    finally:
        del repo._importoldobsolete
        l.release()
    if not some:
        ui.warn('nothing to do\n')
    ui.status('%i obsolete markers converted\n' % cnt)
    if err:
        ui.write_err('%i conversions failed. check your graph!\n' % err)
class basestore(object):
    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store so it can be retrieved by hash.'''
        raise NotImplementedError('abstract method')

    def exists(self, hashes):
        '''Check to see if the store contains the given hashes. Given an
        iterable of hashes it returns a mapping from hash to bool.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root.

        files is a list of (filename, hash) tuples.
        Return (success, missing), lists of files successfully downloaded and
        those not found in the store.

        success is a list of (filename, hash) tuples;
        missing is a list of filenames that we could not get.  (The detailed
        error message will already have been presented to the user, so
        missing is just supplied as a summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        available = self.exists(set(hash for (_filename, hash) in files))
        for filename, hash in files:
            ui.progress(_('getting largefiles'), at, unit='lfile',
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            if not available.get(hash):
                ui.warn(_('%s: largefile %s not available from %s\n')
                        % (filename, hash, util.hidepassword(self.url)))
                missing.append(filename)
                continue

            if self._gethash(filename, hash):
                success.append((filename, hash))
            else:
                missing.append(filename)

        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        tmpname = storefilename + '.tmp'
        tmpfile = util.atomictempfile(tmpname,
                                      createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            self.ui.warn(err.longmessage())
            gothash = ""
        tmpfile.close()

        if gothash != hash:
            if gothash != "":
                self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                             % (filename, hash, gothash))
            util.unlink(tmpname)
            return False

        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True