def orderFiles(sourcePath, file):
    """Copy *file* from *sourcePath* into a date-based folder under the
    module-level ``dest`` root, skipping files that already exist there.

    The target folder name is built from get_date()'s return value --
    presumably (year, month); TODO confirm against get_date.
    """
    srcPath = sourcePath + "/" + file
    d = get_date(srcPath)
    targetDir = dest + d[0] + "/" + d[1] + "_" + d[0]
    destPath = targetDir + "/" + file
    make_sure_path_exists(targetDir)
    # Never overwrite a file that was already sorted into place.
    if not os.path.exists(destPath):
        copyfile(srcPath, destPath)
def _updatebigrepo(ui, repo, files, brepo, bigfiles, ds):
    """Store working-copy big files into the big-files repository *brepo*.

    For each name in *files*, the file's hash is recorded in *bigfiles*
    (mutated in place) and the content is stored as
    ``<brepo>/<file>.<hash>``, gzip-compressed unless its extension marks
    it as already compressed.  A failure to store one file is reported
    and the loop continues with the next file.
    """
    # Extensions whose content is already compressed; re-gzipping them
    # costs time for no space gain, so they are stored verbatim.
    dont_pack = frozenset(['gz', 'zip', 'tgz', '7z', 'jpg', 'jpeg', 'gif',
                           'mpg', 'mpeg', 'avi', 'rar', 'cab'])
    for file in files:
        f = repo.wjoin(file)
        hash = accelerated_hash(repo, file, os.lstat(f), ds)
        bigfiles[file] = hash
        rf = "%s/%s.%s" % (brepo, file, hash)
        util.makedirs(os.path.dirname(rf))
        try:
            # NOTE: splits the full path, so a dot-less filename yields the
            # whole path here; harmless because it then matches nothing.
            ext = f.split('.')[-1]
            if ext in dont_pack:
                util.copyfile(f, rf)
            else:
                # try/finally blocks ensure the handles are closed even
                # when a read or write fails (previously they leaked).
                fo = open(f, 'rb')
                try:
                    rfo_fileobj = open(rf + '.gz', 'wb')
                    try:
                        rfo = gzip.GzipFile(file + '.' + hash, 'wb', 9,
                                            rfo_fileobj)
                        try:
                            def read10Mb():
                                return fo.read(1024 * 1024 * 10)
                            for chunk in iter(read10Mb, ''):
                                rfo.write(chunk)
                        finally:
                            rfo.close()
                    finally:
                        rfo_fileobj.close()
                finally:
                    fo.close()
        except Exception:
            # Was a bare "except:"; narrowed so ^C still aborts the command.
            ui.write(_('failed to store %s\n') % f)
def remember_path(ui, repo, path, value):
    '''appends the path to the working copy's hgrc and backs up the original

    Does nothing when the name is already configured or contains
    characters ConfigParser cannot round-trip.  Persisting failures are
    tolerated: the in-memory config has already been updated.
    '''
    paths = dict(ui.configitems('paths'))
    # This should never happen.
    if path in paths:
        return
    # ConfigParser only cares about these three characters.
    if re.search(r'[:=\s]', path):
        return
    audit_path = getattr(repo.opener, 'audit_path',
                         util.path_auditor(repo.root))
    audit_path('hgrc')
    audit_path('hgrc.backup')
    base = repo.opener.base
    util.copyfile(os.path.join(base, 'hgrc'),
                  os.path.join(base, 'hgrc.backup'))
    ui.setconfig('paths', path, value)
    try:
        fp = repo.opener('hgrc', 'a', text=True)
        try:
            # Mercurial assumes Unix newlines by default and so do we.
            fp.write('\n[paths]\n%s = %s\n' % (path, value))
        finally:
            # Previously the handle leaked when write() raised IOError.
            fp.close()
    except IOError:
        return
def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the
    .hg/bookmarks file in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks
    # Keep a pre-write copy so "hg rollback" can restore the bookmarks.
    if os.path.exists(repo.join('bookmarks')):
        util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
    # Drop the "current" marker when it no longer names a known bookmark.
    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        # atomictemp: write to a temp file and rename into place, so
        # concurrent readers never see a half-written bookmarks file.
        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), refspec))
        file.rename()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass
    finally:
        wlock.release()
def remember_path(ui, repo, path, value):
    '''appends the path to the working copy's hgrc and backs up the original'''
    paths = dict(ui.configitems('paths'))
    # This should never happen.
    if path in paths:
        return
    # ConfigParser only cares about these three characters.
    if re.search(r'[:=\s]', path):
        return
    # Prefer the modern scmutil auditor.  Under Mercurial's demandimport,
    # touching a missing attribute raises ImportError, hence the fallback
    # to the pre-1.9 API -- NOTE(review): confirm demandimport is active.
    try:
        audit_path = scmutil.pathauditor(repo.root)
    except ImportError:
        audit_path = getattr(repo.opener, 'audit_path',
                             util.path_auditor(repo.root))
    audit_path('hgrc')
    audit_path('hgrc.backup')
    base = repo.opener.base
    hgrc, backup = [os.path.join(base, x) for x in 'hgrc', 'hgrc.backup']
    # Only back up an hgrc that actually exists.
    if os.path.exists(hgrc):
        util.copyfile(hgrc, backup)
    ui.setconfig('paths', path, value)
    try:
        fp = repo.opener('hgrc', 'a', text=True)
        # Mercurial assumes Unix newlines by default and so do we.
        fp.write('\n[paths]\n%s = %s\n' % (path, value))
        fp.close()
    except IOError:
        # Persisting is best-effort; the in-memory config was already set.
        return
def savebackup(orig, self, tr, backupname):
    """Wrapper for dirstate.savebackup that also backs up the narrowspec.

    After the wrapped implementation runs, the narrow spec file is
    hard-linked next to the dirstate backup so both can be restored
    together.
    """
    orig(self, tr, backupname)
    vfs = self._opener
    narrowbackupname = _narrowbackupname(backupname)
    # Drop any stale copy before linking the current spec into place.
    vfs.tryunlink(narrowbackupname)
    hgutil.copyfile(vfs.join(narrowspec.FILENAME),
                    vfs.join(narrowbackupname), hardlink=True)
def savebackup(self, tr, backupname):
    """Back up the dirstate and, alongside it, the narrow spec file."""
    super(narrowdirstate, self).savebackup(tr, backupname)
    specbackup = _narrowbackupname(backupname)
    # Remove any previous backup, then hard-link the live spec in place.
    self._opener.tryunlink(specbackup)
    hgutil.copyfile(self._opener.join(narrowspec.FILENAME),
                    self._opener.join(specbackup), hardlink=True)
def dodiff(tmproot):
    # Nested helper: reads ctx1a/ctx1b/ctx2, MAR, cpy, cto, hascopies,
    # do3way, diffcmd, args, repo and ui from the enclosing scope.
    assert not (hascopies and len(MAR) > 1), \
        'dodiff cannot handle copies when diffing dirs'
    sa = [mod_a, add_a, rem_a]
    sb = [mod_b, add_b, rem_b]
    ctxs = [ctx1a, ctx1b, ctx2]

    # If more than one file, diff on working dir copy.
    copyworkingdir = len(MAR) > 1
    dirs, labels, fns_and_mtimes = snapshotset(repo, ctxs, sa, sb, cpy,
                                               tmproot, copyworkingdir)
    dir1a, dir1b, dir2 = dirs
    label1a, label1b, label2 = labels
    fns_and_mtime = fns_and_mtimes[2]

    if len(MAR) > 1 and label2 == '':
        label2 = 'working files'

    def getfile(fname, dir, label):
        # Return (label, path) for fname inside the snapshot dir; fall
        # back to a shared empty placeholder when the file is absent so
        # the external tool still gets a valid path.
        file = os.path.join(tmproot, dir, fname)
        if os.path.isfile(file):
            return fname+label, file
        nullfile = os.path.join(tmproot, 'empty')
        fp = open(nullfile, 'w')
        fp.close()
        return _nonexistant+label, nullfile

    # If only one change, diff the files instead of the directories
    # Handle bogus modifies correctly by checking if the files exist
    if len(MAR) == 1:
        file2 = util.localpath(MAR.pop())
        # For a copy/rename target, diff against the copy source.
        if file2 in cto:
            file1 = util.localpath(cpy[file2])
        else:
            file1 = file2
        label1a, dir1a = getfile(file1, dir1a, label1a)
        if do3way:
            label1b, dir1b = getfile(file1, dir1b, label1b)
        label2, dir2 = getfile(file2, dir2, label2)
    if do3way:
        label1a += '[local]'
        label1b += '[other]'
        label2 += '[merged]'

    # Substitution map for the external tool's command-line template.
    replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
                   plabel1=label1a, plabel2=label1b,
                   phash1=str(ctx1a), phash2=str(ctx1b),
                   repo=hglib.get_reponame(repo),
                   clabel=label2, child=dir2, chash=str(ctx2))
    launchtool(diffcmd, args, replace, True)

    # detect if changes were made to mirrored working files
    for copy_fn, working_fn, mtime in fns_and_mtime:
        if os.path.getmtime(copy_fn) != mtime:
            ui.debug('file changed while diffing. '
                     'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
            util.copyfile(copy_fn, working_fn)
def storeuntracked(repo, untracked):
    """Move each untracked file into .hg/tasks/untrackedbackup, keyed by
    the SHA-1 of its repo-relative name."""
    if not untracked:
        return
    os.mkdir(repo.join('tasks/untrackedbackup'))
    for f in untracked:
        abspath = util.pathto(repo.root, None, f)
        shaname = util.sha1(f).hexdigest()
        # Copy first, then remove the original from the working dir.
        util.copyfile(abspath,
                      repo.join('tasks/untrackedbackup/%s' % shaname))
        util.unlink(abspath)
def unshelve(ui, repo, **opts): '''restore shelved changes''' # Shelf name and path shelfname = opts.get('name') shelfpath = getshelfpath(repo, shelfname) # List all the active shelves by name and return ' if opts['list']: listshelves(ui, repo) return try: patch_diff = repo.opener(shelfpath).read() fp = cStringIO.StringIO(patch_diff) if opts['inspect']: ui.status(fp.getvalue()) else: files = [] ac = parsepatch(fp) for chunk in ac: if isinstance(chunk, header): files += chunk.files() backupdir = repo.join('shelve-backups') backups = makebackup(ui, repo, backupdir, set(files)) ui.debug('applying shelved patch\n') patchdone = 0 try: try: fp.seek(0) internalpatch(fp, ui, 1, repo.root) patchdone = 1 except: if opts['force']: patchdone = 1 else: ui.status('restoring backup files\n') for realname, tmpname in backups.iteritems(): ui.debug('restoring %r to %r\n' % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) finally: try: ui.debug('removing backup files\n') shutil.rmtree(backupdir, True) except OSError: pass if patchdone: ui.debug("removing shelved patches\n") os.unlink(repo.join(shelfpath)) ui.status("unshelve completed\n") except IOError: ui.warn('nothing to unshelve\n')
def unshelve(ui, repo, **opts): '''restore shelved changes''' # Shelf name and path shelfname = opts.get('name') shelfpath = getshelfpath(repo, shelfname) # List all the active shelves by name and return ' if opts['list']: listshelves(ui,repo) return try: patch_diff = repo.opener(shelfpath).read() fp = cStringIO.StringIO(patch_diff) if opts['inspect']: ui.status(fp.getvalue()) else: files = [] ac = parsepatch(fp) for chunk in ac: if isinstance(chunk, header): files += chunk.files() backupdir = repo.join('shelve-backups') backups = makebackup(ui, repo, backupdir, set(files)) ui.debug('applying shelved patch\n') patchdone = 0 try: try: fp.seek(0) internalpatch(fp, ui, 1, repo.root) patchdone = 1 except: if opts['force']: patchdone = 1 else: ui.status('restoring backup files\n') for realname, tmpname in backups.iteritems(): ui.debug('restoring %r to %r\n' % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) finally: try: ui.debug('removing backup files\n') shutil.rmtree(backupdir, True) except OSError: pass if patchdone: ui.debug("removing shelved patches\n") os.unlink(repo.join(shelfpath)) ui.status("unshelve completed\n") except IOError: ui.warn('nothing to unshelve\n')
def backup(self, repo, files, copy=False):
    """Preserve the local version of each file in *files* as '<name>.orig'.

    Used for local changes in the --force case; copies when *copy* is
    true, otherwise moves the file out of the way.
    """
    # backup local changes in --force case
    for f in sorted(files):
        absf = repo.wjoin(f)
        if not os.path.lexists(absf):
            continue
        target = absf + '.orig'
        self.ui.note(_('saving current version of %s as %s\n') %
                     (f, f + '.orig'))
        if copy:
            util.copyfile(absf, target)
        else:
            util.rename(absf, target)
def unremember_path(ui, repo):
    '''restores the working copy's hgrc'''
    audit_path = getattr(repo.opener, 'audit_path',
                         util.path_auditor(repo.root))
    for name in ('hgrc', 'hgrc.backup'):
        audit_path(name)
    base = repo.opener.base
    hgrc = os.path.join(base, 'hgrc')
    backup = os.path.join(base, 'hgrc.backup')
    # Only restore when there is an hgrc to be overwritten.
    if os.path.exists(hgrc):
        util.copyfile(backup, hgrc)
def unshelve(ui, repo, *pats, **opts):
    '''restore shelved changes'''
    try:
        fp = cStringIO.StringIO()
        fp.write(repo.opener('shelve').read())
    except:
        # No shelve file could be read.
        ui.warn(_('nothing to unshelve\n'))
    else:
        if opts['inspect']:
            # Show the shelved patch without applying it.
            ui.status(fp.getvalue())
        else:
            # Gather the files touched by the patch so they can be
            # backed up before applying.
            files = []
            fp.seek(0)
            for chunk in parsepatch(fp):
                if isinstance(chunk, header):
                    files += chunk.files()
            backupdir = repo.join('shelve-backups')
            try:
                backups = makebackup(ui, repo, backupdir, set(files))
            except:
                ui.warn(_('unshelve backup aborted\n'))
                delete_backup(ui, repo, backupdir)
                raise
            ui.debug(_('applying shelved patch\n'))
            patchdone = 0
            try:
                try:
                    fp.seek(0)
                    pfiles = {}
                    internalpatch(fp, ui, 1, repo.root, files=pfiles)
                    # Record the patched files in the dirstate.
                    cmdutil.updatedir(ui, repo, pfiles)
                    patchdone = 1
                except:
                    if opts['force']:
                        # --force accepts a partial application.
                        patchdone = 1
                    else:
                        # Roll the touched files back from the backups.
                        ui.status(_('restoring backup files\n'))
                        for realname, tmpname in backups.iteritems():
                            ui.debug(_('restoring %r to %r\n') %
                                     (tmpname, realname))
                            util.copyfile(tmpname, repo.wjoin(realname))
            finally:
                delete_backup(ui, repo, backupdir)

            if patchdone:
                ui.debug(_('removing shelved patches\n'))
                os.unlink(repo.join('shelve'))
                ui.status(_('unshelve completed\n'))
            else:
                raise patch.PatchError
def unremember_path(ui, repo):
    '''restores the working copy's hgrc

    Copies hgrc.backup back over hgrc when a backup exists; otherwise
    the hgrc (presumably created by remember_path) is removed.
    '''
    audit_path = build_audit_path(repo)
    audit_path('hgrc')
    audit_path('hgrc.backup')
    base = repo.opener.base
    hgrc, backup = [os.path.join(base, x) for x in ('hgrc', 'hgrc.backup')]
    if os.path.exists(backup):
        util.copyfile(backup, hgrc)
    elif os.path.exists(hgrc):
        # Guarded: previously raised OSError when neither file existed.
        os.remove(hgrc)
def copyfile(abssrc, relsrc, otarget, exact):
    # Nested helper: reads repo, cwd, ui, targets, after, opts, rename
    # and dryrun from the enclosing copy/rename implementation.
    abstarget = scmutil.canonpath(repo.root, cwd, otarget)
    reltarget = repo.pathto(abstarget, cwd)
    target = repo.wjoin(abstarget)
    src = repo.wjoin(abssrc)
    state = repo.dirstate[abstarget]

    scmutil.checkportable(ui, abstarget)

    # check for collisions
    prevsrc = targets.get(abstarget)
    if prevsrc is not None:
        ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                (reltarget, repo.pathto(abssrc, cwd),
                 repo.pathto(prevsrc, cwd)))
        return

    # check for overwrites
    exists = os.path.lexists(target)
    if not after and exists or after and state in 'mn':
        if not opts['force']:
            ui.warn(_('%s: not overwriting - file exists\n') %
                    reltarget)
            return

    if after:
        # --after only records an already-performed copy/move.
        if not exists:
            if rename:
                ui.warn(_('%s: not recording move - %s does not exist\n') %
                        (relsrc, reltarget))
            else:
                ui.warn(_('%s: not recording copy - %s does not exist\n') %
                        (relsrc, reltarget))
            return
    elif not dryrun:
        try:
            if exists:
                os.unlink(target)
            targetdir = os.path.dirname(target) or '.'
            if not os.path.isdir(targetdir):
                os.makedirs(targetdir)
            util.copyfile(src, target)
            srcexists = True
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                ui.warn(_('%s: deleted in working copy\n') % relsrc)
                srcexists = False
            else:
                ui.warn(_('%s: cannot copy - %s\n') %
                        (relsrc, inst.strerror))
                return True # report a failure
    # NOTE(review): the function appears to continue past this excerpt
    # (srcexists is assigned but never used in the visible portion).
def unremember_path(ui, repo):
    '''restores the working copy's hgrc

    Restores hgrc from hgrc.backup when the backup exists; otherwise the
    hgrc (presumably created by remember_path) is removed.
    '''
    # Prefer the modern auditor; under Mercurial's demandimport a missing
    # attribute raises ImportError, hence the fallback to the old API.
    try:
        audit_path = scmutil.pathauditor(repo.root)
    except ImportError:
        audit_path = getattr(repo.opener, 'audit_path',
                             util.path_auditor(repo.root))
    audit_path('hgrc')
    audit_path('hgrc.backup')
    base = repo.opener.base
    hgrc, backup = [os.path.join(base, x) for x in ('hgrc', 'hgrc.backup')]
    if os.path.exists(backup):
        util.copyfile(backup, hgrc)
    elif os.path.exists(hgrc):
        # Guarded: previously raised OSError when neither file existed.
        os.remove(hgrc)
def unshelve(ui, repo, *pats, **opts):
    '''restore shelved changes'''
    try:
        fp = cStringIO.StringIO()
        fp.write(repo.opener('shelve').read())
        if opts['inspect']:
            # Show the shelved patch without applying it.
            ui.status(fp.getvalue())
        else:
            # Gather the files the patch touches so they can be backed up.
            files = []
            # BUGFIX: fp is positioned at EOF after the write() above, so
            # parsepatch() previously saw no chunks and nothing was ever
            # backed up (the sibling implementation seeks first).
            fp.seek(0)
            for chunk in parsepatch(fp):
                if isinstance(chunk, header):
                    files += chunk.files()
            backupdir = repo.join('shelve-backups')
            backups = makebackup(ui, repo, backupdir, set(files))

            ui.debug('applying shelved patch\n')
            patchdone = 0
            try:
                try:
                    fp.seek(0)
                    internalpatch(fp, ui, 1, repo.root)
                    patchdone = 1
                except:
                    if opts['force']:
                        # --force treats a failed application as done.
                        patchdone = 1
                    else:
                        # Roll the touched files back from the backups.
                        ui.status('restoring backup files\n')
                        for realname, tmpname in backups.iteritems():
                            ui.debug('restoring %r to %r\n' %
                                     (tmpname, realname))
                            util.copyfile(tmpname, repo.wjoin(realname))
            finally:
                try:
                    ui.debug('removing backup files\n')
                    shutil.rmtree(backupdir, True)
                except OSError:
                    pass

            # Delete the shelf only after a successful (or forced) apply.
            if patchdone:
                ui.debug("removing shelved patches\n")
                os.unlink(repo.join('shelve'))
                ui.status("unshelve completed\n")
    except IOError:
        ui.warn('nothing to unshelve\n')
def write(repo, refs):
    """Persist the bookmark => node map *refs* to .hg/bookmarks.

    The format matches localtags.  The previous file is first copied to
    undo.bookmarks so rollback can restore it.
    """
    marks = repo.join("bookmarks")
    if os.path.exists(marks):
        util.copyfile(marks, repo.join("undo.bookmarks"))
    # Clear the "current" marker if it no longer names a bookmark.
    if current(repo) not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        # atomictemp writes to a temp file and renames it into place.
        fp = repo.opener("bookmarks", "w", atomictemp=True)
        for refspec, node in refs.iteritems():
            fp.write("%s %s\n" % (hex(node), refspec))
        fp.rename()
    finally:
        wlock.release()
def write(repo, refs):
    '''Write the bookmark => node dictionary *refs* to .hg/bookmarks in
    localtags format, after saving the old file as undo.bookmarks for
    rollback.'''
    bookmarkfile = repo.join('bookmarks')
    if os.path.exists(bookmarkfile):
        util.copyfile(bookmarkfile, repo.join('undo.bookmarks'))
    # Drop the "current" marker when its bookmark disappeared.
    if current(repo) not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        out = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            out.write("%s %s\n" % (hex(node), refspec))
        # Renames the temp file over the real one (atomictemp contract).
        out.rename()
    finally:
        wlock.release()
def write(ui, repo, tasks):
    '''Write tasks

    Write the given task dictionary to the .hg/tasks/tasks file.

    We also store a backup of the previous state in .hg/tasks/undo.tasks
    that can be copied back on rollback.
    '''
    # Save the previous state for rollback.
    if os.path.exists(repo.join('tasks/tasks')):
        util.copyfile(repo.join('tasks/tasks'), repo.join('tasks/undo.tasks'))
    # is this needed here?
    if current(repo) not in tasks:
        setcurrent(ui, repo, None)
    # Create the tasks directory on first use; a concurrent creator is
    # tolerated (EEXIST), anything else propagates.
    if not os.path.isdir(repo.join('tasks')):
        try:
            os.mkdir(repo.join('tasks'))
        except OSError, inst:
            if inst.errno != errno.EEXIST:
                raise
    # NOTE(review): the visible chunk ends here without actually writing
    # the tasks file -- the remainder of this function is presumably
    # outside this excerpt.
def bigupdate(ui, repo, *pats, **opts):
    '''fetch files from versions directory as recorded in '.bigfiles'.
    Also complain about necessary files missing in the version
    directory'''
    ds = read_bigfiledirstate(ui, repo)
    bigfiles = parse_bigfiles(repo)
    tracked_gotbig, added_big, modified, removed, gotsmall, \
        missinginrepo = _bigstatus(ui, repo, pats, opts, ds, bigfiles)
    brepo = bigfiles_repo(ui)
    # Refetch files whose working copies are gone; with --clean also
    # overwrite locally modified ones.
    tocopy = removed
    if opts['clean']:
        tocopy = tocopy+modified
    for file in tocopy:
        f = repo.wjoin(file)
        hash = bigfiles[file]
        rf = "%s/%s.%s" % (brepo, file, hash)
        ui.write(_("fetching %s\n") % rf)
        if not opts['dry_run']:
            util.makedirs(os.path.dirname(f))
            if os.path.exists(f):
                util.unlink(f)
            if os.path.exists(rf):
                # Stored uncompressed (pre-compressed file types).
                util.copyfile(rf, f)
            else:
                # Stored gzip-compressed; stream it out in 10 MB chunks.
                fo = open(f, 'wb')
                rfo = gzip.open(rf + '.gz', 'rb')
                def read10Mb():
                    return rfo.read(1024*1024*10)
                for chunk in iter(read10Mb, ''):
                    fo.write(chunk)
                fo.close()
                rfo.close()
    if missinginrepo:
        ui.write(_("\nNeeded files missing in bigrepo %s:\n") % brepo)
        for file in missinginrepo:
            hash = bigfiles[file]
            ui.write("%s.%s\n" % (file, hash))
    write_bigfiledirstate(ui, repo, ds)
def shelvefunc(ui, repo, message, match, opts):
    # Interactive shelve: back up the touched files, revert the working
    # copy, re-apply the chunks the user chose to KEEP, and save the
    # chunks being shelved to the shelf file (shelfpath from enclosing
    # scope).
    parents = repo.dirstate.parents()
    changes = repo.status(match=match)[:5]
    modified, added, removed = changes[:3]
    files = modified + added + removed
    diffopts = mdiff.diffopts(git=True, nodates=True)
    patch_diff = ''.join(patch.diff(repo, parents[0], match=match,
                                    changes=changes, opts=diffopts))

    fp = cStringIO.StringIO(patch_diff)
    ac = parsepatch(fp)
    fp.close()
    # chunks = what the user selected to shelve; rc = the complement.
    chunks = filterpatch(ui, ac, not opts['all'])
    rc = refilterpatch(ac, chunks)

    # set of files to be processed
    contenders = {}
    for h in chunks:
        try:
            contenders.update(dict.fromkeys(h.files()))
        except AttributeError:
            pass

    # exclude sources of copies that are otherwise untouched
    newfiles = set(f for f in files if f in contenders)
    if not newfiles:
        ui.status(_('no changes to shelve\n'))
        return 0

    backupdir = repo.join('shelve-backups')
    try:
        backups = makebackup(ui, repo, backupdir, newfiles)

        # patch to shelve
        sp = cStringIO.StringIO()
        for c in chunks:
            c.write(sp)

        # patch to apply to shelved files
        fp = cStringIO.StringIO()
        for c in rc:
            # skip files not selected for shelving
            if c.filename() in newfiles:
                c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)

        try:
            # 3a. apply filtered patch to clean repo (clean)
            opts['no_backup'] = True
            cmdutil.revert(ui, repo, repo['.'], parents,
                           *[os.path.join(repo.root, f) for f in newfiles],
                           **opts)
            for f in added:
                if f in newfiles:
                    util.unlinkpath(repo.wjoin(f))

            # 3b. apply filtered patch to clean repo (apply)
            if dopatch:
                ui.debug('applying patch\n')
                ui.debug(fp.getvalue())
                patch.internalpatch(ui, repo, fp, 1)
            del fp

            # 3c. apply filtered patch to clean repo (shelve)
            ui.debug("saving patch to shelve\n")
            if opts['append']:
                sp.write(repo.opener(shelfpath).read())
            sp.seek(0)
            f = repo.opener(shelfpath, "w")
            f.write(sp.getvalue())
            del f, sp
        except:
            # Anything went wrong: put the working copy back the way it
            # was from the backups and drop the (possibly partial) shelf.
            ui.warn("shelving failed: %s\n" % sys.exc_info()[1])
            try:
                # re-schedule remove
                matchremoved = scmutil.matchfiles(repo, removed)
                cmdutil.forget(ui, repo, matchremoved, "", True)
                for f in removed:
                    if f in newfiles and os.path.isfile(f):
                        os.unlink(f)
                # copy back backups
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
                # re-schedule add
                matchadded = scmutil.matchfiles(repo, added)
                cmdutil.add(ui, repo, matchadded, False, False, "", True)

                ui.debug('removing shelve file\n')
                if os.path.isfile(repo.wjoin(shelfpath)):
                    os.unlink(repo.join(shelfpath))
            except OSError, err:
                ui.warn("restoring backup failed: %s\n" % err)

        return 0
        # NOTE(review): the outer "try:" above has no visible handler or
        # finally clause in this excerpt (a sibling version cleans up the
        # backup dir in a finally) -- the tail appears truncated here.
def override_copy(orig, ui, repo, pats, opts, rename=False):
    # Wrapper around hg copy/rename that handles bfiles (big files).
    # Runs the wrapped command twice: once with a matcher restricted to
    # normal files, then again with a matcher restricted to bfile
    # standins, temporarily monkey-patching cmdutil.match and
    # util.copyfile to intercept what gets copied.
    # doesn't remove bfile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # Map a working-dir relative path to its .hgbfiles standin path.
        return os.path.join(os.path.relpath('.', repo.getcwd()),
                            bfutil.standin(util.canonpath(repo.root,
                                                          repo.getcwd(),
                                                          relpath)))

    fullpats = cmdutil.expandpats(pats)
    dest = fullpats[-1]
    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both bfiles and normal files in one command, but
    # we don't want to do that first replace their matcher to only match
    # normal files and run it then replace it to just match bfiles and
    # run it again
    nonormalfiles = False
    nobfiles = False
    oldmatch = cmdutil.match
    try:
        # Pass 1: normal files only.
        manifest = repo[None].manifest()
        def override_match(repo, pats=[], opts={}, globbed=False,
                           default='relpath'):
            match = oldmatch(repo, pats, opts, globbed, default)
            m = copy.copy(match)
            notbfile = lambda f: not bfutil.is_standin(f) and \
                bfutil.standin(f) not in manifest
            m._files = [f for f in m._files if notbfile(f)]
            m._fmap = set(m._files)
            orig_matchfn = m.matchfn
            m.matchfn = lambda f: notbfile(f) and orig_matchfn(f) or None
            return m
        cmdutil.match = override_match
        result = orig(ui, repo, pats, opts, rename)
    except util.Abort as e:
        # "no files to copy" just means this pass had nothing to do.
        if str(e) != 'no files to copy':
            raise e
        else:
            nonormalfiles = True
        result = 0
    finally:
        cmdutil.match = oldmatch

    # The first rename can cause our current working directory to be
    # removed. In that case there is nothing left to copy/rename so
    # just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        # Pass 2: bfile standins only.
        # When we call orig below it creates the standins but we don't
        # add them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def override_match(repo, pats=[], opts={}, globbed=False,
                           default='relpath'):
            newpats = []
            # The patterns were previously mangled to add .hgbfiles, we
            # need to remove that now
            for pat in pats:
                if match_.patkind(pat) == None and bfutil.short_name in pat:
                    newpats.append(pat.replace(bfutil.short_name, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(repo, newpats, opts, globbed, default)
            m = copy.copy(match)
            bfile = lambda f: bfutil.standin(f) in manifest
            m._files = [bfutil.standin(f) for f in m._files if bfile(f)]
            m._fmap = set(m._files)
            orig_matchfn = m.matchfn
            m.matchfn = lambda f: bfutil.is_standin(f) and \
                bfile(bfutil.split_standin(f)) and \
                orig_matchfn(bfutil.split_standin(f)) or None
            return m
        cmdutil.match = override_match
        listpats = []
        for pat in pats:
            if match_.patkind(pat) != None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            # Intercept util.copyfile so we learn exactly which standins
            # the wrapped command copied, and can mirror the big files.
            origcopyfile = util.copyfile
            copiedfiles = []
            def override_copyfile(src, dest):
                if bfutil.short_name in src and bfutil.short_name in dest:
                    destbfile = dest.replace(bfutil.short_name, '')
                    if not opts['force'] and os.path.exists(destbfile):
                        raise IOError(
                            '', _('destination bfile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)
            util.copyfile = override_copyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        # Mirror each copied standin onto the actual big file and record
        # the change in the bfiles dirstate.
        bfdirstate = bfutil.open_bfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if bfutil.short_name in src and bfutil.short_name in dest:
                srcbfile = src.replace(bfutil.short_name, '')
                destbfile = dest.replace(bfutil.short_name, '')
                destbfiledir = os.path.dirname(destbfile) or '.'
                if not os.path.isdir(destbfiledir):
                    os.makedirs(destbfiledir)
                if rename:
                    os.rename(srcbfile, destbfile)
                    bfdirstate.remove(bfutil.unixpath(
                        os.path.relpath(srcbfile, repo.root)))
                else:
                    util.copyfile(srcbfile, destbfile)
                    bfdirstate.add(bfutil.unixpath(
                        os.path.relpath(destbfile, repo.root)))
        bfdirstate.write()
    except util.Abort as e:
        if str(e) != 'no files to copy':
            raise e
        else:
            nobfiles = True
    finally:
        cmdutil.match = oldmatch
        wlock.release()

    # Only abort when BOTH passes found nothing to copy.
    if nobfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
def dodiff(ui, repo, cmdline, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''

    revs = opts.get("rev")
    change = opts.get("change")
    do3way = "$parent2" in cmdline

    if revs and change:
        msg = _("cannot specify --rev and --change at the same time")
        raise util.Abort(msg)
    elif change:
        # --change: diff a revision against its parents.
        node2 = scmutil.revsingle(repo, change, None).node()
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = scmutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.p2()
        else:
            node1b = nullid

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    matcher = scmutil.match(repo[node2], pats, opts)
    mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
    if do3way:
        mod_b, add_b, rem_b = map(set,
                                  repo.status(node1b, node2, matcher)[:3])
    else:
        mod_b, add_b, rem_b = set(), set(), set()
    modadd = mod_a | add_a | mod_b | add_b
    common = modadd | rem_a | rem_b
    if not common:
        # Nothing differs; nothing to show.
        return 0

    tmproot = tempfile.mkdtemp(prefix="extdiff.")
    try:
        # Always make a copy of node1a (and node1b, if applicable)
        dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
        dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
        rev1a = "@%d" % repo[node1a].rev()
        if do3way:
            dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
            dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
            rev1b = "@%d" % repo[node1b].rev()
        else:
            dir1b = None
            rev1b = ""

        fns_and_mtime = []

        # If node2 in not the wc or there is >1 change, copy it
        dir2root = ""
        rev2 = ""
        if node2:
            dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
            rev2 = "@%d" % repo[node2].rev()
        elif len(common) > 1:
            # we only actually need to get the files to copy back to
            # the working dir in this case (because the other cases
            # are: diffing 2 revisions or single file -- in which case
            # the file is already directly passed to the diff tool).
            dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
        else:
            # This lets the diff tool open the changed file directly
            dir2 = ""
            dir2root = repo.root

        label1a = rev1a
        label1b = rev1b
        label2 = rev2

        # If only one change, diff the files instead of the directories
        # Handle bogus modifies correctly by checking if the files exist
        if len(common) == 1:
            common_file = util.localpath(common.pop())
            dir1a = os.path.join(tmproot, dir1a, common_file)
            label1a = common_file + rev1a
            if not os.path.isfile(dir1a):
                dir1a = os.devnull
            if do3way:
                dir1b = os.path.join(tmproot, dir1b, common_file)
                label1b = common_file + rev1b
                if not os.path.isfile(dir1b):
                    dir1b = os.devnull
            dir2 = os.path.join(dir2root, dir2, common_file)
            label2 = common_file + rev2

        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = {"parent": dir1a, "parent1": dir1a, "parent2": dir1b,
                   "plabel1": label1a, "plabel2": label1b,
                   "clabel": label2, "child": dir2, "root": repo.root}
        def quote(match):
            pre = match.group(2)
            key = match.group(3)
            if not do3way and key == "parent2":
                return pre
            return pre + util.shellquote(replace[key])

        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        regex = r"""(['"]?)([^\s'"$]*)""" r"\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1"
        if not do3way and not re.search(regex, cmdline):
            cmdline += " $parent1 $child"
        cmdline = re.sub(regex, quote, cmdline)

        ui.debug("running %r in %s\n" % (cmdline, tmproot))
        ui.system(cmdline, cwd=tmproot)

        # Copy back any working-dir mirror the tool edited in place.
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.lstat(copy_fn).st_mtime != mtime:
                ui.debug("file changed while diffing. "
                         "Overwriting: %s (src: %s)\n" %
                         (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_("cleaning up temp directory\n"))
        shutil.rmtree(tmproot)
def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''

    revs = opts.get(b'rev')
    change = opts.get(b'change')
    do3way = b'$parent2' in cmdline

    if revs and change:
        msg = _(b'cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        # --change: diff a revision against its parents.
        ctx2 = scmutil.revsingle(repo, change, None)
        ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
    else:
        ctx1a, ctx2 = scmutil.revpair(repo, revs)
        if not revs:
            ctx1b = repo[None].p2()
        else:
            ctx1b = repo[nullid]

    perfile = opts.get(b'per_file')
    confirm = opts.get(b'confirm')

    node1a = ctx1a.node()
    node1b = ctx1b.node()
    node2 = ctx2.node()

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    subrepos = opts.get(b'subrepos')

    matcher = scmutil.match(repo[node2], pats, opts)

    if opts.get(b'patch'):
        # --patch mode exports the two revisions as patch files instead
        # of snapshotting file trees; several options conflict with it.
        if subrepos:
            raise error.Abort(_(b'--patch cannot be used with --subrepos'))
        if perfile:
            raise error.Abort(_(b'--patch cannot be used with --per-file'))
        if node2 is None:
            raise error.Abort(_(b'--patch requires two revisions'))
    else:
        mod_a, add_a, rem_a = map(
            set, repo.status(node1a, node2, matcher,
                             listsubrepos=subrepos)[:3])
        if do3way:
            mod_b, add_b, rem_b = map(
                set,
                repo.status(node1b, node2, matcher,
                            listsubrepos=subrepos)[:3],
            )
        else:
            mod_b, add_b, rem_b = set(), set(), set()
        modadd = mod_a | add_a | mod_b | add_b
        common = modadd | rem_a | rem_b
        if not common:
            return 0

    tmproot = pycompat.mkdtemp(prefix=b'extdiff.')
    try:
        if not opts.get(b'patch'):
            # Always make a copy of node1a (and node1b, if applicable)
            dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
            dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
                             subrepos)[0]
            rev1a = b'@%d' % repo[node1a].rev()
            if do3way:
                dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
                dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
                                 subrepos)[0]
                rev1b = b'@%d' % repo[node1b].rev()
            else:
                dir1b = None
                rev1b = b''

            fnsandstat = []

            # If node2 in not the wc or there is >1 change, copy it
            dir2root = b''
            rev2 = b''
            if node2:
                dir2 = snapshot(ui, repo, modadd, node2, tmproot,
                                subrepos)[0]
                rev2 = b'@%d' % repo[node2].rev()
            elif len(common) > 1:
                # we only actually need to get the files to copy back to
                # the working dir in this case (because the other cases
                # are: diffing 2 revisions or single file -- in which case
                # the file is already directly passed to the diff tool).
                dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
                                            subrepos)
            else:
                # This lets the diff tool open the changed file directly
                dir2 = b''
                dir2root = repo.root

            label1a = rev1a
            label1b = rev1b
            label2 = rev2

            # If only one change, diff the files instead of the directories
            # Handle bogus modifies correctly by checking if the files exist
            if len(common) == 1:
                common_file = util.localpath(common.pop())
                dir1a = os.path.join(tmproot, dir1a, common_file)
                label1a = common_file + rev1a
                if not os.path.isfile(dir1a):
                    dir1a = os.devnull
                if do3way:
                    dir1b = os.path.join(tmproot, dir1b, common_file)
                    label1b = common_file + rev1b
                    if not os.path.isfile(dir1b):
                        dir1b = os.devnull
                dir2 = os.path.join(dir2root, dir2, common_file)
                label2 = common_file + rev2
        else:
            # --patch: export both revisions as patch files and diff those.
            template = b'hg-%h.patch'
            with formatter.nullformatter(ui, b'extdiff', {}) as fm:
                cmdutil.export(
                    repo,
                    [repo[node1a].rev(), repo[node2].rev()],
                    fm,
                    fntemplate=repo.vfs.reljoin(tmproot, template),
                    match=matcher,
                )
            label1a = cmdutil.makefilename(repo[node1a], template)
            label2 = cmdutil.makefilename(repo[node2], template)
            dir1a = repo.vfs.reljoin(tmproot, label1a)
            dir2 = repo.vfs.reljoin(tmproot, label2)
            dir1b = None
            label1b = None
            fnsandstat = []

        if not perfile:
            # Run the external tool on the 2 temp directories or the patches
            cmdline = formatcmdline(
                cmdline,
                repo.root,
                do3way=do3way,
                parent1=dir1a,
                plabel1=label1a,
                parent2=dir1b,
                plabel2=label1b,
                child=dir2,
                clabel=label2,
            )
            ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline),
                                              tmproot))
            ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
        else:
            # Run the external tool once for each pair of files
            _runperfilediff(
                cmdline,
                repo.root,
                ui,
                guitool=guitool,
                do3way=do3way,
                confirm=confirm,
                commonfiles=common,
                tmproot=tmproot,
                dir1a=dir1a,
                dir1b=dir1b,
                dir2root=dir2root,
                dir2=dir2,
                rev1a=rev1a,
                rev1b=rev1b,
                rev2=rev2,
            )

        for copy_fn, working_fn, st in fnsandstat:
            cpstat = os.lstat(copy_fn)
            # Some tools copy the file and attributes, so mtime may not detect
            # all changes.  A size check will detect more cases, but not all.
            # The only certain way to detect every case is to diff all files,
            # which could be expensive.
            # copyfile() carries over the permission, so the mode check could
            # be in an 'elif' branch, but for the case where the file has
            # changed without affecting mtime or size.
            if (cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
                    or cpstat.st_size != st.st_size
                    or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
                ui.debug(b'file changed while diffing. '
                         b'Overwriting: %s (src: %s)\n' %
                         (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_(b'cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def shelvefunc(ui, repo, message, match, opts):
    # Interactively select hunks and move them from the working directory
    # into the shelf file, backing up touched files first so they can be
    # restored if patching fails.
    # NOTE(review): 'shelfpath', 'makebackup', 'parsepatch', 'filterpatch'
    # and 'refilterpatch' resolve from an enclosing scope not visible here
    # -- presumably this was a closure inside the shelve command; confirm.
    changes = repo.status(match=match)[:5]
    modified, added, removed = changes[:3]
    files = modified + added + removed
    diffopts = mdiff.diffopts(git=True, nodates=True)
    # full diff of the working directory against its first parent
    patch_diff = ''.join(patch.diff(repo, repo.dirstate.parents()[0],
                                    match=match, changes=changes,
                                    opts=diffopts))
    fp = cStringIO.StringIO(patch_diff)
    ac = parsepatch(fp)
    fp.close()
    # hunks the user chose to shelve ...
    chunks = filterpatch(ui, ac, not opts['all'])
    # ... and the complementary hunks that stay in the working copy
    rc = refilterpatch(ac, chunks)

    # files owning at least one selected hunk; headers without files()
    # raise AttributeError and are skipped
    contenders = {}
    for h in chunks:
        try:
            contenders.update(dict.fromkeys(h.files()))
        except AttributeError:
            pass

    newfiles = [f for f in files if f in contenders]
    if not newfiles:
        ui.status(_('no changes to shelve\n'))
        return 0

    # changes[0] is the 'modified' list from repo.status above
    modified = dict.fromkeys(changes[0])

    backupdir = repo.join('shelve-backups')
    try:
        # only modified files need a backup copy
        bkfiles = [f for f in newfiles if f in modified]
        backups = makebackup(ui, repo, backupdir, bkfiles)

        # patch to shelve
        sp = cStringIO.StringIO()
        for c in chunks:
            if c.filename() in backups:
                c.write(sp)
        doshelve = sp.tell()
        sp.seek(0)

        # patch to apply to shelved files
        fp = cStringIO.StringIO()
        for c in rc:
            if c.filename() in backups:
                c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)

        try:
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)

            # 3b. apply filtered patch to clean repo (apply)
            if dopatch:
                ui.debug('applying patch\n')
                ui.debug(fp.getvalue())
                patch.internalpatch(fp, ui, 1, repo.root)
            del fp

            # 3c. apply filtered patch to clean repo (shelve)
            if doshelve:
                ui.debug("saving patch to shelve\n")
                if opts['append']:
                    f = repo.opener(shelfpath, "a")
                else:
                    f = repo.opener(shelfpath, "w")
                f.write(sp.getvalue())
                del f
            del sp
        except:
            # patching failed: put the backed-up originals back and drop
            # the (possibly partial) shelf file; errors here are best-effort
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
                ui.debug('removing shelve file\n')
                os.unlink(repo.join(shelfpath))
            except OSError:
                pass

        return 0
    finally:
        # always discard the temporary backups and their directory
        try:
            for realname, tmpname in backups.iteritems():
                ui.debug('removing backup for %r : %r\n'
                         % (realname, tmpname))
                os.unlink(tmpname)
            os.rmdir(backupdir)
        except OSError:
            pass
def dodiff(ui, repo, cmdline, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    revs = opts.get('rev')
    change = opts.get('change')
    do3way = '$parent2' in cmdline

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        node2 = scmutil.revsingle(repo, change, None).node()
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = scmutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.p2()
        else:
            node1b = nullid

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    subrepos = opts.get('subrepos')

    matcher = scmutil.match(repo[node2], pats, opts)

    if opts.get('patch'):
        if subrepos:
            raise error.Abort(_('--patch cannot be used with --subrepos'))
        if node2 is None:
            raise error.Abort(_('--patch requires two revisions'))
    else:
        mod_a, add_a, rem_a = list(map(set, repo.status(node1a, node2,
                                                        matcher,
                                            listsubrepos=subrepos)[:3]))
        if do3way:
            mod_b, add_b, rem_b = list(map(set,
                                           repo.status(node1b, node2,
                                                       matcher,
                                            listsubrepos=subrepos)[:3]))
        else:
            mod_b, add_b, rem_b = set(), set(), set()
        modadd = mod_a | add_a | mod_b | add_b
        common = modadd | rem_a | rem_b
        if not common:
            return 0

    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    try:
        if not opts.get('patch'):
            # Always make a copy of node1a (and node1b, if applicable)
            dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
            dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
                             subrepos)[0]
            rev1a = '@%d' % repo[node1a].rev()
            if do3way:
                dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
                dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
                                 subrepos)[0]
                rev1b = '@%d' % repo[node1b].rev()
            else:
                dir1b = None
                rev1b = ''

            fnsandstat = []

            # If node2 is not the wc or there is >1 change, copy it
            dir2root = ''
            rev2 = ''
            if node2:
                dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
                rev2 = '@%d' % repo[node2].rev()
            elif len(common) > 1:
                # we only actually need to get the files to copy back to
                # the working dir in this case (because the other cases
                # are: diffing 2 revisions or single file -- in which case
                # the file is already directly passed to the diff tool).
                dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot,
                                            subrepos)
            else:
                # This lets the diff tool open the changed file directly
                dir2 = ''
                dir2root = repo.root

            label1a = rev1a
            label1b = rev1b
            label2 = rev2

            # If only one change, diff the files instead of the directories
            # Handle bogus modifies correctly by checking if the files exist
            if len(common) == 1:
                common_file = util.localpath(common.pop())
                dir1a = os.path.join(tmproot, dir1a, common_file)
                label1a = common_file + rev1a
                if not os.path.isfile(dir1a):
                    dir1a = os.devnull
                if do3way:
                    dir1b = os.path.join(tmproot, dir1b, common_file)
                    label1b = common_file + rev1b
                    if not os.path.isfile(dir1b):
                        dir1b = os.devnull
                dir2 = os.path.join(dir2root, dir2, common_file)
                label2 = common_file + rev2
        else:
            # --patch mode: export both revisions as patch files and diff
            # those instead of file snapshots
            template = 'hg-%h.patch'
            cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
                           fntemplate=repo.vfs.reljoin(tmproot, template),
                           match=matcher)
            label1a = cmdutil.makefilename(repo, template, node1a)
            label2 = cmdutil.makefilename(repo, template, node2)
            dir1a = repo.vfs.reljoin(tmproot, label1a)
            dir2 = repo.vfs.reljoin(tmproot, label2)
            dir1b = None
            label1b = None
            fnsandstat = []

        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
                   'plabel1': label1a, 'plabel2': label1b,
                   'clabel': label2, 'child': dir2,
                   'root': repo.root}
        def quote(match):
            pre = match.group(2)
            key = match.group(3)
            if not do3way and key == 'parent2':
                return pre
            return pre + util.shellquote(replace[key])

        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        regex = (r'''(['"]?)([^\s'"$]*)'''
                 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
        if not do3way and not re.search(regex, cmdline):
            cmdline += ' $parent1 $child'
        cmdline = re.sub(regex, quote, cmdline)

        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')

        for copy_fn, working_fn, st in fnsandstat:
            cpstat = os.lstat(copy_fn)
            # Some tools copy the file and attributes, so mtime may not detect
            # all changes.  A size check will detect more cases, but not all.
            # The only certain way to detect every case is to diff all files,
            # which could be expensive.
            # copyfile() carries over the permission, so the mode check could
            # be in an 'elif' branch, but for the case where the file has
            # changed without affecting mtime or size.
            if (cpstat.st_mtime != st.st_mtime
                or cpstat.st_size != st.st_size
                or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)):
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def shelve(ui, repo, *pats, **opts): '''interactively select changes to set aside If a list of files is omitted, all changes reported by :hg:` status` will be candidates for shelving. You will be prompted for whether to shelve changes to each modified file, and for files with multiple changes, for each change to use. The shelve command works with the Color extension to display diffs in color. On each prompt, the following responses are possible:: y - shelve this change n - skip this change s - skip remaining changes to this file f - shelve remaining changes to this file d - done, skip remaining changes and files a - shelve all changes to all remaining files q - quit, shelving no changes ? - display help ''' if not ui.interactive(): raise util.Abort(_('shelve can only be run interactively')) # List all the active shelves by name and return ' if opts['list']: listshelves(ui, repo) return forced = opts['force'] or opts['append'] # Shelf name and path shelfname = opts.get('name') shelfpath = getshelfpath(repo, shelfname) if os.path.exists(repo.join(shelfpath)) and not forced: raise util.Abort(_('shelve data already exists')) def shelvefunc(ui, repo, message, match, opts): parents = repo.dirstate.parents() changes = repo.status(match=match)[:3] modified, added, removed = changes diffopts = patch.diffopts(ui, opts={'git': True, 'nodates': True}) chunks = patch.diff(repo, changes=changes, opts=diffopts) fp = cStringIO.StringIO(''.join(chunks)) try: ac = parsepatch(fp) except patch.PatchError, err: raise util.Abort(_('error parsing patch: %s') % err) del fp # 1. 
filter patch, so we have intending-to apply subset of it chunks = filterpatch(ui, ac, not opts['all']) rc = refilterpatch(ac, chunks) # set of files to be processed contenders = set() for h in chunks: try: contenders.update(set(h.files())) except AttributeError: pass # exclude sources of copies that are otherwise untouched changed = modified + added + removed newfiles = set(f for f in changed if f in contenders) if not newfiles: ui.status(_('no changes to shelve\n')) return 0 # 2. backup changed files, so we can restore them in case of error backupdir = repo.join('shelve-backups') try: backups = makebackup(ui, repo, backupdir, newfiles) # patch to shelve sp = cStringIO.StringIO() for c in chunks: c.write(sp) # patch to apply to shelved files fp = cStringIO.StringIO() for c in rc: # skip files not selected for shelving if c.filename() in newfiles: c.write(fp) dopatch = fp.tell() fp.seek(0) try: # 3a. apply filtered patch to clean repo (clean) opts['no_backup'] = True cmdutil.revert(ui, repo, repo['.'], parents, *[os.path.join(repo.root, f) for f in newfiles], **opts) for f in added: if f in newfiles: util.unlinkpath(repo.wjoin(f)) # 3b. (apply) if dopatch: try: ui.debug('applying patch\n') ui.debug(fp.getvalue()) patch.internalpatch(ui, repo, fp, 1, eolmode=None) except patch.PatchError, err: raise util.Abort(str(err)) del fp # 4. We prepared working directory according to filtered # patch. Now is the time to save the shelved changes! 
ui.debug("saving patch to shelve\n") if opts['append']: sp.write(repo.opener(shelfpath).read()) sp.seek(0) f = repo.opener(shelfpath, "w") f.write(sp.getvalue()) del f, sp except: ui.warn("shelving failed: %s\n" % sys.exc_info()[1]) try: # re-schedule remove matchremoved = scmutil.matchfiles(repo, removed) cmdutil.forget(ui, repo, matchremoved, "", True) for f in removed: if f in newfiles and os.path.isfile(f): os.unlink(f) # copy back backups for realname, tmpname in backups.iteritems(): ui.debug('restoring %r to %r\n' % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) # Our calls to copystat() here and above are a # hack to trick any editors that have f open that # we haven't modified them. # # Also note that this racy as an editor could # notice the file's mtime before we've finished # writing it. shutil.copystat(tmpname, repo.wjoin(realname)) # re-schedule add matchadded = scmutil.matchfiles(repo, added) cmdutil.add(ui, repo, matchadded, False, False, "", True) ui.debug('removing shelve file\n') if os.path.isfile(repo.wjoin(shelfpath)): os.unlink(repo.join(shelfpath)) except OSError, err: ui.warn("restoring backup failed: %s\n" % err)
def shelvefunc(ui, repo, message, match, opts): parents = repo.dirstate.parents() changes = repo.status(match=match)[:5] modified, added, removed = changes[:3] files = modified + added + removed diffopts = mdiff.diffopts(git=True, nodates=True) patch_diff = "".join(patch.diff(repo, parents[0], match=match, changes=changes, opts=diffopts)) fp = cStringIO.StringIO(patch_diff) ac = parsepatch(fp) fp.close() chunks = filterpatch(ui, ac, not opts["all"]) rc = refilterpatch(ac, chunks) # set of files to be processed contenders = {} for h in chunks: try: contenders.update(dict.fromkeys(h.files())) except AttributeError: pass # exclude sources of copies that are otherwise untouched newfiles = set(f for f in files if f in contenders) if not newfiles: ui.status(_("no changes to shelve\n")) return 0 backupdir = repo.join("shelve-backups") try: backups = makebackup(ui, repo, backupdir, newfiles) # patch to shelve sp = cStringIO.StringIO() for c in chunks: c.write(sp) # patch to apply to shelved files fp = cStringIO.StringIO() for c in rc: # skip files not selected for shelving if c.filename() in newfiles: c.write(fp) dopatch = fp.tell() fp.seek(0) try: # 3a. apply filtered patch to clean repo (clean) opts["no_backup"] = True cmdutil.revert(ui, repo, repo["."], parents, *[os.path.join(repo.root, f) for f in newfiles], **opts) for f in added: if f in newfiles: util.unlinkpath(repo.wjoin(f)) # 3b. apply filtered patch to clean repo (apply) if dopatch: ui.debug("applying patch\n") ui.debug(fp.getvalue()) patch.internalpatch(ui, repo, fp, 1) del fp # 3c. 
apply filtered patch to clean repo (shelve) ui.debug("saving patch to shelve\n") if opts["append"]: sp.write(repo.opener(shelfpath).read()) sp.seek(0) f = repo.opener(shelfpath, "w") f.write(sp.getvalue()) del f, sp except: ui.warn("shelving failed: %s\n" % sys.exc_info()[1]) try: # re-schedule remove matchremoved = scmutil.matchfiles(repo, removed) cmdutil.forget(ui, repo, matchremoved, "", True) for f in removed: if f in newfiles and os.path.isfile(f): os.unlink(f) # copy back backups for realname, tmpname in backups.iteritems(): ui.debug("restoring %r to %r\n" % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) # re-schedule add matchadded = scmutil.matchfiles(repo, added) cmdutil.add(ui, repo, matchadded, False, False, "", True) ui.debug("removing shelve file\n") if os.path.isfile(repo.wjoin(shelfpath)): os.unlink(repo.join(shelfpath)) except OSError, err: ui.warn("restoring backup failed: %s\n" % err) return 0
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # largefiles wrapper around copy/rename: runs the wrapped command
    # once for normal files and once for largefile standins, then moves
    # the actual largefiles to match the copied standins.
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # map a working-copy path to its standin path
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # 'no files to copy' is tolerated here; it may still succeed
            # for largefiles below
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                # restrict the matcher to standins present in the manifest
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                # temporarily monkeypatch util.copyfile so we can record
                # which standins the wrapped command copies
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                # always restore the real util.copyfile
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    # translate the copied standin paths back to largefile
                    # paths and move/copy the real largefiles accordingly
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))
                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # both passes found nothing: surface the original error
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
def dorecord(ui, repo, committer, *pats, **opts): if not ui.interactive: raise util.Abort(_('running non-interactively, use commit instead')) def recordfunc(ui, repo, message, match, opts): """This is generic record driver. It's job is to interactively filter local changes, and accordingly prepare working dir into a state, where the job can be delegated to non-interactive commit command such as 'commit' or 'qrefresh'. After the actual job is done by non-interactive command, working dir state is restored to original. In the end we'll record intresting changes, and everything else will be left in place, so the user can continue his work. """ if match.files(): changes = None else: changes = repo.status(match=match)[:3] modified, added, removed = changes match = cmdutil.matchfiles(repo, modified + added + removed) diffopts = mdiff.diffopts(git=True, nodates=True) chunks = patch.diff(repo, repo.dirstate.parents()[0], match=match, changes=changes, opts=diffopts) fp = cStringIO.StringIO() fp.write(''.join(chunks)) fp.seek(0) # 1. filter patch, so we have intending-to apply subset of it if changes is not None: chunks = filterpatch(opts, parsepatch(changes, fp), chunkselector) else: chgs = repo.status(match=match)[:3] chunks = filterpatch(opts, parsepatch(chgs, fp), chunkselector) del fp contenders = {} for h in chunks: try: contenders.update(dict.fromkeys(h.files())) except AttributeError: pass newfiles = [f for f in match.files() if f in contenders] if not newfiles: ui.status(_('no changes to record\n')) return 0 if changes is None: match = cmdutil.matchfiles(repo, newfiles) changes = repo.status(match=match) modified = dict.fromkeys(changes[0]) # 2. 
backup changed files, so we can restore them in the end backups = {} backupdir = repo.join('record-backups') try: os.mkdir(backupdir) except OSError, err: if err.errno != errno.EEXIST: raise try: # backup continues for f in newfiles: if f not in modified: continue fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_') + '.', dir=backupdir) os.close(fd) ui.debug(_('backup %r as %r\n') % (f, tmpname)) util.copyfile(repo.wjoin(f), tmpname) backups[f] = tmpname fp = cStringIO.StringIO() for c in chunks: if c.filename() in backups: c.write(fp) dopatch = fp.tell() fp.seek(0) # 3a. apply filtered patch to clean repo (clean) if backups: hg.revert(repo, repo.dirstate.parents()[0], backups.has_key) # 3b. (apply) if dopatch: try: ui.debug(_('applying patch\n')) ui.debug(fp.getvalue()) patch.internalpatch(fp, ui, 1, repo.root) except patch.PatchError, err: s = str(err) if s: raise util.Abort(s) else: raise util.Abort(_('patch failed to apply')) del fp # 4. We prepared working directory according to filtered patch. # Now is the time to delegate the job to commit/qrefresh or the like! # it is important to first chdir to repo root -- we'll call a # highlevel command with list of pathnames relative to repo root cwd = os.getcwd() os.chdir(repo.root) try: committer(ui, repo, newfiles, opts) finally: os.chdir(cwd) return 0
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # largefiles wrapper around copy/rename (second variant in this file):
    # runs the wrapped command once for normal files and once for largefile
    # standins, then moves the actual largefiles to match the standins.
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # map a working-copy path to its standin path
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # 'no files to copy' is tolerated here; largefiles may still
            # be copied below
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                # restrict the matcher to standins present in the manifest
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(
                                           lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                # temporarily monkeypatch util.copyfile so we can record
                # which standins the wrapped command copies
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError(
                                '', _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                # always restore the real util.copyfile
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    # translate copied standin paths back to largefile paths
                    # and move/copy the real largefiles accordingly
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')),
                                             '')
                    destlfiledir = os.path.dirname(
                        repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))
                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # both passes found nothing: surface the original error
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    revs = opts.get('rev')
    change = opts.get('change')

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)

    matcher = cmdutil.match(repo, pats, opts)
    modified, added, removed = repo.status(node1, node2, matcher)[:3]
    if not (modified or added or removed):
        return 0

    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    dir2root = ''
    try:
        # Always make a copy of node1
        dir1 = snapshot(ui, repo, modified + removed, node1, tmproot)[0]
        changes = len(modified) + len(removed) + len(added)

        # If node2 is not the wc or there is >1 change, copy it
        if node2 or changes > 1:
            dir2, fns_and_mtime = snapshot(ui, repo, modified + added,
                                           node2, tmproot)
        else:
            # This lets the diff tool open the changed file directly
            dir2 = ''
            dir2root = repo.root
            fns_and_mtime = []

        # If only one change, diff the files instead of the directories
        if changes == 1:
            if len(modified):
                dir1 = os.path.join(dir1, util.localpath(modified[0]))
                dir2 = os.path.join(dir2root, dir2,
                                    util.localpath(modified[0]))
            elif len(removed):
                # file was removed: compare against the null device
                dir1 = os.path.join(dir1, util.localpath(removed[0]))
                dir2 = os.devnull
            else:
                # file was added: compare the null device against it
                dir1 = os.devnull
                dir2 = os.path.join(dir2root, dir2,
                                    util.localpath(added[0]))

        cmdline = ('%s %s %s %s' %
                   (util.shellquote(diffcmd), ' '.join(diffopts),
                    util.shellquote(dir1), util.shellquote(dir2)))
        ui.debug(_('running %r in %s\n') % (cmdline, tmproot))
        util.system(cmdline, cwd=tmproot)

        # copy back any snapshot file the external tool modified
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.path.getmtime(copy_fn) != mtime:
                ui.debug(_('file changed while diffing. '
                           'Overwriting: %s (src: %s)\n')
                         % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts): if not ui.interactive(): raise util.Abort(_('running non-interactively, use %s instead') % cmdsuggest) def recordfunc(ui, repo, message, match, opts): """This is generic record driver. Its job is to interactively filter local changes, and accordingly prepare working directory into a state in which the job can be delegated to a non-interactive commit command such as 'commit' or 'qrefresh'. After the actual job is done by non-interactive command, the working directory is restored to its original state. In the end we'll record interesting changes, and everything else will be left in place, so the user can continue working. """ merge = len(repo[None].parents()) > 1 if merge: raise util.Abort(_('cannot partially commit a merge ' '(use "hg commit" instead)')) changes = repo.status(match=match)[:3] diffopts = mdiff.diffopts(git=True, nodates=True, ignorews=opts.get('ignore_all_space'), ignorewsamount=opts.get('ignore_space_change'), ignoreblanklines=opts.get('ignore_blank_lines')) chunks = patch.diff(repo, changes=changes, opts=diffopts) fp = cStringIO.StringIO() fp.write(''.join(chunks)) fp.seek(0) # 1. filter patch, so we have intending-to apply subset of it chunks = filterpatch(ui, parsepatch(fp)) del fp contenders = set() for h in chunks: try: contenders.update(set(h.files())) except AttributeError: pass changed = changes[0] + changes[1] + changes[2] newfiles = [f for f in changed if f in contenders] if not newfiles: ui.status(_('no changes to record\n')) return 0 modified = set(changes[0]) # 2. 
backup changed files, so we can restore them in the end if backupall: tobackup = changed else: tobackup = [f for f in newfiles if f in modified] backups = {} if tobackup: backupdir = repo.join('record-backups') try: os.mkdir(backupdir) except OSError, err: if err.errno != errno.EEXIST: raise try: # backup continues for f in tobackup: fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.', dir=backupdir) os.close(fd) ui.debug('backup %r as %r\n' % (f, tmpname)) util.copyfile(repo.wjoin(f), tmpname) shutil.copystat(repo.wjoin(f), tmpname) backups[f] = tmpname fp = cStringIO.StringIO() for c in chunks: if c.filename() in backups: c.write(fp) dopatch = fp.tell() fp.seek(0) # 3a. apply filtered patch to clean repo (clean) if backups: hg.revert(repo, repo.dirstate.p1(), lambda key: key in backups) # 3b. (apply) if dopatch: try: ui.debug('applying patch\n') ui.debug(fp.getvalue()) patch.internalpatch(ui, repo, fp, 1, eolmode=None) except patch.PatchError, err: raise util.Abort(str(err)) del fp # 4. We prepared working directory according to filtered # patch. Now is the time to delegate the job to # commit/qrefresh or the like! # it is important to first chdir to repo root -- we'll call # a highlevel command with list of pathnames relative to # repo root cwd = os.getcwd() os.chdir(repo.root) try: commitfunc(ui, repo, *newfiles, **opts) finally: os.chdir(cwd) return 0
def restoreuntracked(repo, untracked):
    """Copy backed-up untracked files, keyed by the SHA-1 hex digest of
    their path, from the repository's task backup area back into the
    working directory."""
    for filename in untracked:
        digest = util.sha1(filename).hexdigest()
        source = repo.join('tasks/untrackedbackup/%s' % digest)
        target = util.pathto(repo.root, None, filename)
        util.copyfile(source, target)
def shelvefunc(ui, repo, message, match, opts):
    # Interactively select hunks and move them from the working directory
    # into the shelf file, backing up touched files first so they can be
    # restored if patching fails.
    # NOTE(review): 'shelfpath', 'makebackup', 'parsepatch', 'filterpatch'
    # and 'refilterpatch' resolve from an enclosing scope not visible here
    # -- presumably this was a closure inside the shelve command; confirm.
    changes = repo.status(match=match)[:5]
    modified, added, removed = changes[:3]
    files = modified + added + removed
    diffopts = mdiff.diffopts(git=True, nodates=True)
    # full diff of the working directory against its first parent
    patch_diff = ''.join(patch.diff(repo, repo.dirstate.parents()[0],
                                    match=match, changes=changes,
                                    opts=diffopts))
    fp = cStringIO.StringIO(patch_diff)
    ac = parsepatch(fp)
    fp.close()
    # hunks the user chose to shelve ...
    chunks = filterpatch(ui, ac, not opts['all'])
    # ... and the complementary hunks that stay in the working copy
    rc = refilterpatch(ac, chunks)

    # files owning at least one selected hunk; headers without files()
    # raise AttributeError and are skipped
    contenders = {}
    for h in chunks:
        try:
            contenders.update(dict.fromkeys(h.files()))
        except AttributeError:
            pass

    newfiles = [f for f in files if f in contenders]
    if not newfiles:
        ui.status(_('no changes to shelve\n'))
        return 0

    # changes[0] is the 'modified' list from repo.status above
    modified = dict.fromkeys(changes[0])

    backupdir = repo.join('shelve-backups')
    try:
        # only modified files need a backup copy
        bkfiles = [f for f in newfiles if f in modified]
        backups = makebackup(ui, repo, backupdir, bkfiles)

        # patch to shelve
        sp = cStringIO.StringIO()
        for c in chunks:
            if c.filename() in backups:
                c.write(sp)
        doshelve = sp.tell()
        sp.seek(0)

        # patch to apply to shelved files
        fp = cStringIO.StringIO()
        for c in rc:
            if c.filename() in backups:
                c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)

        try:
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)

            # 3b. apply filtered patch to clean repo (apply)
            if dopatch:
                ui.debug('applying patch\n')
                ui.debug(fp.getvalue())
                patch.internalpatch(fp, ui, 1, repo.root)
            del fp

            # 3c. apply filtered patch to clean repo (shelve)
            if doshelve:
                ui.debug("saving patch to shelve\n")
                if opts['append']:
                    f = repo.opener(shelfpath, "a")
                else:
                    f = repo.opener(shelfpath, "w")
                f.write(sp.getvalue())
                del f
            del sp
        except:
            # patching failed: put the backed-up originals back and drop
            # the (possibly partial) shelf file; errors here are best-effort
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
                ui.debug('removing shelve file\n')
                os.unlink(repo.join(shelfpath))
            except OSError:
                pass

        return 0
    finally:
        # always discard the temporary backups and their directory
        try:
            for realname, tmpname in backups.iteritems():
                ui.debug('removing backup for %r : %r\n'
                         % (realname, tmpname))
                os.unlink(tmpname)
            os.rmdir(backupdir)
        except OSError:
            pass
def shelvefunc(ui, repo, message, match, opts):
    """Interactively select hunks and store them in the repository attic.

    NOTE(review): 'name' is read from the enclosing scope when the shelf
    is written -- confirm the caller defines it before this runs.
    """
    files = []
    if match.files():
        # explicit file list given: diff exactly those files
        changes = None
    else:
        changes = repo.status(match=match)[:3]
        modified, added, removed = changes
        files = modified + added + removed
        match = cmdutil.matchfiles(repo, files)

    diffopts = repo.attic.diffopts({'git': True, 'nodates': True})
    chunks = patch.diff(repo, repo.dirstate.parents()[0], match=match,
                        changes=changes, opts=diffopts)
    fp = cStringIO.StringIO()
    fp.write(''.join(chunks))
    fp.seek(0)

    # 1. filter patch, so we have intending-to apply subset of it
    ac = record.parsepatch(fp)
    chunks = record.filterpatch(ui, ac)
    # and a not-intending-to apply subset of it
    rc = refilterpatch(ac, chunks)
    fp.close()

    contenders = {}
    for h in chunks:
        try:
            contenders.update(dict.fromkeys(h.files()))
        except AttributeError:
            # non-header chunks have no files() method
            pass

    newfiles = [f for f in files if f in contenders]
    if not newfiles:
        ui.status(_('no changes to shelve\n'))
        return 0

    modified = dict.fromkeys(changes[0])
    backups = {}
    backupdir = repo.join('shelve-backups')
    try:
        # back up the modified files so they can be restored on failure
        bkfiles = [f for f in newfiles if f in modified]
        backups = makebackup(ui, repo, backupdir, bkfiles)

        # patch to shelve
        sp = cStringIO.StringIO()
        for c in chunks:
            if c.filename() in backups:
                c.write(sp)
        doshelve = sp.tell()
        sp.seek(0)

        # patch to apply to shelved files
        fp = cStringIO.StringIO()
        for c in rc:
            if c.filename() in backups:
                c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)

        try:
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
            # 3b. apply filtered patch to clean repo (apply)
            if dopatch:
                ui.debug(_('applying patch\n'))
                ui.debug(fp.getvalue())
                patch.internalpatch(fp, ui, 1, repo.root)
            fp.close()
            # 3c. apply filtered patch to clean repo (shelve)
            if doshelve:
                ui.debug(_("saving patch to %s\n") % (name))
                s = repo.attic
                f = s.opener(name, 'w')
                f.write(sp.getvalue())
                # close explicitly instead of relying on 'del f' +
                # refcounting to flush the shelf file
                f.close()
                s.currentpatch = name
                s.persiststate()
            sp.close()
        except Exception:
            # narrowed from a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; restore the backups on failure
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
            except OSError:
                pass
            return 0
    finally:
        # always remove the backup copies and their directory
        try:
            for realname, tmpname in backups.iteritems():
                ui.debug(_('removing backup for %r : %r\n')
                         % (realname, tmpname))
                os.unlink(tmpname)
            os.rmdir(backupdir)
        except OSError:
            pass
def makebackup(ui, repo, dir, files):
    """Snapshot each existing file in 'files' into a temp file under 'dir'.

    Returns a dict mapping repo-relative name -> backup path; files that
    do not exist in the working copy are skipped.
    """
    try:
        os.mkdir(dir)
    except OSError as err:  # 'as' form: same behavior, py2.6+/py3 syntax
        if err.errno != errno.EEXIST:
            raise

    backups = {}
    for f in files:
        if os.path.isfile(repo.wjoin(f)):
            fd, tmpname = tempfile.mkstemp(prefix=f.replace("/", "_") + ".",
                                           dir=dir)
            os.close(fd)
            ui.debug("backup %r as %r\n" % (f, tmpname))
            util.copyfile(repo.wjoin(f), tmpname)
            backups[f] = tmpname
    return backups


def getshelfpath(repo, name):
    """Return the repo-relative path of the shelf called 'name'.

    With no name, fall back to a legacy single 'shelve' file if one
    exists, else to the default shelf.
    """
    if name:
        shelfpath = "shelves/" + name
    else:
        # Check if a shelf from an older version exists
        if os.path.isfile(repo.join("shelve")):
            shelfpath = "shelve"
        else:
            shelfpath = "shelves/default"
    # BUG FIX: the original computed shelfpath but fell off the end,
    # returning None to every caller.
    return shelfpath
def shelvefunc(ui, repo, message, match, opts):
    # Interactively shelve selected changes; MQ-aware variant that diffs
    # against the patch queue's base when a qtip patch is applied.
    # If an MQ patch is applied, consider all qdiff changes
    if hasattr(repo, 'mq') and repo.mq.applied and repo['.'] == repo['qtip']:
        qtip = repo['.']
        basenode = qtip.parents()[0].node()
    else:
        basenode = repo.dirstate.parents()[0]

    # status() -> (modified, added, removed, deleted, unknown, ...)
    changes = repo.status(node1=basenode, match=match)[:5]
    modified, added, removed = changes[:3]
    files = modified + added + removed

    # git-style, dateless diff so it can be re-applied cleanly later
    diffopts = mdiff.diffopts(git=True, nodates=True)
    patch_diff = ''.join(patch.diff(repo, basenode, match=match,
                                    changes=changes, opts=diffopts))

    fp = cStringIO.StringIO(patch_diff)
    ac = parsepatch(fp)
    fp.close()
    # chunks: hunks the user wants to shelve; rc: the complement
    chunks = filterpatch(ui, ac)
    rc = refilterpatch(ac, chunks)

    # files touched by at least one selected hunk
    contenders = {}
    for h in chunks:
        try:
            contenders.update(dict.fromkeys(h.files()))
        except AttributeError:
            # non-header chunks have no files() method
            pass

    newfiles = [f for f in files if f in contenders]
    if not newfiles:
        ui.status(_('no changes to shelve\n'))
        return 0

    modified = dict.fromkeys(changes[0])
    backupdir = repo.join('shelve-backups')
    # NOTE(review): 'backups' is not initialized before this try; if
    # makebackup() raises, any later cleanup referencing it would
    # NameError -- confirm against the upstream source.
    try:
        bkfiles = [f for f in newfiles if f in modified]
        backups = makebackup(ui, repo, backupdir, bkfiles)

        # patch to shelve
        sp = cStringIO.StringIO()
        for c in chunks:
            if c.filename() in backups:
                c.write(sp)
        doshelve = sp.tell()
        sp.seek(0)

        # patch to apply to shelved files
        fp = cStringIO.StringIO()
        for c in rc:
            if c.filename() in backups:
                c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)

        try:
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                hg.revert(repo, basenode, backups.has_key)
            # 3b. apply filtered patch to clean repo (apply)
            if dopatch:
                ui.debug(_('applying patch\n'))
                ui.debug(fp.getvalue())
                patch.internalpatch(fp, ui, 1, repo.root, eolmode=None)
            del fp
            # 3c. apply filtered patch to clean repo (shelve)
            if doshelve:
                ui.debug(_('saving patch to shelve\n'))
                if opts['append']:
                    f = repo.opener('shelve', "a")
                else:
                    f = repo.opener('shelve', "w")
                f.write(sp.getvalue())
                del f
            del sp
        except:
            # on any failure, restore the backed-up files and remove the
            # (possibly partial) shelve file
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
                ui.debug(_('removing shelve file\n'))
                os.unlink(repo.join('shelve'))
            except (IOError, OSError), e:
                ui.warn(_('abort: backup restore failed, %s\n') % str(e))
            return 0
    # NOTE(review): SOURCE chunk ends here -- the outer try has no
    # except/finally in view (the backup-cleanup clause appears to be
    # truncated); confirm against the upstream source.
def unshelve(ui, repo, **opts):
    """restore shelved changes"""

    # Shelf name and path
    shelfname = opts.get("name")
    shelfpath = getshelfpath(repo, shelfname)

    # List all the active shelves by name and return
    if opts["list"]:
        listshelves(ui, repo)
        return

    try:
        # read the shelved patch, closing the shelf file explicitly
        # (the original left the repo.opener() handle to the GC)
        sf = repo.opener(shelfpath)
        try:
            patch_diff = sf.read()
        finally:
            sf.close()
        fp = cStringIO.StringIO(patch_diff)
        if opts["inspect"]:
            # wrap ui.write so diff output can be labeled/colorized
            def wrapwrite(orig, *args, **kw):
                label = kw.pop("label", "")
                if label:
                    label += " "
                for chunk, l in patch.difflabel(lambda: args):
                    orig(chunk, label=label + l)
            oldwrite = ui.write
            extensions.wrapfunction(ui, "write", wrapwrite)
            try:
                ui.status(fp.getvalue())
            finally:
                ui.write = oldwrite
        else:
            # collect the files named in the shelved patch headers
            files = []
            ac = parsepatch(fp)
            for chunk in ac:
                if isinstance(chunk, header):
                    files += chunk.files()
            backupdir = repo.join("shelve-backups")
            backups = makebackup(ui, repo, backupdir, set(files))

            ui.debug("applying shelved patch\n")
            patchdone = 0
            try:
                try:
                    fp.seek(0)
                    patch.internalpatch(ui, repo, fp, 1)
                    patchdone = 1
                except Exception:
                    # narrowed from a bare 'except:' so ^C still aborts;
                    # restore the pre-unshelve backups unless forced
                    if opts["force"]:
                        patchdone = 1
                    else:
                        ui.status("restoring backup files\n")
                        for realname, tmpname in backups.iteritems():
                            ui.debug("restoring %r to %r\n"
                                     % (tmpname, realname))
                            util.copyfile(tmpname, repo.wjoin(realname))
            finally:
                try:
                    ui.debug("removing backup files\n")
                    shutil.rmtree(backupdir, True)
                except OSError:
                    pass

            if patchdone:
                ui.debug("removing shelved patches\n")
                os.unlink(repo.join(shelfpath))
                ui.status("unshelve completed\n")
    except IOError:
        # opener() raises IOError when the shelf file does not exist
        ui.warn("nothing to unshelve\n")
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    # Resolve the two revisions being compared and which files differ.
    node1, node2 = cmdutil.revpair(repo, opts['rev'])
    files, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
    modified, added, removed, deleted, unknown = repo.status(
        node1, node2, files, match=matchfn)[:5]
    if not (modified or added or removed):
        # nothing visible to diff
        return 0

    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    dir2root = ''
    try:
        # Always make a copy of node1
        dir1 = snapshot_node(ui, repo, modified + removed, node1, tmproot)
        changes = len(modified) + len(removed) + len(added)
        fns_and_mtime = []

        # If node2 in not the wc or there is >1 change, copy it
        if node2:
            dir2 = snapshot_node(ui, repo, modified + added, node2, tmproot)
        elif changes > 1:
            #we only actually need to get the files to copy back to the working
            #dir in this case (because the other cases are: diffing 2 revisions
            #or single file -- in which case the file is already directly passed
            #to the diff tool).
            dir2, fns_and_mtime = snapshot_wdir(ui, repo, modified + added,
                                                tmproot)
        else:
            # This lets the diff tool open the changed file directly
            dir2 = ''
            dir2root = repo.root

        # If only one change, diff the files instead of the directories
        if changes == 1:
            if len(modified):
                dir1 = os.path.join(dir1, util.localpath(modified[0]))
                dir2 = os.path.join(dir2root, dir2,
                                    util.localpath(modified[0]))
            elif len(removed):
                # removed file: compare old content against the null device
                dir1 = os.path.join(dir1, util.localpath(removed[0]))
                dir2 = os.devnull
            else:
                # added file: compare the null device against the new content
                dir1 = os.devnull
                dir2 = os.path.join(dir2root, dir2,
                                    util.localpath(added[0]))

        cmdline = ('%s %s %s %s' %
                   (util.shellquote(diffcmd), ' '.join(diffopts),
                    util.shellquote(dir1), util.shellquote(dir2)))
        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        util.system(cmdline, cwd=tmproot)

        # copy back any working-dir snapshot files the external tool changed
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.path.getmtime(copy_fn) != mtime:
                ui.debug('File changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    revs = opts.get('rev')
    change = opts.get('change')
    args = ' '.join(diffopts)
    do3way = '$parent2' in args

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change R: diff R against its parents
        node2 = repo.lookup(change)
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = cmdutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.parents()[1]
        else:
            node1b = nullid

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    matcher = cmdutil.match(repo, pats, opts)
    mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
    if do3way:
        mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
    else:
        mod_b, add_b, rem_b = set(), set(), set()
    modadd = mod_a | add_a | mod_b | add_b
    common = modadd | rem_a | rem_b
    if not common:
        return 0

    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    try:
        # Always make a copy of node1a (and node1b, if applicable)
        dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
        dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
        if do3way:
            dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
            dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
        else:
            dir1b = None

        fns_and_mtime = []

        # If node2 in not the wc or there is >1 change, copy it
        dir2root = ''
        if node2:
            dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
        elif len(common) > 1:
            #we only actually need to get the files to copy back to
            #the working dir in this case (because the other cases
            #are: diffing 2 revisions or single file -- in which case
            #the file is already directly passed to the diff tool).
            dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
        else:
            # This lets the diff tool open the changed file directly
            dir2 = ''
            dir2root = repo.root

        # If only one change, diff the files instead of the directories
        # Handle bogus modifies correctly by checking if the files exist
        if len(common) == 1:
            common_file = util.localpath(common.pop())
            dir1a = os.path.join(dir1a, common_file)
            if not os.path.isfile(os.path.join(tmproot, dir1a)):
                dir1a = os.devnull
            if do3way:
                dir1b = os.path.join(dir1b, common_file)
                if not os.path.isfile(os.path.join(tmproot, dir1b)):
                    dir1b = os.devnull
            dir2 = os.path.join(dir2root, dir2, common_file)

        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b, child=dir2)
        def quote(match):
            key = match.group()[1:]
            if not do3way and key == 'parent2':
                return ''
            return util.shellquote(replace[key])

        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        # FIX: raw string literal -- '\$' is not a recognized string escape,
        # so the non-raw form only worked because Python leaves unknown
        # escapes intact; the raw form is identical at runtime and lint-clean.
        regex = r'\$(parent2|parent1?|child)'
        if not do3way and not re.search(regex, args):
            args += ' $parent1 $child'
        args = re.sub(regex, quote, args)
        cmdline = util.shellquote(diffcmd) + ' ' + args

        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        util.system(cmdline, cwd=tmproot)

        # copy back any working-dir snapshot files the external tool changed
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.path.getmtime(copy_fn) != mtime:
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
if s: raise util.Abort(s) else: raise util.Abort(_('patch failed to apply')) del fp # 4. We prepared working directory according to filtered patch. # Now is the time to delegate the job to commit/qrefresh or the like! # it is important to first chdir to repo root -- we'll call a # highlevel command with list of pathnames relative to repo root newfiles = [repo.wjoin(n) for n in newfiles] commitfunc(ui, repo, *newfiles, **opts) return 0 finally: # 5. finally restore backed-up files try: for realname, tmpname in backups.iteritems(): ui.debug('restoring %r to %r\n' % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) os.unlink(tmpname) for realname, tmpname in newly_added_backups.iteritems(): ui.debug('restoring %r to %r\n' % (tmpname, realname)) util.copyfile(tmpname, repo.wjoin(realname)) os.unlink(tmpname) os.rmdir(backupdir) except OSError: pass return cmdutil.commit(ui, repo, recordfunc, pats, opts)
def dodiff(ui, repo, cmdline, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    revs = opts.get('rev')
    change = opts.get('change')
    do3way = '$parent2' in cmdline

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        # --change R: diff R against its parents
        node2 = scmutil.revsingle(repo, change, None).node()
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = scmutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.p2()
        else:
            node1b = nullid

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    subrepos=opts.get('subrepos')

    matcher = scmutil.match(repo[node2], pats, opts)

    if opts.get('patch'):
        # --patch mode exports the changesets as patch files instead of
        # snapshotting file contents
        if subrepos:
            raise error.Abort(_('--patch cannot be used with --subrepos'))
        if node2 is None:
            raise error.Abort(_('--patch requires two revisions'))
    else:
        mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
                                                   listsubrepos=subrepos)[:3])
        if do3way:
            mod_b, add_b, rem_b = map(set,
                                      repo.status(node1b, node2, matcher,
                                                  listsubrepos=subrepos)[:3])
        else:
            mod_b, add_b, rem_b = set(), set(), set()
        modadd = mod_a | add_a | mod_b | add_b
        common = modadd | rem_a | rem_b
        if not common:
            return 0

    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    try:
        if not opts.get('patch'):
            # Always make a copy of node1a (and node1b, if applicable)
            dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
            dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
                             subrepos)[0]
            rev1a = '@%d' % repo[node1a].rev()
            if do3way:
                dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
                dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
                                 subrepos)[0]
                rev1b = '@%d' % repo[node1b].rev()
            else:
                dir1b = None
                rev1b = ''

            fns_and_mtime = []

            # If node2 in not the wc or there is >1 change, copy it
            dir2root = ''
            rev2 = ''
            if node2:
                dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
                rev2 = '@%d' % repo[node2].rev()
            elif len(common) > 1:
                #we only actually need to get the files to copy back to
                #the working dir in this case (because the other cases
                #are: diffing 2 revisions or single file -- in which case
                #the file is already directly passed to the diff tool).
                dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot,
                                               subrepos)
            else:
                # This lets the diff tool open the changed file directly
                dir2 = ''
                dir2root = repo.root

            label1a = rev1a
            label1b = rev1b
            label2 = rev2

            # If only one change, diff the files instead of the directories
            # Handle bogus modifies correctly by checking if the files exist
            if len(common) == 1:
                common_file = util.localpath(common.pop())
                dir1a = os.path.join(tmproot, dir1a, common_file)
                label1a = common_file + rev1a
                if not os.path.isfile(dir1a):
                    dir1a = os.devnull
                if do3way:
                    dir1b = os.path.join(tmproot, dir1b, common_file)
                    label1b = common_file + rev1b
                    if not os.path.isfile(dir1b):
                        dir1b = os.devnull
                dir2 = os.path.join(dir2root, dir2, common_file)
                label2 = common_file + rev2
        else:
            # --patch: export both revisions as patch files and diff those
            template = 'hg-%h.patch'
            cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
                           template=repo.vfs.reljoin(tmproot, template),
                           match=matcher)
            label1a = cmdutil.makefilename(repo, template, node1a)
            label2 = cmdutil.makefilename(repo, template, node2)
            dir1a = repo.vfs.reljoin(tmproot, label1a)
            dir2 = repo.vfs.reljoin(tmproot, label2)
            dir1b = None
            label1b = None
            fns_and_mtime = []

        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
                   'plabel1': label1a, 'plabel2': label1b,
                   'clabel': label2, 'child': dir2,
                   'root': repo.root}
        def quote(match):
            pre = match.group(2)
            key = match.group(3)
            if not do3way and key == 'parent2':
                return pre
            return pre + util.shellquote(replace[key])

        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        regex = (r'''(['"]?)([^\s'"$]*)'''
                 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
        if not do3way and not re.search(regex, cmdline):
            cmdline += ' $parent1 $child'
        cmdline = re.sub(regex, quote, cmdline)

        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        ui.system(cmdline, cwd=tmproot)

        # copy back any working-dir snapshot files the external tool changed
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.lstat(copy_fn).st_mtime != mtime:
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def dorecord(ui, repo, commitfunc, *pats, **opts):
    # Hunk selection needs user input, so refuse to run without a tty.
    try:
        if not ui.interactive():
            raise util.Abort(_('running non-interactively, use commit instead'))
    except TypeError: # backwards compatibility with hg 1.1
        # ui.interactive was an attribute, not a method, in hg 1.1
        if not ui.interactive:
            raise util.Abort(_('running non-interactively, use commit instead'))

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and accordingly
        prepare working dir into a state, where the job can be delegated to
        non-interactive commit command such as 'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, working dir
        state is restored to original.

        In the end we'll record interesting changes, and everything else will be
        left in place, so the user can continue his work.
        """
        merge = len(repo[None].parents()) > 1
        if merge:
            raise util.Abort(_('cannot partially commit a merge '
                               '(use hg commit instead)'))

        # status gives back
        #     modified, added, removed, deleted, unknown, ignored, clean
        # we take only the first 3 of these
        changes = repo.status(match=match)[:3]
        modified, added, removed = changes

        diffopts = opts.copy()
        diffopts['nodates'] = True
        diffopts['git'] = True
        diffopts = patch.diffopts(ui, opts=diffopts)
        chunks = patch.diff(repo, changes=changes, opts=diffopts)
        fp = cStringIO.StringIO()
        fp.write(''.join(chunks))
        fp.seek(0)

        # 1. filter patch, so we have intending-to apply subset of it
        chunks = crpatch.filterpatch(opts,
                                     crpatch.parsepatch(changes, fp),
                                     chunk_selector.chunkselector, ui)
        del fp

        # files touched by at least one selected hunk
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # non-header chunks have no files() method
                pass

        changed = changes[0] + changes[1] + changes[2]
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        # 2. backup changed files, so we can restore them in the end
        backups = {}
        newly_added_backups = {}
        backupdir = repo.join('record-backups')
        try:
            os.mkdir(backupdir)
        except OSError, err:
            if err.errno != errno.EEXIST:
                raise
        try:
            # backup continues
            for f in newfiles:
                if f not in (modified + added):
                    continue
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                if f in modified:
                    backups[f] = tmpname
                elif f in added:
                    newly_added_backups[f] = tmpname

            fp = cStringIO.StringIO()
            all_backups = {}
            all_backups.update(backups)
            all_backups.update(newly_added_backups)
            for c in chunks:
                if c.filename() in all_backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts['crecord_reviewpatch']:
                patchtext = fp.read()
                reviewedpatch = ui.edit(patchtext, "")
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                hg.revert(repo, repo.dirstate.parents()[0],
                          lambda key: key in backups)

            # remove newly added files from 'clean' repo (so patch can apply)
            for f in newly_added_backups:
                os.unlink(f)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    # cascade through internalpatch signatures of successive
                    # Mercurial releases until one is accepted
                    if hasattr(patch, 'workingbackend'): # detect 1.9
                        patch.internalpatch(ui, repo, fp, strip=1,
                                            eolmode=None)
                    else:
                        pfiles = {}
                        try:
                            patch.internalpatch(ui, repo, fp, 1,
                                                eolmode=None)
                        except (TypeError, AttributeError): # pre 17cea10c343e
                            try:
                                patch.internalpatch(ui, repo, fp, 1,
                                                    repo.root, eolmode=None)
                            except (TypeError, AttributeError): # pre 00a881581400
                                try:
                                    patch.internalpatch(fp, ui, 1, repo.root,
                                                        files=pfiles,
                                                        eolmode=None)
                                except TypeError: # backwards compatible with hg 1.1
                                    patch.internalpatch(fp, ui, 1,
                                                        repo.root,
                                                        files=pfiles)
                        try:
                            cmdutil.updatedir(ui, repo, pfiles)
                        except AttributeError:
                            try:
                                patch.updatedir(ui, repo, pfiles)
                            except AttributeError:
                                # from 00a881581400 onwards
                                pass
                except patch.PatchError, err:
                    s = str(err)
                    if s:
                        raise util.Abort(s)
                    else:
                        raise util.Abort(_('patch failed to apply'))
            del fp

            # 4. We prepared working directory according to filtered patch.
            #    Now is the time to delegate the job to commit/qrefresh or the like!
            # it is important to first chdir to repo root -- we'll call a
            # highlevel command with list of pathnames relative to repo root
            newfiles = [repo.wjoin(n) for n in newfiles]
            commitfunc(ui, repo, *newfiles, **opts)
            return 0
            # NOTE(review): SOURCE chunk ends here; the enclosing try's
            # 'finally' (backup restore/cleanup) and dorecord's closing
            # 'return cmdutil.commit(...)' are not in view -- they appear
            # as a displaced fragment elsewhere in this file.
def makebackup(ui, repo, dir, files):
    """Snapshot each existing file in 'files' into a temp file under 'dir'.

    Returns a dict mapping repo-relative name -> backup path; files that
    do not exist in the working copy are skipped.  Mode/mtime are copied
    so the backup restores faithfully.
    """
    try:
        os.mkdir(dir)
    except OSError as err:  # 'as' form: same behavior, py2.6+/py3 syntax
        if err.errno != errno.EEXIST:
            raise

    backups = {}
    for f in files:
        if os.path.isfile(repo.wjoin(f)):
            fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_') + '.',
                                           dir=dir)
            os.close(fd)
            ui.debug('backup %r as %r\n' % (f, tmpname))
            util.copyfile(repo.wjoin(f), tmpname)
            # preserve permissions and timestamps alongside the content
            shutil.copystat(repo.wjoin(f), tmpname)
            backups[f] = tmpname
    return backups


def getshelfpath(repo, name):
    """Return the repo-relative path of the shelf called 'name'.

    With no name, fall back to a legacy single 'shelve' file if one
    exists, else to the default shelf.
    """
    if name:
        shelfpath = "shelves/" + name
    else:
        # Check if a shelf from an older version exists
        if os.path.isfile(repo.join('shelve')):
            shelfpath = 'shelve'
        else:
            shelfpath = "shelves/default"
    # BUG FIX: the original computed shelfpath but fell off the end,
    # returning None to every caller.
    return shelfpath
def shelvefunc(ui, repo, message, match, opts):
    """Interactively select hunks and store them in the repository attic.

    NOTE(review): 'name' is read from the enclosing scope when the shelf
    is written -- confirm the caller defines it before this runs.
    """
    files = []
    if match.files():
        # explicit file list given: diff exactly those files
        changes = None
    else:
        changes = repo.status(match=match)[:3]
        modified, added, removed = changes
        files = modified + added + removed
        match = cmdutil.matchfiles(repo, files)

    diffopts = repo.attic.diffopts({'git': True, 'nodates': True})
    chunks = patch.diff(repo, repo.dirstate.parents()[0], match=match,
                        changes=changes, opts=diffopts)
    fp = cStringIO.StringIO()
    fp.write(''.join(chunks))
    fp.seek(0)

    # 1. filter patch, so we have intending-to apply subset of it
    ac = record.parsepatch(fp)
    chunks = record.filterpatch(ui, ac)
    # and a not-intending-to apply subset of it
    rc = refilterpatch(ac, chunks)
    fp.close()

    contenders = {}
    for h in chunks:
        try:
            contenders.update(dict.fromkeys(h.files()))
        except AttributeError:
            # non-header chunks have no files() method
            pass

    newfiles = [f for f in files if f in contenders]
    if not newfiles:
        ui.status(_('no changes to shelve\n'))
        return 0

    modified = dict.fromkeys(changes[0])
    backups = {}
    backupdir = repo.join('shelve-backups')
    try:
        # back up the modified files so they can be restored on failure
        bkfiles = [f for f in newfiles if f in modified]
        backups = makebackup(ui, repo, backupdir, bkfiles)

        # patch to shelve
        sp = cStringIO.StringIO()
        for c in chunks:
            if c.filename() in backups:
                c.write(sp)
        doshelve = sp.tell()
        sp.seek(0)

        # patch to apply to shelved files
        fp = cStringIO.StringIO()
        for c in rc:
            if c.filename() in backups:
                c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)

        try:
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
            # 3b. apply filtered patch to clean repo (apply)
            if dopatch:
                ui.debug(_('applying patch\n'))
                ui.debug(fp.getvalue())
                patch.internalpatch(fp, ui, 1, repo.root)
            fp.close()
            # 3c. apply filtered patch to clean repo (shelve)
            if doshelve:
                ui.debug(_("saving patch to %s\n") % (name))
                s = repo.attic
                f = s.opener(name, 'w')
                f.write(sp.getvalue())
                # close explicitly instead of relying on 'del f' +
                # refcounting to flush the shelf file
                f.close()
                s.currentpatch = name
                s.persiststate()
            sp.close()
        except Exception:
            # narrowed from a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; restore the backups on failure
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
            except OSError:
                pass
            return 0
    finally:
        # always remove the backup copies and their directory
        try:
            for realname, tmpname in backups.iteritems():
                ui.debug(_('removing backup for %r : %r\n')
                         % (realname, tmpname))
                os.unlink(tmpname)
            os.rmdir(backupdir)
        except OSError:
            pass
def unshelve(ui, repo, **opts):
    '''restore shelved changes'''

    # Shelf name and path
    shelfname = opts.get('name')
    shelfpath = getshelfpath(repo, shelfname)

    # List all the active shelves by name and return
    if opts['list']:
        listshelves(ui, repo)
        return

    try:
        # read the shelved patch, closing the shelf file explicitly
        # (the original left the repo.opener() handle to the GC)
        sf = repo.opener(shelfpath)
        try:
            patch_diff = sf.read()
        finally:
            sf.close()
        fp = cStringIO.StringIO(patch_diff)
        if opts['inspect']:
            # wrap ui.write so diff output can be labeled/colorized
            def wrapwrite(orig, *args, **kw):
                label = kw.pop('label', '')
                if label:
                    label += ' '
                for chunk, l in patch.difflabel(lambda: args):
                    orig(chunk, label=label + l)
            oldwrite = ui.write
            extensions.wrapfunction(ui, 'write', wrapwrite)
            try:
                ui.status(fp.getvalue())
            finally:
                ui.write = oldwrite
        else:
            # collect the files named in the shelved patch headers
            files = []
            ac = parsepatch(fp)
            for chunk in ac:
                if isinstance(chunk, header):
                    files += chunk.files()
            backupdir = repo.join('shelve-backups')
            backups = makebackup(ui, repo, backupdir, set(files))

            ui.debug('applying shelved patch\n')
            patchdone = 0
            try:
                try:
                    fp.seek(0)
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                    patchdone = 1
                except Exception:
                    # narrowed from a bare 'except:' so ^C still aborts;
                    # restore the pre-unshelve backups unless forced
                    if opts['force']:
                        patchdone = 1
                    else:
                        ui.status('restoring backup files\n')
                        for realname, tmpname in backups.iteritems():
                            ui.debug('restoring %r to %r\n'
                                     % (tmpname, realname))
                            util.copyfile(tmpname, repo.wjoin(realname))
            finally:
                try:
                    ui.debug('removing backup files\n')
                    shutil.rmtree(backupdir, True)
                except OSError:
                    pass

            if patchdone:
                ui.debug("removing shelved patches\n")
                os.unlink(repo.join(shelfpath))
                ui.status("unshelve completed\n")
    except IOError:
        # opener() raises IOError when the shelf file does not exist
        ui.warn('nothing to unshelve\n')
def dodiff():
    # Nested helper: launches the external diff tool on the snapshots.
    # NOTE(review): all inputs (hascopies, MAR, mod_a/add_a/rem_a,
    # mod_b/add_b/rem_b, ctx1a/ctx1b/ctx2, cpy, cto, do3way, diffcmd,
    # args, repo, ui) are free variables from the enclosing scope --
    # confirm their meaning against the enclosing function.
    assert not (hascopies and len(MAR) > 1), \
            'dodiff cannot handle copies when diffing dirs'
    sa = [mod_a, add_a, rem_a]
    sb = [mod_b, add_b, rem_b]
    ctxs = [ctx1a, ctx1b, ctx2]

    # If more than one file, diff on working dir copy.
    copyworkingdir = len(MAR) > 1
    dirs, labels, fns_and_mtimes = snapshotset(repo, ctxs, sa, sb, cpy,
                                               copyworkingdir)
    dir1a, dir1b, dir2 = dirs
    label1a, label1b, label2 = labels
    fns_and_mtime = fns_and_mtimes[2]

    if len(MAR) > 1 and label2 == '':
        label2 = 'working files'

    def getfile(fname, dir, label):
        # Resolve a snapshot file; substitute an empty placeholder file
        # when the snapshot does not contain it.
        file = os.path.join(qtlib.gettempdir(), dir, fname)
        if os.path.isfile(file):
            return fname + label, file
        nullfile = os.path.join(qtlib.gettempdir(), 'empty')
        fp = open(nullfile, 'w')
        fp.close()
        return (hglib.fromunicode(_nonexistant, 'replace') + label, nullfile)

    # If only one change, diff the files instead of the directories
    # Handle bogus modifies correctly by checking if the files exist
    if len(MAR) == 1:
        file2 = MAR.pop()
        file2local = util.localpath(file2)
        if file2 in cto:
            # renamed/copied: diff against the copy source
            file1 = util.localpath(cpy[file2])
        else:
            file1 = file2
        label1a, dir1a = getfile(file1, dir1a, label1a)
        if do3way:
            label1b, dir1b = getfile(file1, dir1b, label1b)
        label2, dir2 = getfile(file2local, dir2, label2)
    if do3way:
        label1a += '[local]'
        label1b += '[other]'
        label2 += '[merged]'

    # substitution table for $parent/$child style placeholders in the
    # configured tool command line
    replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
                   plabel1=label1a, plabel2=label1b,
                   phash1=str(ctx1a), phash2=str(ctx1b),
                   repo=hglib.fromunicode(repo.displayname),
                   clabel=label2, child=dir2, chash=str(ctx2))
    launchtool(diffcmd, args, replace, True)

    # detect if changes were made to mirrored working files
    for copy_fn, working_fn, mtime in fns_and_mtime:
        try:
            if os.lstat(copy_fn).st_mtime != mtime:
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn,
                                                          copy_fn))
                util.copyfile(copy_fn, working_fn)
        except EnvironmentError:
            pass # Ignore I/O errors or missing files
def dodiff(ui, repo, cmdline, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    revs = opts.get('rev')
    change = opts.get('change')
    do3way = '$parent2' in cmdline

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change R: diff R against its parents
        node2 = scmutil.revsingle(repo, change, None).node()
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = scmutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.p2()
        else:
            node1b = nullid

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    matcher = scmutil.match(repo[node2], pats, opts)
    mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
    if do3way:
        mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
    else:
        mod_b, add_b, rem_b = set(), set(), set()
    modadd = mod_a | add_a | mod_b | add_b
    common = modadd | rem_a | rem_b
    if not common:
        return 0

    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    try:
        # Always make a copy of node1a (and node1b, if applicable)
        dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
        dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
        rev1a = '@%d' % repo[node1a].rev()
        if do3way:
            dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
            dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
            rev1b = '@%d' % repo[node1b].rev()
        else:
            dir1b = None
            rev1b = ''

        fns_and_mtime = []

        # If node2 in not the wc or there is >1 change, copy it
        dir2root = ''
        rev2 = ''
        if node2:
            dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
            rev2 = '@%d' % repo[node2].rev()
        elif len(common) > 1:
            #we only actually need to get the files to copy back to
            #the working dir in this case (because the other cases
            #are: diffing 2 revisions or single file -- in which case
            #the file is already directly passed to the diff tool).
            dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
        else:
            # This lets the diff tool open the changed file directly
            dir2 = ''
            dir2root = repo.root

        label1a = rev1a
        label1b = rev1b
        label2 = rev2

        # If only one change, diff the files instead of the directories
        # Handle bogus modifies correctly by checking if the files exist
        if len(common) == 1:
            common_file = util.localpath(common.pop())
            dir1a = os.path.join(tmproot, dir1a, common_file)
            label1a = common_file + rev1a
            if not os.path.isfile(dir1a):
                dir1a = os.devnull
            if do3way:
                dir1b = os.path.join(tmproot, dir1b, common_file)
                label1b = common_file + rev1b
                if not os.path.isfile(dir1b):
                    dir1b = os.devnull
            dir2 = os.path.join(dir2root, dir2, common_file)
            label2 = common_file + rev2

        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = { 'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
                    'plabel1': label1a, 'plabel2': label1b,
                    'clabel': label2, 'child': dir2,
                    'root': repo.root }
        def quote(match):
            pre = match.group(2)
            key = match.group(3)
            if not do3way and key == 'parent2':
                return pre
            return pre + util.shellquote(replace[key])

        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        regex = (r'''(['"]?)([^\s'"$]*)'''
                 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
        if not do3way and not re.search(regex, cmdline):
            cmdline += ' $parent1 $child'
        cmdline = re.sub(regex, quote, cmdline)

        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        ui.system(cmdline, cwd=tmproot)

        # copy back any working-dir snapshot files the external tool changed
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.lstat(copy_fn).st_mtime != mtime:
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def dorecord(ui, repo, commitfunc, *pats, **opts):
    # Interactive hunk selection is meaningless without a terminal.
    if not ui.interactive():
        raise util.Abort(_('running non-interactively, use commit instead'))

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and accordingly
        prepare working directory into a state in which the job can be
        delegated to a non-interactive commit command such as 'commit' or
        'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        merge = len(repo[None].parents()) > 1
        if merge:
            # A partial commit of a merge would lose one parent's changes.
            raise util.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        changes = repo.status(match=match)[:3]
        # git-style diffs carry rename/copy/mode info; no dates keeps the
        # patch stable for re-application.
        diffopts = mdiff.diffopts(git=True, nodates=True)
        chunks = patch.diff(repo, changes=changes, opts=diffopts)
        fp = cStringIO.StringIO()
        fp.write(''.join(chunks))
        fp.seek(0)

        # 1. filter patch, so we have intending-to apply subset of it
        chunks = filterpatch(ui, parsepatch(fp))
        del fp

        # Files touched by at least one selected hunk.  Some chunk
        # objects apparently have no files() method, hence the guard.
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = changes[0] + changes[1] + changes[2]
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(changes[0])

        # 2. backup changed files, so we can restore them in the end
        backups = {}
        backupdir = repo.join('record-backups')
        try:
            os.mkdir(backupdir)
        except OSError, err:
            # Directory left over from a previous run is fine.
            if err.errno != errno.EEXIST:
                raise
        try:
            # backup continues
            for f in newfiles:
                # Only pre-existing modified files need a backup; added
                # files have nothing to restore.
                if f not in modified:
                    continue
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')
                                               + '.', dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                shutil.copystat(repo.wjoin(f), tmpname)
                backups[f] = tmpname

            # Re-serialize only the hunks belonging to backed-up files.
            fp = cStringIO.StringIO()
            for c in chunks:
                if c.filename() in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                hg.revert(repo, repo.dirstate.parents()[0],
                          lambda key: key in backups)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    pfiles = {}
                    patch.internalpatch(fp, ui, 1, repo.root, files=pfiles,
                                        eolmode=None)
                    cmdutil.updatedir(ui, repo, pfiles)
                except patch.PatchError, err:
                    raise util.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # it is important to first chdir to repo root -- we'll call
            # a highlevel command with list of pathnames relative to
            # repo root
            cwd = os.getcwd()
            os.chdir(repo.root)
            try:
                commitfunc(ui, repo, *newfiles, **opts)
            finally:
                os.chdir(cwd)

            return 0
            # NOTE(review): this chunk is truncated here — the 'try' at
            # "backup continues" has its matching 'finally:' (which
            # restores the backed-up files) outside this view; confirm
            # against the full file.
backups = {} if tobackup: backupdir = repo.join("record-backups") try: os.mkdir(backupdir) except OSError, err: if err.errno != errno.EEXIST: raise try: # backup continues for f in tobackup: fd, tmpname = tempfile.mkstemp(prefix=f.replace("/", "_") + ".", dir=backupdir) os.close(fd) ui.debug("backup %r as %r\n" % (f, tmpname)) util.copyfile(repo.wjoin(f), tmpname) shutil.copystat(repo.wjoin(f), tmpname) backups[f] = tmpname fp = cStringIO.StringIO() for c in chunks: if c.filename() in backups: c.write(fp) dopatch = fp.tell() fp.seek(0) # 3a. apply filtered patch to clean repo (clean) if backups: hg.revert(repo, repo.dirstate.p1(), lambda key: key in backups) # 3b. (apply)
def diffrevs(
    ui,
    repo,
    ctx1a,
    ctx1b,
    ctx2,
    matcher,
    tmproot,
    cmdline,
    do3way,
    guitool,
    opts,
):
    """Snapshot the revisions into tmproot and run the external diff tool.

    ctx1a/ctx1b are the parent contexts (ctx1b only used when do3way),
    ctx2 is the child; matcher restricts the files considered.  Returns
    0 when no file differs, 1 after the tool has been run.  Any
    working-copy files the tool modified in the snapshot are copied
    back at the end.
    """

    subrepos = opts.get(b'subrepos')

    # calculate list of files changed between both revs
    st = ctx1a.status(ctx2, matcher, listsubrepos=subrepos)
    mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
    if do3way:
        stb = ctx1b.status(ctx2, matcher, listsubrepos=subrepos)
        mod_b, add_b, rem_b = (
            set(stb.modified),
            set(stb.added),
            set(stb.removed),
        )
    else:
        mod_b, add_b, rem_b = set(), set(), set()
    modadd = mod_a | add_a | mod_b | add_b
    common = modadd | rem_a | rem_b
    if not common:
        # No file differs between the chosen revisions.
        return 0

    # Always make a copy of ctx1a (and ctx1b, if applicable)
    # dir1a should contain files which are:
    #   * modified or removed from ctx1a to ctx2
    #   * modified or added from ctx1b to ctx2
    #     (except file added from ctx1a to ctx2 as they were not present
    #     in ctx1a)
    dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
    dir1a = snapshot(ui, repo, dir1a_files, ctx1a.node(), tmproot, subrepos)[0]
    # Working-directory contexts have rev() of None — label them empty.
    rev1a = b'' if ctx1a.rev() is None else b'@%d' % ctx1a.rev()
    if do3way:
        # file calculation criteria same as dir1a
        dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
        dir1b = snapshot(
            ui, repo, dir1b_files, ctx1b.node(), tmproot, subrepos
        )[0]
        rev1b = b'@%d' % ctx1b.rev()
    else:
        dir1b = None
        rev1b = b''

    fnsandstat = []

    # If ctx2 is not the wc or there is >1 change, copy it
    dir2root = b''
    rev2 = b''
    if ctx2.node() is not None:
        dir2 = snapshot(ui, repo, modadd, ctx2.node(), tmproot, subrepos)[0]
        rev2 = b'@%d' % ctx2.rev()
    elif len(common) > 1:
        # we only actually need to get the files to copy back to
        # the working dir in this case (because the other cases
        # are: diffing 2 revisions or single file -- in which case
        # the file is already directly passed to the diff tool).
        dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot, subrepos)
    else:
        # This lets the diff tool open the changed file directly
        dir2 = b''
        dir2root = repo.root

    label1a = rev1a
    label1b = rev1b
    label2 = rev2

    if not opts.get(b'per_file'):
        # If only one change, diff the files instead of the directories
        # Handle bogus modifies correctly by checking if the files exist
        if len(common) == 1:
            common_file = util.localpath(common.pop())
            dir1a = os.path.join(tmproot, dir1a, common_file)
            label1a = common_file + rev1a
            if not os.path.isfile(dir1a):
                # File missing on that side: diff against the null device.
                dir1a = pycompat.osdevnull
            if do3way:
                dir1b = os.path.join(tmproot, dir1b, common_file)
                label1b = common_file + rev1b
                if not os.path.isfile(dir1b):
                    dir1b = pycompat.osdevnull
            dir2 = os.path.join(dir2root, dir2, common_file)
            label2 = common_file + rev2

        # Run the external tool on the 2 temp directories or the patches
        cmdline = formatcmdline(
            cmdline,
            repo.root,
            do3way=do3way,
            parent1=dir1a,
            plabel1=label1a,
            parent2=dir1b,
            plabel2=label1b,
            child=dir2,
            clabel=label2,
        )
        ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
        ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
    else:
        # Run the external tool once for each pair of files
        _runperfilediff(
            cmdline,
            repo.root,
            ui,
            guitool=guitool,
            do3way=do3way,
            confirm=opts.get(b'confirm'),
            commonfiles=common,
            tmproot=tmproot,
            dir1a=os.path.join(tmproot, dir1a),
            dir1b=os.path.join(tmproot, dir1b) if do3way else None,
            dir2=os.path.join(dir2root, dir2),
            rev1a=rev1a,
            rev1b=rev1b,
            rev2=rev2,
        )

    # Copy back working-copy files the tool changed in the snapshot.
    for copy_fn, working_fn, st in fnsandstat:
        cpstat = os.lstat(copy_fn)
        # Some tools copy the file and attributes, so mtime may not detect
        # all changes.  A size check will detect more cases, but not all.
        # The only certain way to detect every case is to diff all files,
        # which could be expensive.
        # copyfile() carries over the permission, so the mode check could
        # be in an 'elif' branch, but for the case where the file has
        # changed without affecting mtime or size.
        if (
            cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
            or cpstat.st_size != st.st_size
            or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
        ):
            ui.debug(
                b'file changed while diffing. '
                b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
            )
            util.copyfile(copy_fn, working_fn)
    return 1