def pullnarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps pull command to allow modifying narrow spec.

    For a repository that already carries the narrow requirement, the
    bundle2 pull preparation is temporarily wrapped so that a --depth
    option given to ``hg pull`` is forwarded to the exchange layer.
    """
    extraprepare_ctx = util.nullcontextmanager()

    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Pass --depth through to the bundle2 negotiation arguments.
        def _extraprepare_widen(orig, pullop, kwargs):
            orig(pullop, kwargs)
            depth = opts.get(r'depth')
            if depth:
                kwargs['depth'] = depth

        extraprepare_ctx = extensions.wrappedfunction(
            exchange, '_pullbundle2extraprepare', _extraprepare_widen)

    with extraprepare_ctx:
        return orig(ui, repo, *args, **opts)
def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone().

    Installs two temporary wrappers around the exchange layer:
    one that seeds the narrowspec from the clone's --include/--exclude
    flags before the bundle2 pull is prepared, and one that adds the
    narrow requirement to the new repository before the first pull.
    """
    opts = pycompat.byteskwargs(opts)
    wrappedextraprepare = util.nullcontextmanager()
    # Remember the flag now: 'opts' keys were converted by byteskwargs,
    # and the closures below capture this value.
    opts_narrow = opts['narrow']
    if opts_narrow:
        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            # Create narrow spec patterns from clone flags
            includepats = narrowspec.parsepatterns(opts['include'])
            excludepats = narrowspec.parsepatterns(opts['exclude'])

            # If necessary, ask the server to expand the narrowspec.
            includepats, excludepats = expandpull(
                pullop, includepats, excludepats)

            if not includepats and excludepats:
                # If nothing was included, we assume the user meant to include
                # everything, except what they asked to exclude.
                includepats = {'path:.'}

            pullop.repo.setnarrowpats(includepats, excludepats)

            # This will populate 'includepats' etc with the values from the
            # narrowspec we just saved.
            orig(pullop, kwargs)

            if opts.get('depth'):
                kwargs['depth'] = opts['depth']
        wrappedextraprepare = extensions.wrappedfunction(
            exchange, '_pullbundle2extraprepare',
            pullbundle2extraprepare_widen)

    def pullnarrow(orig, repo, *args, **kwargs):
        # Record the narrow requirement on disk before any data arrives,
        # so a partially-pulled repo is still recognized as narrow.
        if opts_narrow:
            repo.requirements.add(changegroup.NARROW_REQUIREMENT)
            repo._writerequirements()

        return orig(repo, *args, **kwargs)

    wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)

    with wrappedextraprepare, wrappedpull:
        return orig(ui, repo, *args, **pycompat.strkwargs(opts))
def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone().

    When --narrowspec is given, the include/exclude patterns are read
    from that file and merged into the clone options; a narrow clone is
    then implied.  For narrow clones, the bundle2 pull preparation is
    wrapped so a --depth option reaches the exchange layer.
    """
    opts = pycompat.byteskwargs(opts)
    extraprepare_ctx = util.nullcontextmanager()
    specfile = opts[b'narrowspec']

    if specfile:
        # Load and validate patterns from the user-supplied spec file.
        specpath = os.path.join(encoding.getcwd(), specfile)
        ui.status(_(b"reading narrowspec from '%s'\n") % specpath)
        try:
            spec_data = util.readfile(specpath)
        except IOError as inst:
            raise error.Abort(
                _(b"cannot read narrowspecs from '%s': %s")
                % (specpath, encoding.strtolocal(inst.strerror)))

        includes, excludes, profiles = sparse.parseconfig(
            ui, spec_data, b'narrow')
        if profiles:
            raise error.Abort(
                _(b"cannot specify other files using '%include' in"
                  b" narrowspec"))

        narrowspec.validatepatterns(includes)
        narrowspec.validatepatterns(excludes)

        # narrowspec is passed so we should assume that user wants narrow clone
        opts[b'narrow'] = True
        opts[b'include'].extend(includes)
        opts[b'exclude'].extend(excludes)

    if opts[b'narrow']:
        # Forward --depth to the bundle2 negotiation arguments.
        def _extraprepare_widen(orig, pullop, kwargs):
            orig(pullop, kwargs)
            depth = opts.get(b'depth')
            if depth:
                kwargs[b'depth'] = depth

        extraprepare_ctx = extensions.wrappedfunction(
            exchange, b'_pullbundle2extraprepare', _extraprepare_widen)

    with extraprepare_ctx:
        return orig(ui, repo, *args, **pycompat.strkwargs(opts))
def handlechangegroup_widen(op, inpart): """Changegroup exchange handler which restores temporarily-stripped nodes""" # We saved a bundle with stripped node data we must now restore. # This approach is based on mercurial/repair.py@6ee26a53c111. repo = op.repo ui = op.ui chgrpfile = op._widen_bundle del op._widen_bundle vfs = repo.vfs ui.note(_(b"adding branch\n")) f = vfs.open(chgrpfile, b"rb") try: gen = exchange.readbundle(ui, f, chgrpfile, vfs) # silence internal shuffling chatter maybe_silent = (ui.silent() if not ui.verbose else util.nullcontextmanager()) with maybe_silent: if isinstance(gen, bundle2.unbundle20): with repo.transaction(b'strip') as tr: bundle2.processbundle(repo, gen, lambda: tr) else: gen.apply(repo, b'strip', b'bundle:' + vfs.join(chgrpfile), True) finally: f.close() # remove undo files for undovfs, undofile in repo.undofiles(): try: undovfs.unlink(undofile) except OSError as e: if e.errno != errno.ENOENT: ui.warn( _(b'error removing %s: %s\n') % (undovfs.join(undofile), stringutil.forcebytestr(e))) # Remove partial backup only if there were no exceptions op._widen_uninterr.__exit__(None, None, None) vfs.unlink(chgrpfile)
def status(
    self,
    node1=b'.',
    node2=None,
    match=None,
    ignored=False,
    clean=False,
    unknown=False,
    listsubrepos=False,
):
    """Compute status with largefile names folded into the result.

    Same interface as the base repository ``status()``.  When
    ``self.lfstatus`` is false the call is simply forwarded to the
    superclass; otherwise the result lists are rewritten so callers see
    largefile names instead of their standin files.
    """
    listignored, listclean, listunknown = ignored, clean, unknown
    orig = super(lfilesrepo, self).status
    if not self.lfstatus:
        # Largefile-aware reporting not requested: plain status.
        return orig(
            node1,
            node2,
            match,
            listignored,
            listclean,
            listunknown,
            listsubrepos,
        )

    # some calls in this function rely on the old version of status
    self.lfstatus = False
    ctx1 = self[node1]
    ctx2 = self[node2]
    working = ctx2.rev() is None
    parentworking = working and ctx1 == self[b'.']

    if match is None:
        match = matchmod.always()

    try:
        # updating the dirstate is optional
        # so we don't wait on the lock
        wlock = self.wlock(False)
        gotlock = True
    except error.LockError:
        wlock = util.nullcontextmanager()
        gotlock = False
    with wlock:
        # First check if paths or patterns were specified on the
        # command line.  If there were, and they don't match any
        # largefiles, we should just bail here and let super
        # handle it -- thus gaining a big performance boost.
        # NOTE(review): 'ui' is not a parameter or local of this method;
        # presumably it is captured from an enclosing scope -- confirm
        # against the full file.
        lfdirstate = lfutil.openlfdirstate(ui, self)
        if not match.always():
            for f in lfdirstate:
                if match(f):
                    break
            else:
                return orig(
                    node1,
                    node2,
                    match,
                    listignored,
                    listclean,
                    listunknown,
                    listsubrepos,
                )

        # Create a copy of match that matches standins instead
        # of largefiles.
        def tostandins(files):
            if not working:
                return files
            newfiles = []
            dirstate = self.dirstate
            for f in files:
                sf = lfutil.standin(f)
                if sf in dirstate:
                    newfiles.append(sf)
                elif dirstate.hasdir(sf):
                    # Directory entries could be regular or
                    # standin, check both
                    newfiles.extend((f, sf))
                else:
                    newfiles.append(f)
            return newfiles

        m = copy.copy(match)
        m._files = tostandins(m._files)

        result = orig(node1, node2, m, ignored, clean, unknown, listsubrepos)
        if working:

            def sfindirstate(f):
                # True when f's standin is tracked, as a file or directory.
                sf = lfutil.standin(f)
                dirstate = self.dirstate
                return sf in dirstate or dirstate.hasdir(sf)

            match._files = [f for f in match._files if sfindirstate(f)]
            # Don't waste time getting the ignored and unknown
            # files from lfdirstate
            unsure, s = lfdirstate.status(
                match,
                subrepos=[],
                ignored=False,
                clean=listclean,
                unknown=False,
            )
            (modified, added, removed, deleted, clean) = (
                s.modified,
                s.added,
                s.removed,
                s.deleted,
                s.clean,
            )
            if parentworking:
                # Resolve each 'unsure' largefile by comparing its hash
                # against the standin recorded in the first parent.
                for lfile in unsure:
                    standin = lfutil.standin(lfile)
                    if standin not in ctx1:
                        # from second parent
                        modified.append(lfile)
                    elif lfutil.readasstandin(
                        ctx1[standin]
                    ) != lfutil.hashfile(self.wjoin(lfile)):
                        modified.append(lfile)
                    else:
                        if listclean:
                            clean.append(lfile)
                        lfdirstate.normal(lfile)
            else:
                # Comparing against a non-parent revision: re-check hash
                # and executable bit for every candidate largefile.
                tocheck = unsure + modified + added + clean
                modified, added, clean = [], [], []
                checkexec = self.dirstate._checkexec

                for lfile in tocheck:
                    standin = lfutil.standin(lfile)
                    if standin in ctx1:
                        abslfile = self.wjoin(lfile)
                        if (
                            lfutil.readasstandin(ctx1[standin])
                            != lfutil.hashfile(abslfile)
                        ) or (
                            checkexec
                            and (b'x' in ctx1.flags(standin))
                            != bool(lfutil.getexecutable(abslfile))
                        ):
                            modified.append(lfile)
                        elif listclean:
                            clean.append(lfile)
                    else:
                        added.append(lfile)

            # at this point, 'removed' contains largefiles
            # marked as 'R' in the working context.
            # then, largefiles not managed also in the target
            # context should be excluded from 'removed'.
            removed = [
                lfile
                for lfile in removed
                if lfutil.standin(lfile) in ctx1
            ]

            # Standins no longer found in lfdirstate have been deleted
            for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
                lfile = lfutil.splitstandin(standin)
                if not match(lfile):
                    continue
                if lfile not in lfdirstate:
                    deleted.append(lfile)
                    # Sync "largefile has been removed" back to the
                    # standin. Removing a file as a side effect of
                    # running status is gross, but the alternatives (if
                    # any) are worse.
                    self.wvfs.unlinkpath(standin, ignoremissing=True)

            # Filter result lists
            result = list(result)

            # Largefiles are not really removed when they're
            # still in the normal dirstate. Likewise, normal
            # files are not really removed if they are still in
            # lfdirstate. This happens in merges where files
            # change type.
            removed = [f for f in removed if f not in self.dirstate]
            result[2] = [f for f in result[2] if f not in lfdirstate]

            lfiles = set(lfdirstate)
            # Unknown files
            result[4] = set(result[4]).difference(lfiles)
            # Ignored files
            result[5] = set(result[5]).difference(lfiles)

            # combine normal files and largefiles
            normals = [
                [fn for fn in filelist if not lfutil.isstandin(fn)]
                for filelist in result
            ]
            lfstatus = (
                modified,
                added,
                removed,
                deleted,
                [],
                [],
                clean,
            )
            result = [
                sorted(list1 + list2)
                for (list1, list2) in zip(normals, lfstatus)
            ]
        else:  # not against working directory
            # Just map standin names back to largefile names.
            result = [
                [lfutil.splitstandin(f) or f for f in items]
                for items in result
            ]

        if gotlock:
            lfdirstate.write()

    self.lfstatus = True
    return scmutil.status(*result)