def dumps(obj, paranoid=True):
    """Serialize obj into a JSON-formatted string.

    Handles None/bool/number scalars, bytes (escaped via
    encoding.jsonescape), str (delegated to the system json module),
    mappings (anything with .keys()), and other iterables.  Raises
    TypeError for anything else.
    """
    if obj is None:
        return "null"
    if obj is False:
        return "false"
    if obj is True:
        return "true"
    if isinstance(obj, (int, long, float)):
        return pycompat.bytestr(obj)
    if isinstance(obj, bytes):
        return '"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
    if isinstance(obj, str):
        return _sysjson.dumps(obj)
    if util.safehasattr(obj, "keys"):
        # Mapping: emit keys in sorted order for deterministic output.
        pairs = []
        for key, value in sorted(pycompat.iteritems(obj)):
            if isinstance(key, bytes):
                encodedkey = '"%s"' % encoding.jsonescape(key, paranoid=paranoid)
            else:
                encodedkey = _sysjson.dumps(key)
            pairs.append("%s: %s" % (encodedkey, dumps(value, paranoid)))
        return "{" + ", ".join(pairs) + "}"
    if util.safehasattr(obj, "__iter__"):
        return "[" + ", ".join(dumps(item, paranoid) for item in obj) + "]"
    raise TypeError("cannot encode type %s" % obj.__class__.__name__)
def populateresponseforphab(repo, diffnum):
    """:populateresponse: Runs the memoization function
    for use of phabstatus and sync status
    """
    if not hgutil.safehasattr(repo, "_phabstatusrevs"):
        return
    # Cached data already present for this diff -- nothing to do.
    if (repo, diffnum) in getattr(repo, "_phabstatuscache", {}):
        return

    upcoming = repo._phabstatusrevs.peekahead()
    if repo._phabstatusrevs.done:
        # Nothing left to process; remove the attribute so the guard
        # above bails out earlier next time.
        del repo._phabstatusrevs

    okdiffnumbers = {
        d
        for d in (getdiffnum(repo, repo.unfiltered()[rev]) for rev in upcoming)
        if d is not None
    }
    # Make sure we always include the requested diff number
    okdiffnumbers.add(diffnum)
    # To populate the cache, the result will be used by the templater
    getdiffstatus(repo, *okdiffnumbers)
def updatebookmarks(repo, changes, name="git_handler"):
    """abstract writing bookmarks for backwards compatibility"""
    bms = repo._bookmarks
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction(name)
        if hgutil.safehasattr(bms, "applychanges"):
            # applychanges was added in mercurial 4.3
            bms.applychanges(repo, tr, changes)
        else:
            # Apply changes by hand: None deletes the bookmark.
            for bmname, node in changes:
                if node is None:
                    del bms[bmname]
                else:
                    bms[bmname] = node
            if hgutil.safehasattr(bms, "recordchange"):
                # recordchange was added in mercurial 3.2
                bms.recordchange(tr)
            else:
                bms.write()
        tr.close()
    finally:
        # release() tolerates None entries, so a failure part-way through
        # acquisition is handled cleanly.
        lockmod.release(tr, lock, wlock)
def dumps(obj, paranoid=True):
    """Serialize obj to a JSON string, rejecting unicode str on Python 3.

    Output is built from bytes-oriented pieces; dict keys are escaped
    with encoding.jsonescape and emitted in sorted order.
    """
    if obj is None:
        return "null"
    if obj is False:
        return "false"
    if obj is True:
        return "true"
    if isinstance(obj, (int, long, float)):
        return pycompat.bytestr(obj)
    if isinstance(obj, bytes):
        return '"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
    if isinstance(obj, str):
        # This branch is unreachable on Python 2, because bytes == str
        # and we'll have returned in the bytes case above.  On Python 3
        # it catches unicode leaking into output before it hurts someone.
        raise error.ProgrammingError(
            "Mercurial only does output with bytes on Python 3: %r" % obj)
    if util.safehasattr(obj, "keys"):
        # Mapping: deterministic, sorted key order.
        entries = []
        for key, value in sorted(obj.iteritems()):
            entries.append(
                '"%s": %s'
                % (encoding.jsonescape(key, paranoid=paranoid), dumps(value, paranoid))
            )
        return "{" + ", ".join(entries) + "}"
    if util.safehasattr(obj, "__iter__"):
        return "[" + ", ".join(dumps(item, paranoid) for item in obj) + "]"
    raise TypeError("cannot encode type %s" % obj.__class__.__name__)
def clearcaches(cl):
    """Drop changelog caches, coping with internal API changes."""
    # Newer changelogs expose clearcaches() directly.
    if util.safehasattr(cl, "clearcaches"):
        cl.clearcaches()
        return
    # Older versions keep a node cache we reset by hand.
    if util.safehasattr(cl, "_nodecache"):
        from edenscm.mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
class hgrepo(baseclass):
    # pull/push are only defined when the base localrepository still has
    # them (they moved to exchange.* in Mercurial 3.2), so the overrides
    # are installed conditionally at class-creation time.
    if hgutil.safehasattr(localrepo.localrepository, "pull"):
        # Mercurial < 3.2
        @util.transform_notgit
        def pull(self, remote, heads=None, force=False):
            # Route pulls from git remotes through the git handler.
            if isinstance(remote, gitrepo):
                return self.githandler.fetch(remote.path, heads)
            else:  # pragma: no cover
                return super(hgrepo, self).pull(remote, heads, force)

    if hgutil.safehasattr(localrepo.localrepository, "push"):
        # Mercurial < 3.2
        @util.transform_notgit
        def push(self, remote, force=False, revs=None):
            # Route pushes to git remotes through the git handler.
            if isinstance(remote, gitrepo):
                return self.githandler.push(remote.path, revs, force)
            else:  # pragma: no cover
                return super(hgrepo, self).push(remote, force, revs)

    @util.transform_notgit
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        # For git remotes, derive base/heads from the remote's refs before
        # delegating; only the outgoing set is returned.
        if isinstance(remote, gitrepo):
            base, heads = self.githandler.get_refs(remote.path)
            out, h = super(hgrepo, self).findoutgoing(remote, base, heads, force)
            return out
        else:  # pragma: no cover
            return super(hgrepo, self).findoutgoing(remote, base, heads, force)

    def _findtags(self):
        # Merge git tags and git remote refs into the hg tag tables.
        (tags, tagtypes) = super(hgrepo, self)._findtags()
        for tag, rev in self.githandler.tags.iteritems():
            if isinstance(tag, unicode):
                tag = tag.encode("utf-8")
            tags[tag] = bin(rev)
            tagtypes[tag] = "git"
        for tag, rev in self.githandler.remote_refs.iteritems():
            if isinstance(tag, unicode):
                tag = tag.encode("utf-8")
            tags[tag] = rev
            tagtypes[tag] = "git-remote"
        tags.update(self.githandler.remote_refs)
        return (tags, tagtypes)

    @hgutil.propertycache
    def githandler(self):
        """get the GitHandler for an hg repo

        This only makes sense if the repo talks to at least one git remote.
        """
        return GitHandler(self, self.ui)

    def tags(self):
        # NOTE(review): deliberately returns an empty tag map, apparently
        # suppressing normal hg tags on this repo class -- confirm intent.
        return {}
def use(it):
    """Exercise a peer proxy: direct calls, batched calls, and error cases.

    Output is printed so the surrounding test can compare it against
    expected output.
    """
    # Direct call to base method shared between client and server.
    print(it.hello())

    # Direct calls to proxied methods. They cause individual roundtrips.
    print(it.foo("Un", two="Deux"))
    print(it.bar("Eins", "Zwei"))

    # Batched call to a couple of proxied methods.
    batch = it.batchiter()
    # The calls return futures to eventually hold results.
    foo = batch.foo(one="One", two="Two")
    bar = batch.bar("Eins", "Zwei")
    bar2 = batch.bar(b="Uno", a="Due")
    # Future shouldn't be set until we submit().
    assert isinstance(foo, peer.future)
    assert not util.safehasattr(foo, "value")
    assert not util.safehasattr(bar, "value")
    batch.submit()
    # Call results() to obtain results as a generator.
    results = batch.results()
    # Future results shouldn't be set until we consume a value.
    assert not util.safehasattr(foo, "value")
    foovalue = next(results)
    assert util.safehasattr(foo, "value")
    assert foovalue == foo.value
    print(foo.value)
    next(results)
    print(bar.value)
    next(results)
    print(bar2.value)
    # We should be at the end of the results generator.
    try:
        next(results)
    except StopIteration:
        print("proper end of results generator")
    else:
        print("extra emitted element!")

    # Attempting to call a non-batchable method inside a batch fails.
    batch = it.batchiter()
    try:
        batch.greet(name="John Smith")
    except error.ProgrammingError as e:
        print(e)

    # Attempting to call a local method inside a batch fails.
    batch = it.batchiter()
    try:
        batch.hello()
    except error.ProgrammingError as e:
        print(e)
def _racedetect(orig, self, other, s, match, listignored, listclean, listunknown):
    """Wrap status to detect working-copy races via watchman.

    Records a watchman clock before running the wrapped status call and
    queries watchman afterwards for non-ignored files that changed in
    between; raises error.WorkingCopyRaced if any did.
    """
    repo = self._repo
    detectrace = repo.ui.configbool("fsmonitor", "detectrace") or util.parsebool(
        encoding.environ.get("HGDETECTRACE", "")
    )
    # Both the pre-status clock read and the post-status query need the
    # watchman client, so fold the attribute check into the flag once.
    # (Previously the pre-check tested `_watchmanclient` while the
    # post-check tested `_fsmonitorstate`, which could leave `startclock`
    # and `state` unbound at the second use and raise NameError.)
    detectrace = detectrace and util.safehasattr(
        repo.dirstate._fs, "_watchmanclient"
    )
    startclock = None
    state = None
    if detectrace:
        state = repo.dirstate._fs._fsmonitorstate
        try:
            startclock = repo.dirstate._fs._watchmanclient.command(
                "clock", {"sync_timeout": int(state.timeout * 1000)})["clock"]
        except Exception as ex:
            # Best-effort: if watchman cannot answer, skip race detection.
            repo.ui.warn(_("cannot detect status race: %s\n") % ex)
            detectrace = False
    result = orig(self, other, s, match, listignored, listclean, listunknown)
    if detectrace:
        raceresult = repo._watchmanclient.command(
            "query",
            {
                "fields": ["name"],
                "since": startclock,
                "expression": [
                    "allof",
                    ["type", "f"],
                    ["not", ["anyof", ["dirname", ".hg"]]],
                ],
                "sync_timeout": int(state.timeout * 1000),
                "empty_on_fresh_instance": True,
            },
        )
        ignore = repo.dirstate._ignore
        racenames = [
            name
            for name in raceresult["files"]
            # hg-checklink*, hg-checkexec* are ignored.
            # Ignored files are allowed unless listignored is set.
            if not name.startswith("hg-check") and (listignored or not ignore(name))
        ]
        if racenames:
            msg = _(
                "[race-detector] files changed when scanning changes in working copy:\n%s"
            ) % "".join(" %s\n" % name for name in sorted(racenames))
            raise error.WorkingCopyRaced(
                msg,
                hint=_(
                    "this is an error because HGDETECTRACE or fsmonitor.detectrace is set to true"
                ),
            )
    return result
def uisetup(ui):
    """Teach the repo and revlog classes about the lz4revlog requirement."""
    # On modern Mercurial we can hook newreporequirements(); on older
    # versions we must replace the localrepository class wholesale.
    if util.safehasattr(localrepo, "newreporequirements"):
        extensions.wrapfunction(localrepo, "newreporequirements", requirements)
    else:

        @replaceclass(localrepo, "localrepository")
        class lz4repo(localrepo.localrepository):
            def _baserequirements(self, create):
                # Add lz4revlog to new repos when format.uselz4 is on.
                reqs = super(lz4repo, self)._baserequirements(create)
                if create and self.ui.configbool("format", "uselz4", True):
                    reqs.append("lz4revlog")
                return reqs

    @replaceclass(revlog, "revlog")
    class lz4revlog(revlog.revlog):
        def __init__(self, opener, indexfile, **kwds):
            super(lz4revlog, self).__init__(opener, indexfile, **kwds)
            opts = getattr(opener, "options", None)
            # lz4 is only active when the opener advertises the requirement.
            self._lz4 = opts and "lz4revlog" in opts

        def compress(self, text):
            if util.safehasattr(self, "_lz4") and self._lz4:
                if not text:
                    return ("", text)
                c = lz4compresshc(text)
                if len(text) <= len(c):
                    # Compression did not help: store uncompressed, with the
                    # "u" marker unless the text starts with NUL.
                    if text[0] == "\0":
                        return ("", text)
                    return ("u", text)
                # Leading "4" tags the chunk as lz4-compressed.
                return ("", "4" + c)
            return super(lz4revlog, self).compress(text)

        def decompress(self, bin):
            if not bin:
                return bin
            t = bin[0]
            if t == "\0":
                return bin
            if t == "4":
                return lz4decompress(bin[1:])
            return super(lz4revlog, self).decompress(bin)

    cls = localrepo.localrepository
    for reqs in "supportedformats openerreqs".split():
        getattr(cls, reqs).add("lz4revlog")
    if util.safehasattr(cls, "_basesupported"):
        # hg >= 2.8. Since we're good at falling back to the usual revlog, we
        # aren't going to bother with enabling ourselves per-repository.
        cls._basesupported.add("lz4revlog")
    else:
        # hg <= 2.7
        cls.supported.add("lz4revlog")
def uisetup(ui):
    """Install a --dir compatibility guard around cmdutil.openrevlog."""
    hasopenrevlog = util.safehasattr(cmdutil, "openrevlog")
    hasrevlogopts = util.safehasattr(commands, "debugrevlogopts")
    if not hasopenrevlog or hasrevlogopts:
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get("dir") and not util.safehasattr(repo, "dirlog"):
            raise error.Abort(
                "This version doesn't support --dir option",
                hint="use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, "openrevlog", openrevlog)
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure 'obj' has attribute 'name' before allowing setattr on it.

    Aborts if 'obj' lacks 'name' at runtime, so that a silently removed
    attribute cannot invalidate a performance measurement.  Returns a
    small helper object with set() to assign a new value and restore()
    to put the original value back.

    With ignoremissing=True a missing attribute returns None instead of
    aborting, which is useful for attributes that only exist in some
    Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            ("missing attribute %s of %s might break assumption"
             " of performance measurement") % (name, obj)
        )

    oldvalue = getattr(obj, name)

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)

        def restore(self):
            # Put back the value captured before any set() calls.
            setattr(obj, name, oldvalue)

    return attrutil()
def pullremotenames(repo, remote, bookmarks):
    """Record pulled bookmarks under their remote path and re-pull movers."""
    # when working between multiple local repos which do not all have
    # remotenames enabled, do this work only for those with it enabled
    if not util.safehasattr(repo, "_remotenames"):
        return

    path = activepath(repo.ui, remote)
    if path:
        # on a push, we don't want to keep obsolete heads since
        # they won't show up as heads on the next pull, so we
        # remove them here otherwise we would require the user
        # to issue a pull to refresh .hg/remotenames
        saveremotenames(repo, {path: bookmarks})

        # repo.ui.paths.get(path) might be empty during clone.
        if repo.ui.paths.get(path):
            # Collect selected bookmarks that point to unknown commits.
            # This indicates a race condition.
            selected = set(selectivepullbookmarknames(repo, path))
            hasnode = repo.changelog.hasnode
            moved = []
            for bookname, hexnode in bookmarks.items():
                if bookname in selected and hexnode and not hasnode(bin(hexnode)):
                    moved.append(bookname)
            # Those bookmarks have moved since pull. Pull them again.
            if moved:
                repo.pull(path, bookmarknames=moved)
    precachedistance(repo)
def _getlogrevs(orig, repo, pats, opts):
    """Wrap getlogrevs so phabstatus can peek ahead at revs to be logged."""
    # Call the original function
    revs, expr, filematcher = orig(repo, pats, opts)

    # Wrap the revs result so that iter(revs) returns a PeekaheadRevsetIter()
    # the first time it is invoked, and sets repo._phabstatusrevs so that the
    # phabstatus code will be able to peek ahead at the revs to be logged.
    orig_type = revs.__class__

    class wrapped_class(type(revs)):
        def __iter__(self):
            # The first time __iter__() is called, return a
            # PeekaheadRevsetIter(), and assign it to repo._phabstatusrevs
            revs.__class__ = orig_type
            # By default, peek ahead 30 revisions at a time
            peekahead = repo.ui.configint("phabstatus", "logpeekahead", 30)
            repo._phabstatusrevs = PeekaheadRevsetIter(revs, peekahead)
            return repo._phabstatusrevs

        # Marker attribute so an already-wrapped revs is never wrapped twice.
        _is_phabstatus_wrapped = True

    if not hgutil.safehasattr(revs, "_is_phabstatus_wrapped"):
        revs.__class__ = wrapped_class
    return revs, expr, filematcher
def reposetup(ui, repo):
    """Enable the fastannotate client when configured, or auto-detect it."""
    enabled = ui.configbool("fastannotate", "client", default=None)
    # Unset config: fall back to detecting a remotefilelog repo.
    if enabled is None and util.safehasattr(repo, "requirements"):
        enabled = "remotefilelog" in repo.requirements
    if enabled:
        protocol.clientreposetup(ui, repo)
def extsetup(ui):
    """Install bundle2 part handlers and wireproto wrappers."""
    # Wrap the pushkey part handler, preserving its declared params.
    origpushkey = bundle2.parthandlermapping["pushkey"]

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkey, *args, **kwargs)

    newpushkeyhandler.params = origpushkey.params
    bundle2.parthandlermapping["pushkey"] = newpushkeyhandler

    # Same treatment for the phase-heads part handler.
    origphaseheads = bundle2.parthandlermapping["phase-heads"]

    def newphaseheadshandler(*args, **kwargs):
        return bundle2handlephases(origphaseheads, *args, **kwargs)

    newphaseheadshandler.params = origphaseheads.params
    bundle2.parthandlermapping["phase-heads"] = newphaseheadshandler

    extensions.wrapfunction(localrepo.localrepository, "listkeys", localrepolistkeys)
    origlookup = wireproto.commands["lookup"]
    wireproto.commands["lookup"] = (_makelookupwrap(origlookup[0]), "key")
    extensions.wrapfunction(exchange, "getbundlechunks", getbundlechunks)
    extensions.wrapfunction(bundle2, "processparts", processparts)

    # The capabilities hook moved to a private name in newer Mercurial.
    if util.safehasattr(wireproto, "_capabilities"):
        extensions.wrapfunction(wireproto, "_capabilities", _capabilities)
    else:
        extensions.wrapfunction(wireproto, "capabilities", _capabilities)
def __init__(self, handler, commits, refs):
    """Build an overlay repo object exposing the handler's commits/refs."""
    self.handler = handler
    self.changelog = overlaychangelog(self, handler.repo.changelog)
    if util.safehasattr(handler.repo, "manifest"):
        self.manifest = overlayoldmanifestlog(self, handler.repo.manifest)
        # new as of mercurial 3.9+
        self.manifestlog = self.manifest
    else:
        # no more manifest class as of 4.1
        self.manifestlog = overlaymanifestlog(self)

    # for incoming -p
    self.root = handler.repo.root
    self.getcwd = handler.repo.getcwd
    # self.status = handler.repo.status
    self.ui = handler.repo.ui

    # Lookup tables populated by _makemaps() below.
    self.revmap = None
    self.nodemap = None
    self.refmap = None
    self.tagmap = None

    self._makemaps(commits, refs)

    try:
        # Mercurial >= 3.3
        from edenscm.mercurial import namespaces

        self.names = namespaces.namespaces(self)
    except (AttributeError, ImportError):
        # Older Mercurial: run without namespaces support.
        pass
def __init__(self, url="", ra=None):
    """Open (or adopt) a Subversion RA session for the given URL."""
    self.pool = Pool()
    self.svn_url = url
    self.username = ""
    self.password = ""

    # Only Subversion 1.4 has reparent()
    if ra is None or not util.safehasattr(svn.ra, "reparent"):
        # Build a fresh client context with its own authentication baton.
        self.client = svn.client.create_context(self.pool)
        ab = _create_auth_baton(self.pool)
        self.client.auth_baton = ab
        global svn_config
        if svn_config is None:
            # Parse the svn configuration once and cache it module-wide.
            svn_config = svn.core.svn_config_get_config(None)
        self.client.config = svn_config
        try:
            self.ra = svn.client.open_ra_session(
                self.svn_url, self.client, self.pool
            )
        except SubversionException as xxx_todo_changeme:
            (inst, num) = xxx_todo_changeme.args
            if num in (
                svn.core.SVN_ERR_RA_ILLEGAL_URL,
                svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
                svn.core.SVN_ERR_BAD_URL,
            ):
                # URL-level failures mean "not an svn branch" rather than
                # a hard error; let everything else propagate.
                raise NotBranchError(url)
            raise
    else:
        # Reuse the caller's RA session, repointing it at our URL.
        self.ra = ra
        svn.ra.reparent(self.ra, self.svn_url.encode("utf-8"))
def _create_auth_baton(pool):
    """Create a Subversion authentication baton."""
    import svn.client

    # Give the client context baton a suite of authentication
    # providers.h
    providers = [
        svn.client.get_simple_provider(pool),
        svn.client.get_username_provider(pool),
        svn.client.get_ssl_client_cert_file_provider(pool),
        svn.client.get_ssl_client_cert_pw_file_provider(pool),
        svn.client.get_ssl_server_trust_file_provider(pool),
    ]
    # Platform-dependent authentication methods
    getprovider = getattr(
        svn.core, "svn_auth_get_platform_specific_provider", None
    )
    if getprovider:
        # Available in svn >= 1.6
        for keyring in ("gnome_keyring", "keychain", "kwallet", "windows"):
            for kind in ("simple", "ssl_client_cert_pw", "ssl_server_trust"):
                provider = getprovider(keyring, kind, pool)
                if provider:
                    providers.append(provider)
    elif util.safehasattr(svn.client, "get_windows_simple_provider"):
        providers.append(svn.client.get_windows_simple_provider(pool))
    return svn.core.svn_auth_open(providers, pool)
def _updatecallstreamopts(self, command, opts):
    """Inject remotefilelog bundlecaps into getbundle calls on shallow repos."""
    if command != "getbundle":
        return
    if "remotefilelog" not in shallowutil.peercapabilities(self):
        return
    if not util.safehasattr(self, "_localrepo"):
        return
    lrepo = self._localrepo
    if constants.REQUIREMENT not in lrepo.requirements:
        return

    existing = opts.get("bundlecaps")
    bundlecaps = [existing] if existing else []

    # shallow, includepattern, and excludepattern are a hacky way of
    # carrying over data from the local repo to this getbundle
    # command. We need to do it this way because bundle1 getbundle
    # doesn't provide any other place we can hook in to manipulate
    # getbundle args before it goes across the wire. Once we get rid
    # of bundle1, we can use bundle2's _pullbundle2extraprepare to
    # do this more cleanly.
    bundlecaps.append("remotefilelog")
    if lrepo.includepattern:
        bundlecaps.append("includepattern=" + "\0".join(lrepo.includepattern))
    if lrepo.excludepattern:
        bundlecaps.append("excludepattern=" + "\0".join(lrepo.excludepattern))

    opts["bundlecaps"] = ",".join(bundlecaps)
def _shareddatastoresrepack(repo, incremental):
    """Repack the shared cache file packs, enforcing the cache size limit."""
    # Applies when legacy shared stores exist, or when the rust store is on.
    haslegacystores = util.safehasattr(repo.fileslog, "shareddatastores")
    if haslegacystores or repo.fileslog._ruststore:
        packpath = shallowutil.getcachepackpath(repo, constants.FILEPACK_CATEGORY)
        limit = repo.ui.configbytes("remotefilelog", "cachelimit", "10GB")
        _cleanuppacks(repo.ui, packpath, limit)
        _runrepack(repo, packpath, incremental, True)
def createclientforrepo(repo):
    """Create a Watchman client for the repo if it does not have one.

    Creating the client may raise an exception.  Use getclientforrepo()
    to retrieve the client associated with a repo.
    """
    if util.safehasattr(repo, "_watchmanclient"):
        return
    repo._watchmanclient = client(repo)
def _lfsprefetch(self, fileids):
    """Fetch any LFS blobs for fileids that are missing from the local store."""
    if not _lfsmod or not util.safehasattr(self.repo.svfs, "lfslocalblobstore"):
        return
    if not _lfsmod.wrapper.candownload(self.repo):
        return

    localstore = self.repo.svfs.lfslocalblobstore
    missing = []
    oidnames = {}
    for filename, fid in fileids:
        node = bin(fid)
        flog = self.repo.file(filename)
        # Only revisions flagged as externally stored are LFS pointers.
        if not (flog.flags(node) & revlog.REVIDX_EXTSTORED):
            continue
        rawtext = flog.revision(node, raw=True)
        pointer = _lfsmod.pointer.deserialize(rawtext)
        oid = pointer.oid()
        if not localstore.has(oid):
            missing.append(pointer)
            oidnames[oid] = filename

    if missing:
        perftrace.tracevalue("Missing", len(missing))
        self.repo.svfs.lfsremoteblobstore.readbatch(
            missing, localstore, objectnames=oidnames
        )
        assert all(localstore.has(p.oid()) for p in missing)
def _cansendflat(self, mfnodes):
    """Return True if flat manifests may be sent for all of mfnodes.

    Raises error.Abort when the server blocks flat manifests and the
    client did not advertise allowflatmanifest.
    """
    repo = self._repo
    if "treeonly" in self._bundlecaps or "True" in self._b2caps.get(
            "treeonly", []):
        return False
    if not util.safehasattr(repo.manifestlog, "_revlog"):
        return False
    if treeonly(repo):
        return False

    # Every requested node must exist in the flat manifest revlog.
    mfrevlog = repo.manifestlog._revlog
    if any(mfnode not in mfrevlog.nodemap for mfnode in mfnodes):
        return False

    allowflat = ("allowflatmanifest" in self._bundlecaps
                 or "True" in self._b2caps.get("allowflatmanifest", []))
    if repo.ui.configbool("treemanifest", "blocksendflat") and not allowflat:
        raise error.Abort(
            "must produce treeonly changegroups in a treeonly repository")
    return True
def _adjustlinkrev(orig, self, *args, **kwargs):
    # When generating file blobs, taking the real path is too slow on large
    # repos, so force it to just return the linkrev directly.
    repo = self._repo
    if getattr(repo, "forcelinkrev", False):
        flog = self._filelog
        return flog.linkrev(flog.rev(self._filenode))
    return orig(self, *args, **kwargs)
def test_gca():
    """Cross-check the C and Python greatest-common-ancestor implementations."""
    u = uimod.ui.load()
    for i, (dag, tests) in enumerate(dagtests):
        repo = hg.repository(u, b"gca%d" % i, create=1)
        cl = repo.changelog
        if not util.safehasattr(cl.index, "ancestors"):
            # C version not available
            return

        debugcommands.debugbuilddag(u, repo, dag)

        # Compare the results of the Python and C versions. This does not
        # include choosing a winner when more than one gca exists -- we make
        # sure both return exactly the same set of gcas.
        # Also compare against expected results, if available.
        for a in cl:
            for b in cl:
                cgcas = sorted(cl.index.ancestors(a, b))
                pygcas = sorted(ancestor.ancestors(cl.parentrevs, a, b))
                expected = tests[(a, b)] if (a, b) in tests else None
                if cgcas != pygcas or (expected and cgcas != expected):
                    print("test_gca: for dag %s, gcas for %d, %d:" % (dag, a, b))
                    print(" C returned: %s" % cgcas)
                    print(" Python returned: %s" % pygcas)
                    if expected:
                        print(" expected: %s" % expected)
def _manifestrepack(repo, incremental):
    """Repack the manifest/tree stores (shared cache and local)."""
    if repo.ui.configbool("treemanifest", "server"):
        # This code path is no longer used. Will be deleted soon.
        pass
    elif util.safehasattr(repo.manifestlog, "datastore"):
        if repo.ui.configbool("treemanifest", "useruststore"):
            # Shared
            # NOTE(review): called here with a third category argument;
            # confirm the _shareddatastoresrepack in scope accepts it (a
            # two-argument variant exists elsewhere in this codebase).
            _shareddatastoresrepack(repo, incremental, constants.TREEPACK_CATEGORY)
            # Local
            _localdatarepack(repo, incremental, constants.TREEPACK_CATEGORY)
        else:
            localdata, shareddata = _getmanifeststores(repo)
            lpackpath, ldstores, lhstores = localdata
            spackpath, sdstores, shstores = shareddata

            def _domanifestrepack(packpath, dstores, hstores, shared):
                # Shared cache packs get a configurable size limit; local
                # packs are not limited (limit=0).
                limit = (repo.ui.configbytes("remotefilelog", "manifestlimit",
                                             "2GB") if shared else 0)
                _cleanuppacks(repo.ui, packpath, limit)
                runrepacklegacy(repo.ui, packpath, incremental, shared)

            # Repack the shared manifest store
            _domanifestrepack(spackpath, sdstores, shstores, True)

            # Repack the local manifest store
            _domanifestrepack(lpackpath, ldstores, lhstores, False)
def close(self):
    # the check is necessary if __init__ fails - the caller may call
    # "close" in a "finally" block and it probably does not want close() to
    # raise an exception there.
    if not util.safehasattr(self, "_dbs"):
        return
    for db in self._dbs.itervalues():
        db.close()
    self._dbs.clear()
def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(
        orig, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        # Ensure the remotefilelog capability is always advertised.
        caps = bundlecaps or set()
        caps.add("remotefilelog")
        return orig(source, heads=heads, common=common, bundlecaps=caps, **kwargs)

    if util.safehasattr(remote, "_callstream"):
        # ssh/http peer: stash the local repo for _updatecallstreamopts.
        remote._localrepo = repo
    elif util.safehasattr(remote, "getbundle"):
        wrapfunction(remote, "getbundle", localgetbundle)

    return orig(repo, remote, *args, **kwargs)
def cleandict(d):
    """Recursively drop entries whose value is None or has zero length."""
    if not isinstance(d, dict):
        return d
    result = {}
    for key, value in pycompat.iteritems(d):
        if value is None:
            continue
        # Skip empty sized values (empty dicts, lists, strings, ...).
        if util.safehasattr(value, "__len__") and len(value) == 0:
            continue
        result[key] = cleandict(value)
    return result
def close(self):
    """Close the db, then move temp files into their final directory."""
    super(linkrevdbwritewithtemprename, self).close()
    # _tempdir is presumably only set when __init__ completed -- skip the
    # rename phase otherwise.
    if not util.safehasattr(self, "_tempdir"):
        return
    for entry in sorted(os.listdir(self._tempdir)):
        os.rename(
            os.path.join(self._tempdir, entry),
            os.path.join(self._origpath, entry),
        )
    os.rmdir(self._tempdir)