def _read(self, id):
    """Fetch the raw file blob for ``id``: shared cache, then local disk,
    then the server (which repopulates the cache).

    Raises error.LookupError if no source has the blob.
    """
    service = self.repo.fileservice
    cache = service.localcache
    key = fileserverclient.getcachekey(
        self.repo.name, self.filename, id
    )
    # 1. shared local cache
    try:
        return cache.read(key)
    except KeyError:
        pass
    # 2. repo-local store on disk
    diskpath = os.path.join(
        self.localpath, fileserverclient.getlocalkey(self.filename, id)
    )
    try:
        return shallowutil.readfile(diskpath)
    except IOError:
        pass
    # 3. ask the server to populate the cache, then retry the cache once
    service.prefetch([(self.filename, id)])
    try:
        return cache.read(key)
    except KeyError:
        raise error.LookupError(id, self.filename, _(b'no node'))
def lookup(self, node):
    """Resolve ``node`` to a binary node.

    Accepts a revision number (int), a 20-byte binary node, decimal
    revnum bytes (e.g. ``b'5'``, negatives count from the end), or
    40-char hex bytes.  Raises error.LookupError when nothing matches.
    """
    if isinstance(node, int):
        return self.node(node)
    if len(node) == 20:
        # validate that the node exists before echoing it back
        self.rev(node)
        return node
    try:
        rev = int(node)
        # node is bytes here, so the round-trip check must format to
        # bytes as well; with '%d' the comparison always failed and
        # decimal revnum lookups fell through to the error below.
        # Also rejects non-canonical spellings like b'07'.
        if b'%d' % rev != node:
            raise ValueError
        if rev < 0:
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(node) == 40:
        try:
            rawnode = bin(node)
            self.rev(rawnode)
            return rawnode
        except TypeError:
            pass
    # NOTE(review): sibling methods pass bytes to _() (e.g. _(b'no node'));
    # this str literal is kept as-is — confirm whether it should be bytes.
    raise error.LookupError(node, self._path, _('invalid lookup input'))
def rev(self, node):
    """Return the revision number for ``node`` (nullrev for nullid)."""
    if node == nullid:
        return nullrev
    try:
        return self._nodetorev[node]
    except KeyError:
        raise error.LookupError(node, self._path, _(b'no node'))
def node(self, r):
    """Map revision number ``r`` to its binary node (nullid for nullrev)."""
    if r == nullrev:
        return sha1nodeconstants.nullid
    row = self._db.execute(
        'SELECT node FROM changelog WHERE rev = ?', (r,)
    ).fetchone()
    if row is None:
        raise error.LookupError(r, b'00changelog.i', _(b'no node'))
    # the database stores the node as hex; callers want binary
    return bin(row[0])
def rev(self, n):
    """Map binary node ``n`` to its revision number (-1 for nullid)."""
    if n == sha1nodeconstants.nullid:
        return -1
    row = self._db.execute(
        'SELECT rev FROM changelog WHERE node = ?',
        (gitutil.togitnode(n),),
    ).fetchone()
    if row is None:
        # NOTE(review): the %d placeholder in this message is never
        # filled in; kept byte-identical, but it looks like a leftover.
        raise error.LookupError(n, b'00changelog.i', _(b'no node %d'))
    return row[0]
def lookup(self, node):
    """Normalize ``node`` to a 20-byte binary node.

    A 40-char hex node is converted; anything that is not 20 bytes
    afterwards raises error.LookupError.
    """
    candidate = bin(node) if len(node) == 40 else node
    if len(candidate) == 20:
        return candidate
    raise error.LookupError(
        candidate, self.filename, _('invalid lookup input')
    )
def parents(self, node):
    """Return the ``(p1node, p2node)`` pair for ``node``."""
    if node == nullid:
        return nullid, nullid
    try:
        entry = self._revisions[node]
    except KeyError:
        raise error.LookupError(node, self._path, _(b'no node'))
    return entry.p1node, entry.p2node
def x_rfl_getfile(self, file, node):
    """Batchable wire command: fetch ``file``@``node`` from the server.

    First yields the request arguments plus a future; once the future
    resolves, parses a ``<code>\\0<payload>`` response and yields the
    payload, raising error.LookupError on a nonzero status code.
    """
    if not self.capable('x_rfl_getfile'):
        raise error.Abort(
            'configured remotefile server does not support getfile')
    fut = wireprotov1peer.future()
    yield {'file': file, 'node': node}, fut
    response = fut.value
    code, data = response.split('\0', 1)
    if int(code):
        # nonzero status: ``data`` carries the server's error text
        raise error.LookupError(file, node, data)
    yield data
def rev(self, node):
    """Return the changelog rev that introduced filenode ``node`` of
    this file path, via the changedfiles join table."""
    params = (pycompat.fsdecode(self.path), gitutil.togitnode(node))
    row = self._db.execute(
        '''
SELECT rev FROM changelog
INNER JOIN changedfiles ON changelog.node = changedfiles.node
WHERE changedfiles.filename = ? AND changedfiles.filenode = ?''',
        params,
    ).fetchone()
    if row is None:
        raise error.LookupError(self.path, node, _(b'no such node'))
    return int(row[0])
def lookup(self, node):
    """Resolve hex or binary ``node`` to binary, verifying that it
    exists in the backing git repository."""
    if len(node) not in (20, 40):
        node = int(node)
    if isinstance(node, int):
        # revnum-based lookup is not implemented for the git backend
        assert False, b'todo revnums for nodes'
    if len(node) == 40:
        node = bin(node)
    hnode = gitutil.togitnode(node)
    if hnode not in self.gitrepo:
        raise error.LookupError(self.path, node, _(b'no match found'))
    return node
def rev(self, node):
    """Return the rev number for ``node`` by scanning the by-rev index."""
    validatenode(node)
    if node not in self._indexbynode:
        raise error.LookupError(node, self._indexpath, _('no node'))
    # linear scan: the by-rev index is the authority for numbering
    for revnum, entry in self._indexbyrev.items():
        if entry[b'node'] == node:
            return revnum
    # the node is in _indexbynode, so it must appear above
    raise error.ProgrammingError('this should not occur')
def revision(self, node, raw=False, _verifyhash=True):
    """Return the fulltext stored for ``node``.

    ``node`` may be a binary node or a revision number.  ``raw`` is
    unused here — presumably kept for revlog interface parity (TODO
    confirm against callers).  ``_verifyhash`` controls hash checking;
    it is forced off when a parent node was rewritten, since the hash
    could not match.  Raises error.LookupError for unknown nodes.
    """
    # matches both the null node and the nullrev sentinel
    if node in (nullid, nullrev):
        return b''
    if isinstance(node, int):
        node = self.node(node)
    if node not in self._nodetorev:
        raise error.LookupError(node, self._path, _(b'no node'))
    if node in self._revisioncache:
        return self._revisioncache[node]
    # Because we have a fulltext revision cache, we are able to
    # short-circuit delta chain traversal and decompression as soon as
    # we encounter a revision in the cache.
    stoprids = {self._revisions[n].rid: n for n in self._revisioncache}
    # an empty stop set would never match; -1 is a rid that cannot occur
    if not stoprids:
        stoprids[-1] = None
    fulltext = resolvedeltachain(
        self._db,
        self._pathid,
        node,
        self._revisioncache,
        stoprids,
        zstddctx=self._dctx,
    )
    # Don't verify hashes if parent nodes were rewritten, as the hash
    # wouldn't verify.
    if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
        _verifyhash = False
    if _verifyhash:
        self._checkhash(fulltext, node)
    # populate the cache so later reads (and delta chains through this
    # node) short-circuit
    self._revisioncache[node] = fulltext
    return fulltext
def revision(self, node, raw=False):
    """Return the revlog-style contents stored at ``node``.

    The result includes the metadata traditionally embedded in file
    revlogs; this is mainly used for bundling and for talking to
    vanilla hg clients.  ``raw=True`` skips flag processing.
    """
    if node == nullid:
        return ""
    if len(node) != 20:
        raise error.LookupError(node, self.filename,
                                _('invalid revision input'))
    store = self.repo.contentstore
    rawtext = store.get(self.filename, node)
    if raw:
        return rawtext
    meta = store.getmeta(self.filename, node)
    flags = meta.get(constants.METAKEYFLAG, 0)
    if not flags:
        return rawtext
    # flag processors may transform the raw text on read
    return self._processflags(rawtext, flags, 'read')[0]
def revision(self, node, raw=False):
    """Return the revlog-style contents stored at ``node``.

    The result includes the metadata traditionally embedded in file
    revlogs; this is mainly used for bundling and for talking to
    vanilla hg clients.  ``raw=True`` skips flag processing.  Raises
    error.WdirUnsupported for working-directory pseudo-nodes.
    """
    if node == nullid:
        return b""
    if len(node) != 20:
        raise error.LookupError(
            node, self.filename, _(b'invalid revision input')
        )
    if node == wdirid or node in wdirfilenodeids:
        raise error.WdirUnsupported
    store = self.repo.contentstore
    rawtext = store.get(self.filename, node)
    if raw:
        return rawtext
    meta = store.getmeta(self.filename, node)
    flags = meta.get(constants.METAKEYFLAG, 0)
    if not flags:
        return rawtext
    # flag processors may transform the raw text on read
    return flagutil.processflagsread(self, rawtext, flags)[0]
def revision(self, node):
    """Return the revlog-style contents stored at ``node``.

    The result includes the metadata traditionally embedded in file
    revlogs; this is mainly used for bundling and for talking to
    vanilla hg clients.
    """
    if node == nullid:
        return ""
    if len(node) != 20:
        raise error.LookupError(node, self.filename,
                                _('invalid revision input'))
    blob = self._read(hex(node))
    # blob layout: <size header>\0<data...>; _parsesize returns the
    # header end offset and the payload size
    offset, size = self._parsesize(blob)
    start = offset + 1
    data = blob[start:start + size]
    p1, p2, linknode, copyfrom = self.ancestormap(node)[node]
    # for copies, the copy-source revision is recorded in p1
    copyrev = hex(p1) if copyfrom else None
    return _createrevlogtext(data, copyfrom, copyrev)
def ancestormap(self, node, relativeto=None):
    """Return a validated ancestor map (with linknodes) for ``node``.

    Tries, twice (with a server prefetch between attempts): the shared
    local cache, the repo-local store on disk, older on-disk versions of
    the blob, and finally (when ``relativeto`` is given) a local read
    with linknode adjustment.  Raises error.LookupError if no source
    yields a mapping with valid linknodes.
    """
    # ancestormaps are a bit complex, and here's why:
    #
    # The key for filelog blobs contains the hash for the file path and for
    # the file version. But these hashes do not include information about
    # the linknodes included in the blob. So it's possible to have multiple
    # blobs with the same key but different linknodes (for example, if you
    # rebase you will have the exact same file version, but with a different
    # linknode). So when reading the ancestormap (which contains linknodes)
    # we need to make sure all the linknodes are valid in this repo, so we
    # read through all versions that have ever existed, and pick one that
    # contains valid linknodes. If we can't find one locally, we then try
    # the server.
    hexnode = hex(node)
    localcache = self.repo.fileservice.localcache
    reponame = self.repo.name
    # two passes: the second runs after a forced server prefetch below
    for i in range(0, 2):
        cachekey = fileserverclient.getcachekey(reponame, self.filename,
                                                hexnode)
        # shared local cache (server-originated data)
        try:
            raw = localcache.read(cachekey)
            mapping = self._ancestormap(node, raw, relativeto,
                                        fromserver=True)
            if mapping:
                return mapping
        except KeyError:
            pass
        # repo-local store on disk
        localkey = fileserverclient.getlocalkey(self.filename, hexnode)
        localpath = os.path.join(self.localpath, localkey)
        try:
            raw = _readfile(localpath)
            mapping = self._ancestormap(node, raw, relativeto)
            if mapping:
                return mapping
        except IOError:
            pass
        # past versions may contain valid linknodes
        try:
            filename = os.path.basename(localpath)
            directory = os.path.dirname(localpath)
            # alternates are named <40-char key><version-number>
            alternates = [
                f for f in os.listdir(directory)
                if len(f) > 40 and f.startswith(filename)
            ]
            # oldest to newest, by the numeric suffix after the key
            alternates = sorted(alternates, key=lambda x: int(x[40:]))
            for alternate in alternates:
                alternatepath = os.path.join(directory, alternate)
                try:
                    raw = _readfile(alternatepath)
                    mapping = self._ancestormap(node, raw, relativeto)
                    if mapping:
                        return mapping
                except IOError:
                    pass
        except OSError:
            # Directory doesn't exist. Oh well
            pass
        # If exists locally, but with a bad history, adjust the linknodes
        # manually.
        if relativeto and os.path.exists(localpath):
            raw = _readfile(localpath)
            mapping = self._ancestormap(node, raw, relativeto,
                                        adjustlinknodes=True)
            if mapping:
                return mapping
        # Fallback to the server cache
        self.repo.fileservice.prefetch([(self.filename, hexnode)],
                                       force=True)
    # one last look at the cache after the final prefetch
    try:
        raw = localcache.read(cachekey)
        mapping = self._ancestormap(node, raw, relativeto, fromserver=True)
        if mapping:
            return mapping
    except KeyError:
        pass
    raise error.LookupError(node, self.filename, _('no valid file history'))