def parse_manifest(mfdict, fdict, lines):
    for l in lines.splitlines():
        f, n = l.split('\0')
        if len(n) > 40:
            fdict[f] = n[40:]
            mfdict[f] = bin(n[:40])
        else:
            mfdict[f] = bin(n)
def parse_manifest(mfdict, fdict, lines): for l in lines.splitlines(): f, n = l.split("\0") if len(n) > 40: fdict[f] = n[40:] mfdict[f] = bin(n[:40]) else: mfdict[f] = bin(n)
def parse(self, lines):
    mfdict = manifestdict()
    fdict = mfdict._flags
    for l in lines.splitlines():
        f, n = l.split('\0')
        if len(n) > 40:
            fdict[f] = n[40:]
            mfdict[f] = bin(n[:40])
        else:
            mfdict[f] = bin(n)
    return mfdict
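# Hedged usage sketch (not from the Mercurial sources) for the manifest
# parsers above. Assumption: bin() is mercurial.node.bin, i.e. hex -> 20-byte
# binary (binascii.unhexlify). Each manifest line is
# "<path>\0<40-hex-node>[flags]"; anything past the 40 hex digits (e.g. 'x'
# for executable) is a flags suffix.
from binascii import unhexlify as bin

mfdict, fdict = {}, {}
lines = "foo\0" + "ab" * 20 + "\n" + "bar\0" + "cd" * 20 + "x\n"
parse_manifest(mfdict, fdict, lines)
assert mfdict["foo"] == bin("ab" * 20)   # 20-byte binary node
assert fdict["bar"] == "x"               # flag split off after 40 hex chars
assert mfdict["bar"] == bin("cd" * 20)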
def do_changegroupsubset(self):
    argmap = dict([self.getarg(), self.getarg()])
    bases = [bin(n) for n in argmap['bases'].split(' ')]
    heads = [bin(n) for n in argmap['heads'].split(' ')]
    cg = self.repo.changegroupsubset(bases, heads, 'serve')
    while True:
        d = cg.read(4096)
        if not d:
            break
        self.fout.write(d)
    self.fout.flush()
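# Hedged sketch of the argument encoding do_changegroupsubset() decodes:
# 'bases' and 'heads' arrive as space-separated lists of 40-hex nodes
# (inferred from the split(' ') above; the values here are illustrative).
from binascii import unhexlify as bin

argmap = {'bases': "ab" * 20 + " " + "cd" * 20, 'heads': "ef" * 20}
bases = [bin(n) for n in argmap['bases'].split(' ')]
assert len(bases) == 2 and all(len(n) == 20 for n in bases)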
def diff(self, diffopts, node2, match, prefix, **opts):
    try:
        node1 = node.bin(self._state[1])
        # We currently expect node2 to come from substate and be
        # in hex format
        if node2 is not None:
            node2 = node.bin(node2)
        cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
                               node1, node2, match,
                               prefix=os.path.join(prefix, self._path),
                               listsubrepos=True, **opts)
    except error.RepoLookupError, inst:
        self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                           % (inst, subrelpath(self)))
def lookup(self, key): self.requirecap("lookup", _("look up remote revision")) d = self._call("lookup", key=key) success, data = d[:-1].split(" ", 1) if int(success): return bin(data) self._abort(error.RepoError(data))
def analyzeremotephases(repo, subset, roots):
    """Compute phases heads and root in a subset of node from root dict

    * subset is heads of the subset
    * roots is {<nodeid> => phase} mapping. key and value are string.

    Accept unknown element input
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == 0:
            if node != nullid:
                repo.ui.warn(_('ignoring inconsistent public root'
                               ' from remote: %s\n') % nhex)
        elif phase == 1:
            if node in nodemap:
                draftroots.append(node)
        else:
            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
                         % (phase, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots
def renamed(self, node):
    if self.parents(node)[0] != nullid:
        return False
    m = self._readmeta(node)
    if m and "copy" in m:
        return (m["copy"], bin(m["copyrev"]))
    return False
def _match(self, id):
    if isinstance(id, (long, int)):
        # rev
        return self.node(id)
    if len(id) == 20:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            self.rev(node) # quick search the index
            return node
        except LookupError:
            pass # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if str(rev) != id:
            raise ValueError
        if rev < 0:
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 40:
        try:
            # a full hex nodeid?
            node = bin(id)
            self.rev(node)
            return node
        except (TypeError, LookupError):
            pass
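# Hedged sketch of the numeric-rev branch of _match() above: a string is
# treated as a rev only when int() round-trips exactly, so "07" or "+7"
# remain candidates for partial hex ids instead of becoming rev 7.
# looks_like_rev is an illustrative helper, not a Mercurial API.
def looks_like_rev(id, nrevs):
    try:
        rev = int(id)
        if str(rev) != id:
            raise ValueError
        if rev < 0:
            rev = nrevs + rev
        return 0 <= rev < nrevs
    except (ValueError, OverflowError):
        return False

assert looks_like_rev("7", 10)
assert not looks_like_rev("07", 10)   # stays a hex-prefix candidate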
def _readroots(repo, phasedefaults=None):
    """Read phase roots from disk

    phasedefaults is a list of fn(repo, roots) callable, which are
    executed if the phase roots file does not exist. When phases are
    being initialized on an existing repository, this could be used to
    set selected changesets phase to something else than public.

    Return (roots, dirty) where dirty is true if roots differ from
    what is being stored.
    """
    repo = repo.unfiltered()
    dirty = False
    roots = [set() for i in allphases]
    try:
        f = repo.sopener('phaseroots')
        try:
            for line in f:
                phase, nh = line.split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        if phasedefaults:
            for f in phasedefaults:
                roots = f(repo, roots)
        dirty = True
    return roots, dirty
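# Hedged sketch of the on-disk 'phaseroots' format parsed above: one
# "<phase> <40-hex-node>" pair per line. Assumption: allphases indexes
# Mercurial's phases 0=public, 1=draft, 2=secret.
from binascii import unhexlify as bin

allphases = range(3)
roots = [set() for i in allphases]
for line in ["1 " + "ab" * 20, "2 " + "cd" * 20]:
    phase, nh = line.split()
    roots[int(phase)].add(bin(nh))
assert bin("ab" * 20) in roots[1]     # a draft root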
def lookup(self, key): self.requirecap('lookup', _('look up remote revision')) d = self.do_cmd("lookup", key = key).read() success, data = d[:-1].split(' ', 1) if int(success): return bin(data) raise error.RepoError(data)
def _readroots(repo, phasedefaults=None):
    """Read phase roots from disk

    phasedefaults is a list of fn(repo, roots) callable, which are
    executed if the phase roots file does not exist. When phases are
    being initialized on an existing repository, this could be used to
    set selected changesets phase to something else than public.

    Return (roots, dirty) where dirty is true if roots differ from
    what is being stored.
    """
    repo = repo.unfiltered()
    dirty = False
    roots = [set() for i in allphases]
    try:
        f = repo.sopener("phaseroots")
        try:
            for line in f:
                phase, nh = line.split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        if phasedefaults:
            for f in phasedefaults:
                roots = f(repo, roots)
        dirty = True
    return roots, dirty
def analyzeremotephases(repo, subset, roots):
    """Compute phases heads and root in a subset of node from root dict

    * subset is heads of the subset
    * roots is {<nodeid> => phase} mapping. key and value are string.

    Accept unknown element input
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == "publishing": # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == 0:
            if node != nullid:
                repo.ui.warn(_("ignoring inconsistent public root"
                               " from remote: %s\n") % nhex)
        elif phase == 1:
            if node in nodemap:
                draftroots.append(node)
        else:
            repo.ui.warn(_("ignoring unexpected root from remote: %i %s\n")
                         % (phase, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots
def _partialmatch(self, id):
    try:
        return self.index.partialmatch(id)
    except RevlogError:
        # parsers.c radix tree lookup gave multiple matches
        raise LookupError(id, self.indexfile, _("ambiguous identifier"))
    except (AttributeError, ValueError):
        # we are pure python, or key was too short to search radix tree
        pass

    if id in self._pcache:
        return self._pcache[id]

    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            prefix = bin(id[:l * 2])
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [n for n in nl if hex(n).startswith(id)]
            if len(nl) > 0:
                if len(nl) == 1:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            pass
def _partialmatch(self, id):
    try:
        n = self.index.partialmatch(id)
        if n and self.hasnode(n):
            return n
        return None
    except RevlogError:
        # parsers.c radix tree lookup gave multiple matches
        # fall through to slow path that filters hidden revisions
        pass
    except (AttributeError, ValueError):
        # we are pure python, or key was too short to search radix tree
        pass

    if id in self._pcache:
        return self._pcache[id]

    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            prefix = bin(id[:l * 2])
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [n for n in nl
                  if hex(n).startswith(id) and self.hasnode(n)]
            if len(nl) > 0:
                if len(nl) == 1:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            pass
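# Hedged standalone sketch of the partial-hex-id matching shared by the
# _partialmatch variants above. Assumption: hex()/bin() are the
# hexlify/unhexlify pair over 20-byte binary nodes, as in mercurial.node.
from binascii import hexlify as hex, unhexlify as bin

nodes = [bin("abcd" * 10), bin("abce" * 10)]
id = "abcda"                      # odd-length user-supplied prefix
l = len(id) // 2                  # grab an even number of digits
prefix = bin(id[:l * 2])          # binary form of "abcd"
nl = [n for n in nodes if n.startswith(prefix)]
nl = [n for n in nl if hex(n).startswith(id)]  # settle the odd last digit
assert nl == [bin("abcd" * 10)]   # unique match; >1 would be ambiguous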
def lookup(self, key): self.requirecap('lookup', _('look up remote revision')) d = self._call("lookup", key=encoding.fromlocal(key)) success, data = d[:-1].split(" ", 1) if int(success): return bin(data) self._abort(error.RepoError(data))
def lookup(self, key): self.requirecap('lookup', _('look up remote revision')) d = self.do_cmd("lookup", key=key).read() success, data = d[:-1].split(' ', 1) if int(success): return bin(data) raise error.RepoError(data)
def lookup(self, key): self.requirecap('lookup', _('look up remote revision')) d = self.call("lookup", key=key) success, data = d[:-1].split(" ", 1) if int(success): return bin(data) else: self.raise_(repo.RepoError(data))
def read(repo):
    try:
        f = repo.vfs(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        return None

    try:
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        for l in lines:
            if not l:
                continue
            node, state, label = l.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            if node not in repo:
                raise ValueError('node %s does not exist' % node)
            node = bin(node)
            partial.setdefault(label, []).append(node)
            if state == 'c':
                partial._closednodes.add(node)
    except KeyboardInterrupt:
        raise
    except Exception as inst:
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.debug(msg % inst)
        partial = None
    return partial
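# Hedged sketch of the branchheads cache format read() parses above
# (derived from the parsing code): first a cache-key line
# "<tiphex> <tiprev>[ <filteredhash>]", then one "<nodehex> <state> <label>"
# per head, where state is 'o' (open) or 'c' (closed).
cachelines = [
    "ab" * 20 + " 42",             # cache key: tip node + tip rev
    "cd" * 20 + " o default",      # an open head on branch 'default'
    "ef" * 20 + " c stable",       # a closed head on branch 'stable'
]
node, state, label = cachelines[1].split(" ", 2)
assert state in 'oc' and label == "default"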
def lookup(self, key):
    self.requirecap('lookup', _('look up remote revision'))
    f = future()
    yield todict(key=encoding.fromlocal(key)), f
    d = f.value
    success, data = d[:-1].split(" ", 1)
    if int(success):
        yield bin(data)
    self._abort(error.RepoError(data))
def lookup(self, key): self.requirecap("lookup", _("look up remote revision")) f = future() yield {"key": encoding.fromlocal(key)}, f d = f.value success, data = d[:-1].split(" ", 1) if int(success): yield bin(data) self._abort(error.RepoError(data))
def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
    '''Read tag definitions from a file (or any source of lines).

    This function returns two sortdicts with similar information:

    - the first dict, bintaghist, contains the tag information as expected by
      the _readtags function, i.e. a mapping from tag name to (node, hist):
        - node is the node id from the last line read for that name,
        - hist is the list of node ids previously associated with it (in file
          order). All node ids are binary, not hex.

    - the second dict, hextaglines, is a mapping from tag name to a list of
      [hexnode, line number] pairs, ordered from the oldest to the newest node.

    When calcnodelines is False the hextaglines dict is not calculated (an
    empty dict is returned). This is done to improve this function's
    performance in cases where the line numbers are not needed.
    '''

    bintaghist = util.sortdict()
    hextaglines = util.sortdict()
    count = 0

    def warn(msg):
        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

    for nline, line in enumerate(lines):
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            warn(_("cannot parse entry"))
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            warn(_("node '%s' is not well formed") % nodehex)
            continue

        # update filetags
        if calcnodelines:
            # map tag name to a list of line numbers
            if name not in hextaglines:
                hextaglines[name] = []
            hextaglines[name].append([nodehex, nline])
            continue
        # map tag name to (node, hist)
        if name not in bintaghist:
            bintaghist[name] = []
        bintaghist[name].append(nodebin)
    return bintaghist, hextaglines
def _read(self): """Analyse each record content to restore a serialized state from disk This function process "record" entry produced by the de-serialization of on disk file. """ self._state = {} records = self._readrecords() for rtype, record in records: if rtype == 'L': self._local = bin(record) elif rtype == 'O': self._other = bin(record) elif rtype == "F": bits = record.split("\0") self._state[bits[0]] = bits[1:] elif not rtype.islower(): raise util.Abort(_('unsupported merge state record: %s') % rtype) self._dirty = False
def find(self, node, f):
    '''look up entry for a single file efficiently.
    return (node, flags) pair if found, (None, None) if not.'''
    if self.mapcache and node == self.mapcache[0]:
        return self.mapcache[1].get(f), self.mapcache[1].flags(f)
    text = self.revision(node)
    start, end = self._search(text, f)
    if start == end:
        return None, None
    l = text[start:end]
    f, n = l.split('\0')
    return bin(n[:40]), n[40:-1]
def branchmap(self): d = self.call("branchmap") try: branchmap = {} for branchpart in d.splitlines(): branchheads = branchpart.split(' ') branchname = urllib.unquote(branchheads[0]) branchheads = [bin(x) for x in branchheads[1:]] branchmap[branchname] = branchheads return branchmap except: raise error.ResponseError(_("unexpected response:"), d)
def _fm0readmarkers(data, off=0):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off : off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = _fm0fnodesize * numsuc
            cur = data[off : off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off : off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(
                _("parsing obsolete marker: metadata is too "
                  "short, %d bytes expected, got %d")
                % (mdsize, len(metadata))
            )
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop("date", "0 0").split(" ")
            date = float(when), int(offset)
        except ValueError:
            date = (0.0, 0)
        parents = None
        if "p2" in metadata:
            parents = (metadata.pop("p1", None), metadata.pop("p2", None))
        elif "p1" in metadata:
            parents = (metadata.pop("p1", None),)
        elif "p0" in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
def _fm0readmarkers(data, off):
    # Loop on markers
    l = len(data)
    while off + _fm0fsize <= l:
        # read fixed part
        cur = data[off:off + _fm0fsize]
        off += _fm0fsize
        numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
        # read replacement
        sucs = ()
        if numsuc:
            s = (_fm0fnodesize * numsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fm0node * numsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        metadata = _fm0decodemeta(metadata)
        try:
            when, offset = metadata.pop('date', '0 0').split(' ')
            date = float(when), int(offset)
        except ValueError:
            date = (0., 0)
        parents = None
        if 'p2' in metadata:
            parents = (metadata.pop('p1', None), metadata.pop('p2', None))
        elif 'p1' in metadata:
            parents = (metadata.pop('p1', None),)
        elif 'p0' in metadata:
            parents = ()
        if parents is not None:
            try:
                parents = tuple(node.bin(p) for p in parents)
                # if parent content is not a nodeid, drop the data
                for p in parents:
                    if len(p) != 20:
                        parents = None
                        break
            except TypeError:
                # if content cannot be translated to nodeid drop the data.
                parents = None

        metadata = tuple(sorted(metadata.iteritems()))

        yield (pre, sucs, flags, metadata, date, parents)
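# Hedged sketch of the "date" metadata handling shared by both
# _fm0readmarkers variants above: the entry is "<float-seconds> <int-offset>",
# falling back to (0., 0) when absent or malformed.
metadata = {'date': '1400000000.0 -3600'}
try:
    when, offset = metadata.pop('date', '0 0').split(' ')
    date = float(when), int(offset)
except ValueError:
    date = (0., 0)
assert date == (1400000000.0, -3600)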
def _read(self):
    self._state = {}
    try:
        f = self._repo.opener("merge/state")
        for i, l in enumerate(f):
            if i == 0:
                self._local = bin(l[:-1])
            else:
                bits = l[:-1].split("\0")
                self._state[bits[0]] = bits[1:]
    except IOError, err:
        if err.errno != errno.ENOENT:
            raise
def _partialmatch(self, id):
    if len(id) < 40:
        try:
            # hex(node)[:...]
            bin_id = bin(id[:len(id) & ~1])  # grab an even number of digits
            node = None
            for n in self.nodemap:
                if n.startswith(bin_id) and hex(n).startswith(id):
                    if node is not None:
                        raise LookupError(id, self.indexfile,
                                          _('ambiguous identifier'))
                    node = n
            if node is not None:
                return node
        except TypeError:
            pass
def _partialmatch(self, id):
    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) / 2  # grab an even number of digits
            bin_id = bin(id[:l*2])
            nl = [n for n in self.nodemap if n[:l] == bin_id]
            nl = [n for n in nl if hex(n).startswith(id)]
            if len(nl) > 0:
                if len(nl) == 1:
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            pass
def _partialmatch(self, id):
    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            bin_id = bin(id[:l * 2])
            nl = [n for n in self.nodemap if n[:l] == bin_id]
            nl = [n for n in nl if hex(n).startswith(id)]
            if len(nl) > 0:
                if len(nl) == 1:
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            pass
def readroots(repo):
    """Read phase roots from disk"""
    roots = [set() for i in allphases]
    try:
        f = repo.sopener('phaseroots')
        try:
            for line in f:
                phase, nh = line.strip().split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        for f in repo._phasedefaults:
            roots = f(repo, roots)
        repo._dirtyphases = True
    return roots
def pushphase(repo, nhex, oldphasestr, newphasestr): """List phases root for serialisation over pushkey""" lock = repo.lock() try: currentphase = repo[nhex].phase() newphase = abs(int(newphasestr)) # let's avoid negative index surprise oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise if currentphase == oldphase and newphase < oldphase: advanceboundary(repo, newphase, [bin(nhex)]) return 1 elif currentphase == newphase: # raced, but got correct result return 1 else: return 0 finally: lock.release()
def branchmap(self): d = self.do_read("branchmap") try: branchmap = {} for branchpart in d.splitlines(): branchheads = branchpart.split(' ') branchname = urllib.unquote(branchheads[0]) # Earlier servers (1.3.x) send branch names in (their) local # charset. The best we can do is assume it's identical to our # own local charset, in case it's not utf-8. try: branchname.decode('utf-8') except UnicodeDecodeError: branchname = encoding.fromlocal(branchname) branchheads = [bin(x) for x in branchheads[1:]] branchmap[branchname] = branchheads return branchmap except: raise error.ResponseError(_("unexpected response:"), d)
def _partialmatch(self, id):
    if id in self._pcache:
        return self._pcache[id]

    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2  # grab an even number of digits
            prefix = bin(id[:l * 2])
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [n for n in nl if hex(n).startswith(id)]
            if len(nl) > 0:
                if len(nl) == 1:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            pass
def _readtags(ui, repo, lines, fn, recode=None):
    '''Read tag definitions from a file (or any source of lines).
    Return a mapping from tag name to (node, hist): node is the node id
    from the last line read for that name, and hist is the list of node
    ids previously associated with it (in file order). All node ids are
    binary, not hex.'''

    filetags = {}               # map tag name to (node, hist)
    count = 0

    def warn(msg):
        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

    for line in lines:
        count += 1
        if not line:
            continue
        try:
            (nodehex, name) = line.split(" ", 1)
        except ValueError:
            warn(_("cannot parse entry"))
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin = bin(nodehex)
        except TypeError:
            warn(_("node '%s' is not well formed") % nodehex)
            continue
        if nodebin not in repo.changelog.nodemap:
            # silently ignore as pull -r might cause this
            continue

        # update filetags
        hist = []
        if name in filetags:
            n, hist = filetags[name]
            hist.append(n)
        filetags[name] = (nodebin, hist)
    return filetags
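# Hedged sketch of the .hgtags line format both tag readers above accept:
# "<40-hex-node> <tag name>". A later line for the same name overrides an
# earlier one, whose node goes into the hist list (values illustrative;
# this mirrors the update logic in _readtags without the repo checks).
from binascii import unhexlify as bin

filetags = {}
for line in ["ab" * 20 + " v1.0", "cd" * 20 + " v1.0"]:
    nodehex, name = line.split(" ", 1)
    hist = []
    if name in filetags:
        n, hist = filetags[name]
        hist.append(n)
    filetags[name] = (bin(nodehex), hist)
assert filetags["v1.0"] == (bin("cd" * 20), [bin("ab" * 20)])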
def read(self, node): """ format used: nodeid\n : manifest node in ascii user\n : user, no \n or \r allowed time tz extra\n : date (time is int or float, timezone is int) : extra is metadatas, encoded and separated by '\0' : older versions ignore it files\n\n : files modified by the cset, no \n or \r allowed (.*) : comment (free text, ideally utf-8) changelog v0 doesn't use extra """ text = self.revision(node) if not text: return (nullid, "", (0, 0), [], "", {'branch': 'default'}) last = text.index("\n\n") desc = encoding.tolocal(text[last + 2:]) l = text[:last].split('\n') manifest = bin(l[0]) user = encoding.tolocal(l[1]) extra_data = l[2].split(' ', 2) if len(extra_data) != 3: time = float(extra_data.pop(0)) try: # various tools did silly things with the time zone field. timezone = int(extra_data[0]) except ValueError: timezone = 0 extra = {} else: time, timezone, extra = extra_data time, timezone = float(time), int(timezone) extra = decodeextra(extra) if not extra.get('branch'): extra['branch'] = 'default' files = l[3:] return (manifest, user, (time, timezone), files, desc, extra)
def pushphase(repo, nhex, oldphasestr, newphasestr): """List phases root for serialization over pushkey""" repo = repo.unfiltered() tr = None lock = repo.lock() try: currentphase = repo[nhex].phase() newphase = abs(int(newphasestr)) # let's avoid negative index surprise oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise if currentphase == oldphase and newphase < oldphase: tr = repo.transaction('pushkey-phase') advanceboundary(repo, tr, newphase, [bin(nhex)]) tr.close() return 1 elif currentphase == newphase: # raced, but got correct result return 1 else: return 0 finally: if tr: tr.release() lock.release()
def _readtagcache(ui, repo):
    '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
    shouldwrite).  If the cache is completely up-to-date, cachetags is a
    dict of the form returned by _readtags(); otherwise, it is None and
    heads and fnodes are set.  In that case, heads is the list of all
    heads currently in the repository (ordered from tip to oldest) and
    fnodes is a mapping from head to .hgtags filenode.  If those two are
    set, caller is responsible for reading tag info from each head.'''

    try:
        cachefile = repo.opener('tags.cache', 'r')
        _debug(ui, 'reading tag cache from %s\n' % cachefile.name)
    except IOError:
        cachefile = None

    # The cache file consists of lines like
    #   <headrev> <headnode> [<tagnode>]
    # where <headrev> and <headnode> redundantly identify a repository
    # head from the time the cache was written, and <tagnode> is the
    # filenode of .hgtags on that head.  Heads with no .hgtags file will
    # have no <tagnode>.  The cache is ordered from tip to oldest (which
    # is part of why <headrev> is there: a quick visual check is all
    # that's required to ensure correct order).
    #
    # This information is enough to let us avoid the most expensive part
    # of finding global tags, which is looking up <tagnode> in the
    # manifest for each head.
    cacherevs = []                      # list of headrev
    cacheheads = []                     # list of headnode
    cachefnode = {}                     # map headnode to filenode
    if cachefile:
        for line in cachefile:
            if line == "\n":
                break
            line = line.rstrip().split()
            cacherevs.append(int(line[0]))
            headnode = bin(line[1])
            cacheheads.append(headnode)
            if len(line) == 3:
                fnode = bin(line[2])
                cachefnode[headnode] = fnode

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
        _debug(ui, "tag cache: tip unchanged\n")
        tags = _readtags(ui, repo, cachefile, cachefile.name)
        cachefile.close()
        return (None, None, tags, False)
    if cachefile:
        cachefile.close()               # ignore rest of file

    repoheads = repo.heads()

    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, {}, False)

    # Case 3 (uncommon): cache file missing or empty.
    if not cacheheads:
        _debug(ui, 'tag cache: cache file missing or empty\n')

    # Case 4 (uncommon): tip rev decreased.  This should only happen
    # when we're called from localrepository.destroyed().  Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.
    elif cacheheads and tiprev < cacherevs[0]:
        _debug(ui,
               'tag cache: tip rev decremented (from %d to %d), '
               'so we must be destroying nodes\n' % (cacherevs[0], tiprev))

    # Case 5 (common): tip has changed, so we've added/replaced heads.
    else:
        _debug(ui,
               'tag cache: tip has changed (%d:%s); must find new heads\n'
               % (tiprev, short(tipnode)))

    # Luckily, the code to handle cases 3, 4, 5 is the same.  So the
    # above if/elif/else can disappear once we're confident this thing
    # actually works and we don't need the debug output.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    newheads = [head
                for head in repoheads
                if head not in set(cacheheads)]
    _debug(ui, 'tag cache: found %d head(s) not in cache: %s\n'
           % (len(newheads), map(short, newheads)))

    # Now we have to lookup the .hgtags filenode for every new head.
    # This is the most expensive part of finding tags, so performance
    # depends primarily on the size of newheads.  Worst case: no cache
    # file, so newheads == repoheads.
    for head in newheads:
        cctx = repo[head]
        try:
            fnode = cctx.filenode('.hgtags')
            cachefnode[head] = fnode
        except error.LookupError:
            # no .hgtags file on this head
            pass

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgtags revision quickly.
    return (repoheads, cachefnode, None, True)
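# Hedged sketch of the tags.cache line format parsed by _readtagcache above:
# "<headrev> <headnode-hex> [<tagfilenode-hex>]", ordered tip to oldest
# (values illustrative).
from binascii import unhexlify as bin

line = "42 " + "ab" * 20 + " " + "cd" * 20
parts = line.rstrip().split()
cacherev, headnode = int(parts[0]), bin(parts[1])
fnode = bin(parts[2]) if len(parts) == 3 else None
assert cacherev == 42 and len(headnode) == 20 and fnode is not None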