def revision(self, node):
    """Return an uncompressed revision of a given node.

    Revisions stored in the underlying revlog are delegated to it;
    revisions that came from a changegroup bundle are rebuilt by
    patching a base revision with the stored delta chain.

    Raises error.RevlogError if the rebuilt text does not hash to
    the node.
    """
    # the null revision is always empty
    if node == nullid:
        return ""

    text = None
    chain = []
    iter_node = node
    rev = self.rev(iter_node)
    # reconstruct the revision if it is from a changegroup: walk the
    # delta chain backwards until we reach a revision that is not in
    # the bundle, or hit the cached revision
    while self.inbundle(rev):
        if self._cache and self._cache[0] == iter_node:
            text = self._cache[2]
            break
        chain.append(rev)
        iter_node = self.bundlebase(rev)
        rev = self.rev(iter_node)
    if text is None:
        # chain base lives in the on-disk revlog; read it from there
        text = revlog.revlog.revision(self, iter_node)

    # apply the collected deltas, starting with the one closest to
    # the base (chain was filled from node towards base, so pop()
    # yields base-most first)
    while chain:
        delta = self._chunk(chain.pop())
        text = mdiff.patches(text, [delta])

    # verify the reconstructed text against the recorded hash
    p1, p2 = self.parents(node)
    if node != revlog.hash(text, p1, p2):
        raise error.RevlogError(_("integrity check failed on %s:%d")
                                % (self.datafile, self.rev(node)))

    self._cache = (node, self.rev(node), text)
    return text
def revision(self, nodeorrev):
    """return an uncompressed revision of a given node or revision
    number.
    """
    # Callers may pass either a revision number or a node id;
    # normalize to having both.
    if isinstance(nodeorrev, int):
        rev = nodeorrev
        node = self.node(rev)
    else:
        node = nodeorrev
        rev = self.rev(node)

    if node == nullid:
        return ""

    # Revisions above repotiprev came from the bundle; walk their
    # delta chain back to a repository revision (or a cache hit),
    # remembering which deltas must be replayed.
    base = None
    pending = []
    cur = rev
    while cur > self.repotiprev:
        if self._cache and self._cache[1] == cur:
            base = self._cache[2]
            break
        pending.append(cur)
        cur = self.index[cur][3]

    if base is None:
        base = self.baserevision(cur)

    # Replay the deltas base-most first (last pushed, first popped).
    while pending:
        base = mdiff.patches(base, [self._chunk(pending.pop())])

    self._checkhash(base, node, rev)

    self._cache = (node, rev, base)
    return base
def revision(self, node):
    """Return an uncompressed revision of a given node.

    Bundle revisions are reconstructed by applying their delta chain
    on top of a base revision read from the wrapped revlog.

    Raises revlog.RevlogError when the result fails the hash check.
    """
    # null revision is the empty string by definition
    if node == nullid:
        return ""

    text = None
    chain = []
    iter_node = node
    rev = self.rev(iter_node)
    # reconstruct the revision if it is from a changegroup: collect
    # the deltas while walking towards a non-bundle base (stopping
    # early on a cache hit)
    while self.bundle(rev):
        if self._cache and self._cache[0] == iter_node:
            text = self._cache[2]
            break
        chain.append(rev)
        iter_node = self.bundlebase(rev)
        rev = self.rev(iter_node)
    if text is None:
        # base text comes from the underlying revlog
        text = revlog.revlog.revision(self, iter_node)

    # apply deltas base-most first (pop() reverses the append order)
    while chain:
        delta = self.chunk(chain.pop())
        text = mdiff.patches(text, [delta])

    # integrity check: the text must hash to the node given its parents
    p1, p2 = self.parents(node)
    if node != revlog.hash(text, p1, p2):
        raise revlog.RevlogError(
            _("integrity check failed on %s:%d")
            % (self.datafile, self.rev(node)))

    self._cache = (node, self.rev(node), text)
    return text
def revision(self, node):
    """return an uncompressed revision of a given node"""
    cachedrev = None
    if node == nullid:
        return ""
    if self._cache:
        if self._cache[0] == node:
            # exact cache hit: return the cached text directly
            return self._cache[2]
        # remember the cached rev: the chain walk below may be able
        # to stop there instead of at the chain base
        cachedrev = self._cache[1]

    # look up what we need to read
    text = None
    rev = self.rev(node)
    base = self.base(rev)

    # check rev flags: refuse revisions carrying flags we don't know
    if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
        raise RevlogError(
            _('incompatible revision flag %x') %
            (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

    # build delta chain
    self._loadindex(base, rev + 1)
    chain = []
    index = self.index # for performance
    iterrev = rev
    e = index[iterrev]
    while iterrev != base and iterrev != cachedrev:
        chain.append(iterrev)
        if e[0] & REVIDX_PARENTDELTA:
            # parent-delta revision: follow index field 5 to its base
            iterrev = e[5]
        else:
            # classic linear delta: previous revision is the base
            iterrev -= 1
        e = index[iterrev]
    chain.reverse()
    base = iterrev

    if iterrev == cachedrev:
        # cache hit
        text = self._cache[2]

    # drop cache to save memory
    self._cache = None

    # warm the chunk cache for the whole span, then read the base
    self._chunkraw(base, rev)
    if text is None:
        text = self._chunk(base)

    # apply the deltas in chain order (base-most first after reverse)
    bins = [self._chunk(r) for r in chain]
    text = mdiff.patches(text, bins)
    p1, p2 = self.parents(node)
    # punched revisions are allowed to fail the hash check
    if (node != hash(text, p1, p2) and not (self.flags(rev) &
                                            REVIDX_PUNCHED_FLAG)):
        raise RevlogError(
            _("integrity check failed on %s:%d")
            % (self.indexfile, rev))

    self._cache = (node, rev, text)
    return text
def revision(self, node):
    """return an uncompressed revision of a given node"""
    cachedrev = None
    if node == nullid:
        return ""
    if self._cache:
        if self._cache[0] == node:
            return self._cache[2]
        # keep the cached rev number: we can stop the delta-chain
        # walk there and reuse the cached text as a base
        cachedrev = self._cache[1]

    # look up what we need to read
    text = None
    rev = self.rev(node)
    base = self.base(rev)

    # check rev flags: bail out on unknown revision flags
    if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
        raise RevlogError(_('incompatible revision flag %x') %
                          (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

    # build delta chain, walking from rev back to its base (or to the
    # cached revision, whichever comes first)
    self._loadindex(base, rev + 1)
    chain = []
    index = self.index # for performance
    iterrev = rev
    e = index[iterrev]
    while iterrev != base and iterrev != cachedrev:
        chain.append(iterrev)
        if e[0] & REVIDX_PARENTDELTA:
            # delta is against a parent; index field 5 names it
            iterrev = e[5]
        else:
            iterrev -= 1
        e = index[iterrev]
    chain.reverse()
    base = iterrev

    if iterrev == cachedrev:
        # cache hit
        text = self._cache[2]

    # drop cache to save memory
    self._cache = None

    # prefetch the raw data for the span, then read the base chunk
    self._chunkraw(base, rev)
    if text is None:
        text = self._chunk(base)

    bins = [self._chunk(r) for r in chain]
    text = mdiff.patches(text, bins)
    p1, p2 = self.parents(node)
    # a punched revision is exempt from the integrity check
    if (node != hash(text, p1, p2) and not (self.flags(rev) &
                                            REVIDX_PUNCHED_FLAG)):
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.indexfile, rev))

    self._cache = (node, rev, text)
    return text
def revision(self, node):
    """return an uncompressed revision of a given node"""
    cachedrev = None
    if node == nullid:
        return ""
    if self._cache:
        if self._cache[0] == node:
            return self._cache[2]
        # the cached rev may serve as a base for the delta chain walk
        cachedrev = self._cache[1]

    # look up what we need to read
    text = None
    rev = self.rev(node)

    # check rev flags: unknown flags make the revision unreadable
    if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
        raise RevlogError(
            _('incompatible revision flag %x') %
            (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

    # build delta chain; a revision whose index field 3 equals itself
    # is its own base (a full snapshot), which ends the walk
    chain = []
    index = self.index # for performance
    generaldelta = self._generaldelta
    iterrev = rev
    e = index[iterrev]
    while iterrev != e[3] and iterrev != cachedrev:
        chain.append(iterrev)
        if generaldelta:
            # generaldelta: index field 3 names the delta base
            iterrev = e[3]
        else:
            # classic layout: the delta is against the previous rev
            iterrev -= 1
        e = index[iterrev]
    chain.reverse()
    base = iterrev

    if iterrev == cachedrev:
        # cache hit
        text = self._cache[2]

    # drop cache to save memory
    self._cache = None

    # warm the chunk cache for the full span before reading pieces
    self._chunkraw(base, rev)
    if text is None:
        text = self._chunkbase(base)

    bins = [self._chunk(r) for r in chain]
    text = mdiff.patches(text, bins)
    text = self._checkhash(text, node, rev)

    self._cache = (node, rev, text)
    return text
def revision(self, node):
    """return an uncompressed revision of a given node"""
    cachedrev = None
    if node == nullid:
        return ""
    if self._cache:
        if self._cache[0] == node:
            return self._cache[2]
        cachedrev = self._cache[1]

    text = None
    rev = self.rev(node)

    # refuse revisions that carry flags we do not understand
    unknownflags = self.flags(rev) & ~REVIDX_KNOWN_FLAGS
    if unknownflags:
        raise RevlogError(_('incompatible revision flag %x') %
                          unknownflags)

    # Walk from rev back towards its delta base, recording every
    # revision whose delta must be applied.  A revision whose index
    # field 3 points at itself is a full snapshot and ends the walk;
    # so does reaching the cached revision.
    index = self.index # for performance
    usegd = self._generaldelta
    deltarevs = []
    cur = rev
    entry = index[cur]
    while cur != entry[3] and cur != cachedrev:
        deltarevs.append(cur)
        if usegd:
            cur = entry[3]
        else:
            cur = cur - 1
        entry = index[cur]
    deltarevs.reverse()
    base = cur

    if cur == cachedrev:
        # cache hit: reuse the cached text as the chain base
        text = self._cache[2]

    # drop cache to save memory
    self._cache = None

    # prefetch the raw span, then read the base if not cached
    self._chunkraw(base, rev)
    if text is None:
        text = self._chunkbase(base)

    text = mdiff.patches(text, [self._chunk(d) for d in deltarevs])
    text = self._checkhash(text, node, rev)

    self._cache = (node, rev, text)
    return text
def revision(self, node):
    """Return an uncompressed revision of a given node."""
    if node == nullid:
        return ""
    if self._cache and self._cache[0] == node:
        return str(self._cache[2])

    # look up what we need to read
    text = None
    rev = self.rev(node)
    base = self.base(rev)

    # check rev flags: any flag bits set mean an incompatible revision
    if self.index[rev][0] & 0xFFFF:
        raise RevlogError(
            _('incompatible revision flag %x') %
            (self.index[rev][0] & 0xFFFF))

    df = None

    # do we have useful data cached?  A cached rev inside [base, rev)
    # can serve as the chain base, sparing us reading below it.
    if self._cache and self._cache[1] >= base and self._cache[1] < rev:
        base = self._cache[1]
        text = str(self._cache[2])
        self._loadindex(base, rev + 1)
        # open the data file only if we must read more than one chunk
        if not self._inline and rev > base + 1:
            df = self.opener(self.datafile)
    else:
        self._loadindex(base, rev + 1)
        if not self._inline and rev > base:
            df = self.opener(self.datafile)
        text = self.chunk(base, df=df)

    # apply each delta from base+1 up to rev in order
    bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
    text = mdiff.patches(text, bins)
    p1, p2 = self.parents(node)
    # verify the reconstructed text hashes to the node
    if node != hash(text, p1, p2):
        raise RevlogError(
            _("integrity check failed on %s:%d")
            % (self.datafile, rev))

    self._cache = (node, rev, text)
    return text
def revision(self, node):
    """return an uncompressed revision of a given node"""
    if node == nullid:
        return ""
    if self._cache and self._cache[0] == node:
        return str(self._cache[2])

    # look up what we need to read
    text = None
    rev = self.rev(node)
    base = self.base(rev)

    # check rev flags: refuse any revision with flag bits set
    if self.index[rev][0] & 0xFFFF:
        raise RevlogError(_('incompatible revision flag %x') %
                          (self.index[rev][0] & 0xFFFF))

    df = None

    # do we have useful data cached?  If the cached rev lies between
    # the chain base and rev, start patching from it instead.
    if self._cache and self._cache[1] >= base and self._cache[1] < rev:
        base = self._cache[1]
        text = str(self._cache[2])
        self._loadindex(base, rev + 1)
        # the data file is only needed when reading several chunks
        if not self._inline and rev > base + 1:
            df = self.opener(self.datafile)
    else:
        self._loadindex(base, rev + 1)
        if not self._inline and rev > base:
            df = self.opener(self.datafile)
        text = self.chunk(base, df=df)

    # patch forward from base+1 through rev
    bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
    text = mdiff.patches(text, bins)
    p1, p2 = self.parents(node)
    # integrity check against the recorded node hash
    if node != hash(text, p1, p2):
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.datafile, rev))

    self._cache = (node, rev, text)
    return text
def revision(self, node):
    """Return an uncompressed revision of a given node.

    Bundle revisions are rebuilt by applying their delta chain on top
    of a base revision read from the wrapped revlog.

    Raises revlog.RevlogError if the reconstructed text does not hash
    to the node.
    """
    if node == nullid:
        return ""

    text = None
    chain = []
    iter_node = node
    rev = self.rev(iter_node)
    # reconstruct the revision if it is from a changegroup: collect
    # deltas while walking back to a non-bundle base, stopping early
    # on a cache hit
    while self.bundle(rev):
        if self._cache and self._cache[0] == iter_node:
            text = self._cache[2]
            break
        chain.append(rev)
        iter_node = self.bundlebase(rev)
        rev = self.rev(iter_node)
    if text is None:
        text = revlog.revlog.revision(self, iter_node)

    # apply deltas base-most first (pop() reverses the append order)
    while chain:
        delta = self.chunk(chain.pop())
        text = mdiff.patches(text, [delta])

    p1, p2 = self.parents(node)
    # BUG FIX: a failed integrity check must raise; previously the
    # corrupt text fell through, was cached, and returned to callers
    if node != revlog.hash(text, p1, p2):
        raise revlog.RevlogError(
            _("integrity check failed on %s:%d")
            % (self.datafile, self.rev(node)))

    self._cache = (node, self.rev(node), text)
    return text

def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
    # bundle revlogs are read-only overlays; writing is unsupported
    raise NotImplementedError

def addgroup(self, revs, linkmapper, transaction):
    # bundle revlogs are read-only overlays; writing is unsupported
    raise NotImplementedError

def strip(self, rev, minlink):
    # stripping a read-only bundle overlay is unsupported
    raise NotImplementedError

def checksize(self):
    # size accounting is not meaningful for a bundle overlay
    raise NotImplementedError
def addgroup(self, revs, linkmapper, transaction):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.
    """

    #track the base of the current delta log
    r = len(self)
    t = r - 1
    node = None

    base = prev = nullrev
    start = end = textlen = 0
    if r:
        end = self.end(t)

    # register the pending writes with the transaction so a failure
    # can be rolled back
    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chain = None
        for chunk in revs:
            # each chunk starts with an 80-byte header of four nodes:
            # node, p1, p2 and the changeset (cs) it links to
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                continue
            delta = buffer(chunk, 80)
            del chunk

            # both parents must already be known
            for p in (p1, p2):
                if not p in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise LookupError(chain, self.indexfile,
                                      _('unknown base'))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)
                cdeltalen = len(cdelta[0]) + len(cdelta[1])
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + cdeltalen) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                if len(text) == 0:
                    # skip over trivial delta header
                    text = buffer(delta, 12)
                else:
                    text = mdiff.patches(text, [delta])
                del delta
                chk = self._addrevision(text, transaction, link, p1, p2,
                                        None, ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                # store the compressed delta directly, writing the new
                # index entry and payload to the appropriate files
                e = (offset_type(end, 0), cdeltalen, textlen, base,
                     link, self.rev(p1), self.rev(p2), node)
                self.index.insert(-1, e)
                self.nodemap[node] = r
                entry = self._io.packentry(e, self.node, self.version, r)
                if self._inline:
                    ifh.write(entry)
                    ifh.write(cdelta[0])
                    ifh.write(cdelta[1])
                    self.checkinlinesize(transaction, ifh)
                    if not self._inline:
                        # checkinlinesize migrated us off inline storage;
                        # reopen handles in the conventional layout
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta[0])
                    dfh.write(cdelta[1])
                    ifh.write(entry)

            # advance the rolling state for the next delta in the group
            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return node
def revision(self, nodeorrev):
    """return an uncompressed revision of a given node or revision
    number.
    """
    # accept either a revision number or a node id
    if isinstance(nodeorrev, int):
        rev = nodeorrev
        node = self.node(rev)
    else:
        node = nodeorrev
        rev = None

    _cache = self._cache # grab local copy of cache to avoid thread race
    cachedrev = None
    if node == nullid:
        return ""
    if _cache:
        if _cache[0] == node:
            return _cache[2]
        # the cached rev may terminate the delta chain walk early
        cachedrev = _cache[1]

    # look up what we need to read
    text = None
    if rev is None:
        rev = self.rev(node)

    # check rev flags: unknown flag bits make the revision unreadable
    if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
        raise RevlogError(
            _('incompatible revision flag %x') %
            (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

    # build delta chain; a revision whose index field 3 is itself is
    # a full snapshot and terminates the walk
    chain = []
    index = self.index # for performance
    generaldelta = self._generaldelta
    iterrev = rev
    e = index[iterrev]
    while iterrev != e[3] and iterrev != cachedrev:
        chain.append(iterrev)
        if generaldelta:
            iterrev = e[3]
        else:
            iterrev -= 1
        e = index[iterrev]

    if iterrev == cachedrev:
        # cache hit: cached text serves as the base, so the base rev
        # itself is not added to the chain
        text = _cache[2]
    else:
        chain.append(iterrev)
    chain.reverse()

    # drop cache to save memory
    self._cache = None

    # read all needed chunks in one call
    bins = self._chunks(chain)
    if text is None:
        # first chunk is the full base text, the rest are deltas
        text = str(bins[0])
        bins = bins[1:]

    text = mdiff.patches(text, bins)

    text = self._checkhash(text, node, rev)
    self._cache = (node, rev, text)
    return text
def addgroup(self, revs, linkmapper, transaction, unique=0):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.
    """

    #track the base of the current delta log
    r = self.count()
    t = r - 1
    node = None

    base = prev = nullrev
    start = end = textlen = 0
    if r:
        end = self.end(t)

    # register pending writes with the transaction for rollback
    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chain = None
        for chunk in revs:
            # 80-byte header: node, p1, p2 and the linked changeset
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") %
                #                      hex(node[:4]))
                chain = node
                continue
            delta = buffer(chunk, 80)
            del chunk

            # both parents must already be present in this revlog
            for p in (p1, p2):
                if not p in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise LookupError(chain, self.indexfile,
                                      _('unknown base'))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)
                cdeltalen = len(cdelta[0]) + len(cdelta[1])
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + cdeltalen) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                if len(text) == 0:
                    # skip over trivial delta header
                    text = buffer(delta, 12)
                else:
                    text = mdiff.patches(text, [delta])
                del delta
                chk = self._addrevision(text, transaction, link, p1, p2,
                                        None, ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                # keep the delta as-is: append index entry and payload
                e = (offset_type(end, 0), cdeltalen, textlen, base,
                     link, self.rev(p1), self.rev(p2), node)
                self.index.insert(-1, e)
                self.nodemap[node] = r
                entry = self._io.packentry(e, self.node, self.version, r)
                if self._inline:
                    ifh.write(entry)
                    ifh.write(cdelta[0])
                    ifh.write(cdelta[1])
                    self.checkinlinesize(transaction, ifh)
                    if not self._inline:
                        # storage migrated off inline layout; reopen
                        # the handles in conventional mode
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta[0])
                    dfh.write(cdelta[1])
                    ifh.write(entry)

            # roll state forward for the next delta in the group
            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return node
def revision(self, nodeorrev):
    """return an uncompressed revision of a given node or revision
    number.
    """
    # normalize: callers may pass a rev number or a node id
    if isinstance(nodeorrev, int):
        rev = nodeorrev
        node = self.node(rev)
    else:
        node = nodeorrev
        rev = None

    cache = self._cache # grab local copy of cache to avoid thread race
    if node == nullid:
        return ""
    cachedrev = None
    if cache:
        if cache[0] == node:
            return cache[2]
        cachedrev = cache[1]

    if rev is None:
        rev = self.rev(node)

    # refuse revisions carrying flags we do not understand
    unknownflags = self.flags(rev) & ~REVIDX_KNOWN_FLAGS
    if unknownflags:
        raise RevlogError(_('incompatible revision flag %x') %
                          unknownflags)

    # Collect the chain of revisions whose deltas rebuild rev.  The
    # walk stops at a full snapshot (index field 3 pointing at the
    # revision itself) or at the cached revision.
    index = self.index # for performance
    gd = self._generaldelta
    deltachain = []
    cur = rev
    entry = index[cur]
    while cur != entry[3] and cur != cachedrev:
        deltachain.append(cur)
        if gd:
            cur = entry[3]
        else:
            cur = cur - 1
        entry = index[cur]

    text = None
    if cur == cachedrev:
        # cache hit: the cached text is the base, so the base rev is
        # not part of the chunk list
        text = cache[2]
    else:
        deltachain.append(cur)
    deltachain.reverse()

    # drop cache to save memory
    self._cache = None

    bins = self._chunks(deltachain)
    if text is None:
        # first chunk is the full base text; the remainder are deltas
        text = str(bins[0])
        bins = bins[1:]

    text = mdiff.patches(text, bins)
    text = self._checkhash(text, node, rev)

    self._cache = (node, rev, text)
    return text