def _addrevision(self, node, text, transaction, link, p1, p2,
                 cachedelta, ifh, dfh):
    """internal function to add revisions to the log

    see addrevision for argument descriptions.
    invariants:
    - text is optional (can be None); if not set, cachedelta must be set.
      if both are set, they must correspond to each other.
    """
    btext = [text]
    def buildtext():
        if btext[0] is not None:
            return btext[0]
        # flush any pending writes here so we can read it in revision
        if dfh:
            dfh.flush()
        ifh.flush()
        basetext = self.revision(self.node(cachedelta[0]))
        btext[0] = mdiff.patch(basetext, cachedelta[1])
        chk = hash(btext[0], p1, p2)
        if chk != node:
            raise RevlogError(_("consistency error in delta"))
        return btext[0]

    def builddelta(rev):
        # can we use the cached delta?
        if cachedelta and cachedelta[0] == rev:
            delta = cachedelta[1]
        else:
            t = buildtext()
            ptext = self.revision(self.node(rev))
            delta = mdiff.textdiff(ptext, t)
        data = compress(delta)
        l = len(data[1]) + len(data[0])
        if basecache[0] == rev:
            chainbase = basecache[1]
        else:
            chainbase = self.chainbase(rev)
        dist = l + offset - self.start(chainbase)
        if self._generaldelta:
            base = rev
        else:
            base = chainbase
        return dist, l, data, base, chainbase

    curr = len(self)
    prev = curr - 1
    base = chainbase = curr
    offset = self.end(prev)
    flags = 0
    d = None
    basecache = self._basecache
    p1r, p2r = self.rev(p1), self.rev(p2)

    # should we try to build a delta?
    if prev != nullrev:
        if self._generaldelta:
            if p1r >= basecache[1]:
                d = builddelta(p1r)
            elif p2r >= basecache[1]:
                d = builddelta(p2r)
            else:
                d = builddelta(prev)
        else:
            d = builddelta(prev)
        dist, l, data, base, chainbase = d

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    if text is None:
        textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                    cachedelta[1])
    else:
        textlen = len(text)
    if d is None or dist > textlen * 2:
        text = buildtext()
        data = compress(text)
        l = len(data[1]) + len(data[0])
        base = chainbase = curr

    e = (offset_type(offset, flags), l, textlen,
         base, link, p1r, p2r, node)
    self.index.insert(-1, e)
    self.nodemap[node] = curr

    entry = self._io.packentry(e, self.node, self.version, curr)
    if not self._inline:
        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, curr * len(entry))
        if data[0]:
            dfh.write(data[0])
        dfh.write(data[1])
        dfh.flush()
        ifh.write(entry)
    else:
        offset += curr * self._io.size
        transaction.add(self.indexfile, offset, curr)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        self.checkinlinesize(transaction, ifh)

    if type(text) == str: # only accept immutable objects
        self._cache = (node, curr, text)
    self._basecache = (curr, chainbase)
    return node
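A minimal standalone sketch of the snapshot heuristic `_addrevision` applies: once the distance from the chain base (the accumulated compressed data since the last full version) exceeds twice the uncompressed text length, a full compressed revision is stored instead of another delta. `choose_storage` and the use of `zlib` here are illustrative stand-ins for revlog's `compress`, not Mercurial API.

import zlib

def choose_storage(dist_so_far, delta, text):
    # compress the candidate delta, as compress() does above
    cdelta = zlib.compress(delta)
    dist = dist_so_far + len(cdelta)
    # the "dist > textlen * 2" rule: full versions are inserted when
    # the needed deltas become comparable to the uncompressed text
    if dist > len(text) * 2:
        return 'full', zlib.compress(text)
    return 'delta', cdelta

if __name__ == '__main__':
    text = b'x' * 1000
    kind, _ = choose_storage(10, b'tiny delta', text)
    assert kind == 'delta'      # chain still short: keep appending deltas
    kind, _ = choose_storage(5000, b'tiny delta', text)
    assert kind == 'full'       # chain too long: store a snapshot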
def _addrevision(self, node, text, transaction, link, p1, p2,
                 cachedelta, ifh, dfh):
    """internal function to add revisions to the log

    see addrevision for argument descriptions.
    invariants:
    - text is optional (can be None); if not set, cachedelta must be set.
      if both are set, they must correspond to each other.
    """
    btext = [text]
    def buildtext():
        if btext[0] is not None:
            return btext[0]
        # flush any pending writes here so we can read it in revision
        if dfh:
            dfh.flush()
        ifh.flush()
        basetext = self.revision(self.node(cachedelta[0]))
        btext[0] = mdiff.patch(basetext, cachedelta[1])
        chk = hash(btext[0], p1, p2)
        if chk != node:
            raise RevlogError(_("consistency error in delta"))
        return btext[0]

    def builddelta(rev):
        # can we use the cached delta?
        if cachedelta and cachedelta[0] == rev:
            delta = cachedelta[1]
        else:
            t = buildtext()
            ptext = self.revision(self.node(rev))
            delta = mdiff.textdiff(ptext, t)
        data = compress(delta)
        l = len(data[1]) + len(data[0])
        base = self.base(rev)
        dist = l + offset - self.start(base)
        return dist, l, data, base

    curr = len(self)
    prev = curr - 1
    base = curr
    offset = self.end(prev)
    flags = 0
    d = None
    p1r, p2r = self.rev(p1), self.rev(p2)

    # should we try to build a delta?
    if prev != nullrev:
        d = builddelta(prev)
        if self._parentdelta and prev != p1r:
            d2 = builddelta(p1r)
            if d2 < d:
                d = d2
                flags = REVIDX_PARENTDELTA
        dist, l, data, base = d

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    # or the base revision is punched
    if text is None:
        textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                    cachedelta[1])
    else:
        textlen = len(text)
    if (d is None or dist > textlen * 2 or
        (self.flags(base) & REVIDX_PUNCHED_FLAG)):
        text = buildtext()
        data = compress(text)
        l = len(data[1]) + len(data[0])
        base = curr

    e = (offset_type(offset, flags), l, textlen,
         base, link, p1r, p2r, node)
    self.index.insert(-1, e)
    self.nodemap[node] = curr

    entry = self._io.packentry(e, self.node, self.version, curr)
    if not self._inline:
        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, curr * len(entry))
        if data[0]:
            dfh.write(data[0])
        dfh.write(data[1])
        dfh.flush()
        ifh.write(entry)
    else:
        offset += curr * self._io.size
        transaction.add(self.indexfile, offset, curr)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        self.checkinlinesize(transaction, ifh)

    if type(text) == str: # only accept immutable objects
        self._cache = (node, curr, text)
    return node
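The parentdelta branch above builds a second candidate delta against the first parent and keeps it only when it wins the comparison `d2 < d` (the tuples compare element-wise, so distance decides first). A rough standalone sketch of that choice, with `difflib` standing in for `mdiff.textdiff` and delta size as the whole comparison key; every name here is illustrative, not Mercurial API.

import difflib

def textdiff(old, new):
    # crude stand-in for mdiff.textdiff: a unified diff, as bytes
    return '\n'.join(difflib.unified_diff(
        old.splitlines(), new.splitlines(), lineterm='')).encode()

def pick_delta(prev_text, p1_text, new_text):
    d_prev = textdiff(prev_text, new_text)
    d_p1 = textdiff(p1_text, new_text)
    # prefer the parent delta only when it is strictly smaller,
    # mirroring "if d2 < d: ... flags = REVIDX_PARENTDELTA"
    if len(d_p1) < len(d_prev):
        return 'p1', d_p1
    return 'prev', d_prev

if __name__ == '__main__':
    base, _ = pick_delta(prev_text='x\ny\nz\n',
                         p1_text='a\nb\nc\n',
                         new_text='a\nb\nc\nd\n')
    assert base == 'p1'   # the first parent was the closer base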
def addgroup(self, revs, linkmapper, transaction):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.
    """

    # track the base of the current delta log
    r = len(self)
    t = r - 1
    node = None

    base = prev = nullrev
    start = end = textlen = 0
    if r:
        end = self.end(t)

    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                continue
            delta = buffer(chunk, 80)
            del chunk

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise LookupError(chain, self.indexfile,
                                      _('unknown base'))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)
                cdeltalen = len(cdelta[0]) + len(cdelta[1])
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + cdeltalen) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                if len(text) == 0:
                    # skip over trivial delta header
                    text = buffer(delta, 12)
                else:
                    text = mdiff.patches(text, [delta])
                del delta
                chk = self._addrevision(text, transaction, link, p1, p2,
                                        None, ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                e = (offset_type(end, 0), cdeltalen, textlen, base,
                     link, self.rev(p1), self.rev(p2), node)
                self.index.insert(-1, e)
                self.nodemap[node] = r
                entry = self._io.packentry(e, self.node, self.version, r)
                if self._inline:
                    ifh.write(entry)
                    ifh.write(cdelta[0])
                    ifh.write(cdelta[1])
                    self.checkinlinesize(transaction, ifh)
                    if not self._inline:
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta[0])
                    dfh.write(cdelta[1])
                    ifh.write(entry)

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return node
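Each chunk consumed by `addgroup` starts with an 80-byte header of four 20-byte binary nodes (the revision, its two parents, and the changeset it links to), followed by the raw delta, which is what `struct.unpack("20s20s20s20s", chunk[:80])` decodes above. A self-contained sketch of that layout; `make_chunk` and `parse_chunk` are illustrative helpers, not Mercurial functions.

import struct

def make_chunk(node, p1, p2, cs, delta):
    # 80-byte header: node, parent1, parent2, linked changeset
    assert all(len(n) == 20 for n in (node, p1, p2, cs))
    return struct.pack("20s20s20s20s", node, p1, p2, cs) + delta

def parse_chunk(chunk):
    node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
    return node, p1, p2, cs, chunk[80:]

if __name__ == '__main__':
    nullid = b'\0' * 20
    chunk = make_chunk(b'n' * 20, nullid, nullid, b'c' * 20, b'payload')
    assert parse_chunk(chunk)[4] == b'payload'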
def addgroup(self, revs, linkmapper, transaction, unique=0):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.
    """

    # track the base of the current delta log
    r = self.count()
    t = r - 1
    node = None

    base = prev = nullrev
    start = end = textlen = 0
    if r:
        end = self.end(t)

    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #     raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = buffer(chunk, 80)
            del chunk

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise LookupError(p, self.indexfile,
                                      _('unknown parent'))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise LookupError(chain, self.indexfile,
                                      _('unknown base'))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)
                cdeltalen = len(cdelta[0]) + len(cdelta[1])
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + cdeltalen) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                if len(text) == 0:
                    # skip over trivial delta header
                    text = buffer(delta, 12)
                else:
                    text = mdiff.patches(text, [delta])
                del delta
                chk = self._addrevision(text, transaction, link, p1, p2,
                                        None, ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                e = (offset_type(end, 0), cdeltalen, textlen, base,
                     link, self.rev(p1), self.rev(p2), node)
                self.index.insert(-1, e)
                self.nodemap[node] = r
                entry = self._io.packentry(e, self.node, self.version, r)
                if self._inline:
                    ifh.write(entry)
                    ifh.write(cdelta[0])
                    ifh.write(cdelta[1])
                    self.checkinlinesize(transaction, ifh)
                    if not self._inline:
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta[0])
                    dfh.write(cdelta[1])
                    ifh.write(entry)

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return node
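The `len(text) == 0` shortcut above relies on a delta against empty text being a single patch fragment, so skipping its 12-byte header (three big-endian 32-bit integers: start, end, new length) yields the new full text directly. A sketch of that trick, assuming the standard bdiff/mpatch fragment layout; the helper names are illustrative.

import struct

def trivial_delta(text):
    # one fragment replacing the empty range [0, 0) with `text`
    return struct.pack(">lll", 0, 0, len(text)) + text

def skip_trivial_header(delta):
    start, end, length = struct.unpack(">lll", delta[:12])
    assert (start, end) == (0, 0) and length == len(delta) - 12
    return delta[12:]

if __name__ == '__main__':
    d = trivial_delta(b'full text of the new revision')
    assert skip_trivial_header(d) == b'full text of the new revision'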