def get(self, count):
    """Consume and return the next *count* bytes of buffered data.

    Small reads (<= 256 bytes) return a real byte-string copy; larger
    reads hand back a zero-copy buffer() view to avoid the slice cost.
    Advances self.start past the returned bytes either way.
    """
    start = self.start
    if count > 256:
        v = buffer(self.data, start, count)
    else:
        v = self.data[start:start + count]
    self.start = start + count
    return v
def __init__(self, filename, f):
    """Parse a version-1 git pack index (.idx) file mapped from *f*.

    V1 indexes start directly with the 256-entry fanout table (no magic
    header), followed by (4-byte offset, 20-byte sha) records.
    """
    self.name = filename
    self.idxnames = [self.name]
    self.map = mmap_read(f)
    # Min size for 'L' is 4, which is sufficient for struct's '!I'.
    # FIX: struct.unpack() demands the buffer length equal exactly
    # calcsize('!256I') (1024 bytes), but self.map spans the whole file,
    # so the old call raised struct.error on any real index.  unpack_from()
    # reads just the leading fanout table — matching how the V2 parser in
    # this file already does it.
    self.fanout = array('L', struct.unpack_from('!256I', self.map, offset=0))
    self.fanout.append(0)  # entry "-1" so fanout[-1] is a valid 0 sentinel
    self.nsha = self.fanout[255]  # last fanout entry == total object count
    self.sha_ofs = 256 * 4
    # Avoid slicing shatable for individual hashes (very high overhead);
    # V1 records are 24 bytes each (4-byte offset + 20-byte sha).
    self.shatable = buffer(self.map, self.sha_ofs, self.nsha * 24)
def __init__(self, filename, f):
    """Parse a version-2 git pack index (.idx) file mapped from *f*.

    Records the offsets of the sha, 32-bit-offset and 64-bit-offset
    tables so lookups can index into the mmap directly.
    """
    self.name = filename
    self.idxnames = [self.name]
    self.map = mmap_read(f)
    # V2 magic: '\377tOc' followed by big-endian version number 2.
    assert self.map[0:8] == b'\377tOc\0\0\0\2'
    # Min size for 'L' is 4, which is sufficient for struct's '!I'.
    fanout = array('L', struct.unpack_from('!256I', self.map, offset=8))
    fanout.append(0)  # entry "-1" sentinel
    self.fanout = fanout
    nsha = fanout[255]
    self.nsha = nsha
    self.sha_ofs = 8 + 256 * 4
    # Table layout after the fanout: shas (20B each), then crc32s
    # (4B each), then 32-bit offsets, then the 64-bit offset table.
    self.ofstable_ofs = self.sha_ofs + nsha * 20 + nsha * 4
    self.ofs64table_ofs = self.ofstable_ofs + nsha * 4
    # Avoid slicing this for individual hashes (very high overhead).
    self.shatable = buffer(self.map, self.sha_ofs, nsha * 20)
def _splitbuf(buf, basebits, fanbits):
    """Yield (chunk, level) pairs carved out of *buf* at rolling-sum splits.

    Each chunk is a zero-copy view of the consumed bytes; *level* is how
    many fanout levels the split point's extra bits earn it.  Once no
    split point is found, any remaining data is flushed in BLOB_MAX-sized
    level-0 pieces.
    """
    while True:
        data = buf.peek(buf.used())
        ofs, bits = _helpers.splitbuf(data)
        if not ofs:
            break
        if ofs > BLOB_MAX:
            # Oversized chunk: clamp to the blob limit, treat as level 0.
            ofs, level = BLOB_MAX, 0
        else:
            level = (bits - basebits) // fanbits  # integer division
        buf.eat(ofs)
        yield buffer(data, 0, ofs), level
    # No split point remains: limit max blob size anyway.
    while buf.used() >= BLOB_MAX:
        yield buf.get(BLOB_MAX), 0
def peek(self, count):
    """Return a zero-copy view of the next *count* bytes without consuming."""
    start = self.start
    return buffer(self.data, start, count)
def put(self, s):
    """Append *s* to the buffered data; empty input is a no-op."""
    if not s:
        return
    # Drop the already-consumed prefix while splicing in the new bytes,
    # so self.start can reset to the beginning of the fresh buffer.
    self.data = join_bytes(buffer(self.data, self.start), s)
    self.start = 0
def peek(self, count):
    """Return the next *count* bytes without consuming them.

    Small peeks (<= 256 bytes) return a byte-string copy; larger ones
    return a zero-copy buffer() view to avoid the slicing overhead.
    """
    start = self.start
    if count > 256:
        return buffer(self.data, start, count)
    return self.data[start:start + count]