Ejemplo n.º 1
0
 def check_record_pack_unpack(self):
     """Verify that pack_record() round-trips through unpack_record()
     and that split_oids() recovers the original reference list."""
     oid = as_bytes('0' * 8)
     data = as_bytes('sample')
     reflist = [as_bytes(ref) for ref in ('1' * 8, '2' * 8)]
     refs = join_bytes(reflist)
     got_oid, got_data, got_refs = unpack_record(
         pack_record(oid, data, refs))
     assert got_oid == oid
     assert got_data == data
     assert split_oids(got_refs) == reflist
     # An empty reference string splits into an empty list.
     assert split_oids('') == []
Ejemplo n.º 2
0
    def gen_oid_record(self, start_oid=None, batch_size=100):
        """(start_oid:str = None, batch_size:int = 100) ->
            sequence((oid:str, record:str))
        Generate (oid, record) pairs via a breadth-first traversal of
        the object graph starting at start_oid (the root oid when no
        start_oid is given).  Records are fetched through the
        storage's bulk_load() method, batch_size oids per call,
        because bulk loading is faster in some cases.
        """
        if start_oid is None:
            start_oid = durus.connection.ROOT_OID
        pending = deque([start_oid])
        visited = set()
        while pending:
            # Gather the next batch of not-yet-visited oids.
            batch = []
            while pending and len(batch) < batch_size:
                candidate = pending.popleft()
                if candidate not in visited:
                    batch.append(candidate)
                    visited.add(candidate)
            for record in self.bulk_load(batch):
                oid, data, refdata = unpack_record(record)
                yield oid, record
                # Queue unseen references for later batches.
                pending.extend(ref for ref in split_oids(refdata)
                               if ref not in visited)
Ejemplo n.º 3
0
 def gen_reachable_records():
     """Yield (oid, record) pairs for every object reachable from the
     root, plus any oids queued in self.pack_extra while packing.

     NOTE(review): `index` (bare name) and `self.index` are both used
     below; both come from the enclosing scope — presumably the index
     of records already written to the pack file.  Confirm against
     the enclosing function.
     """
     # we order the todo queue by file offset. The hope is that the
     # packed file will be mostly the same as the old file in order
     # to speed up the rsync delta process.
     default_rank = 2**64
     pack_todo = [(0, durus.connection.ROOT_OID)]
     while pack_todo or self.pack_extra:
         if self.pack_extra:
             oid = self.pack_extra.pop()
             # note we don't check 'index' because it could be an
             # object that got updated since the pack began and in
             # that case we have to write the new record to the pack
             # file
         else:
             rank, oid = heapq.heappop(pack_todo)
             if oid in index:
                 # we already wrote this object record
                 continue
         record = self.load(oid)
         oid2, data, refdata = unpack_record(record)
         assert oid == oid2
         # ensure we have records for objects referenced
         for ref_oid in split_oids(refdata):
             item = (self.index.get(ref_oid, default_rank), ref_oid)
             heapq.heappush(pack_todo, item)
         yield (oid, record)
Ejemplo n.º 4
0
 def handle_B(self, s):
     """Handle the 'B' (bulk read) command: read a 4-byte count and
     that many packed 8-byte oids, then send a load response for
     each requested oid."""
     count = read_int4(s)
     packed = read(s, 8 * count)
     for oid in split_oids(packed):
         self._send_load_response(s, oid)
Ejemplo n.º 5
0
 def handle_B(self, s):
     # 'B' command: bulk read of objects.  The request body is a
     # 4-byte count followed by that many packed 8-byte oids.
     n = read_int4(s)
     requested = split_oids(read(s, 8 * n))
     for requested_oid in requested:
         self._send_load_response(s, requested_oid)
Ejemplo n.º 6
0
 def end(self, handle_invalidations=None):
     """Commit the buffered records to the server ('C' command).

     Any invalidated oids reported by the server are passed to
     handle_invalidations.  On ConflictError from that handler, the
     transaction's fresh oids are returned to the pool, state is
     reset, the server is told we are done, and the error re-raised.
     Raises WriteConflictError if the server rejects the write and
     ProtocolError on an unexpected status byte.
     """
     write(self.s, 'C')
     invalid_count = read_int4(self.s)
     if invalid_count != 0:
         packed = read(self.s, invalid_count * 8)
         invalidated = split_oids(packed)
         try:
             handle_invalidations(invalidated)
         except ConflictError:
             # Recycle the oids allocated during this transaction.
             self.transaction_new_oids.reverse()
             self.oid_pool.extend(self.transaction_new_oids)
             assert len(self.oid_pool) == len(set(self.oid_pool))
             self.begin()  # clear out records and transaction_new_oids.
             write_int4(self.s, 0)  # Tell server we are done.
             raise
     # Serialize pending records as (length, oid, record) chunks.
     chunks = []
     for oid, record in iteritems(self.records):
         chunks.extend((int4_to_str(8 + len(record)),
                        as_bytes(oid), record))
     payload = join_bytes(chunks)
     write_int4_str(self.s, payload)
     self.records.clear()
     if len(payload) > 0:
         status = read(self.s, 1)
         if status == STATUS_INVALID:
             raise WriteConflictError()
         elif status != STATUS_OKAY:
             raise ProtocolError('server returned invalid status %r' %
                                 status)
Ejemplo n.º 7
0
 def end(self, handle_invalidations=None):
     """Commit the buffered records to the server ('C' command).

     The server first reports invalidated oids, which are passed to
     handle_invalidations; a ConflictError there aborts the commit
     (fresh oids returned to the pool, error re-raised).  Raises
     WriteConflictError or ProtocolError on a bad server status.
     """
     write(self.s, 'C')
     n = read_int4(self.s)
     oid_list = []
     if n != 0:
         packed_oids = read(self.s, n*8)
         oid_list = split_oids(packed_oids)
         try:
             handle_invalidations(oid_list)
         except ConflictError:
             # Put this transaction's unused new oids back in the pool.
             self.transaction_new_oids.reverse()
             self.oid_pool.extend(self.transaction_new_oids)
             assert len(self.oid_pool) == len(set(self.oid_pool))
             self.begin() # clear out records and transaction_new_oids.
             write_int4(self.s, 0) # Tell server we are done.
             raise
     # Serialize pending records as (length, oid, record) chunks.
     tdata = []
     for oid, record in iteritems(self.records):
         tdata.append(int4_to_str(8 + len(record)))
         tdata.append(as_bytes(oid))
         tdata.append(record)
     tdata = join_bytes(tdata)
     write_int4_str(self.s, tdata)
     self.records.clear()
     if len(tdata) > 0:
         status = read(self.s, 1)
         if status == STATUS_OKAY:
             pass
         elif status == STATUS_INVALID:
             raise WriteConflictError()
         else:
             raise ProtocolError('server returned invalid status %r' % status)
Ejemplo n.º 8
0
 def sync(self):
     """Ask the server ('S' command) for accumulated invalidations
     and return the list of invalidated oids."""
     write(self.s, 'S')
     count = read_int4(self.s)
     packed_oids = '' if count == 0 else read(self.s, count * 8)
     return split_oids(packed_oids)
Ejemplo n.º 9
0
def gen_referring_oid_record(storage, referred_oid):
    """(storage:Storage, referred_oid:str) -> sequence([oid:str, record:str])
    Generate oid, record pairs for every object whose reference
    list contains `referred_oid`.
    """
    for oid, record in storage.gen_oid_record():
        refdata = unpack_record(record)[2]
        if referred_oid in split_oids(refdata):
            yield oid, record
Ejemplo n.º 10
0
 def sync(self):
     # 'S' command: fetch invalidation notifications from the server.
     write(self.s, 'S')
     n = read_int4(self.s)
     if n == 0:
         return split_oids('')
     return split_oids(read(self.s, n * 8))
Ejemplo n.º 11
0
 def _get_refs(self, oid):
     """Return the list of oids referenced by the object `oid`,
     read from the SQLite `objects` table.  Raises KeyError when
     the oid has no row."""
     cursor = self._conn.cursor()
     cursor.execute('SELECT refs FROM objects WHERE id = ?',
                    (str_to_int8(oid),))
     row = cursor.fetchone()
     if row is None:
         raise KeyError(oid)
     return split_oids(row[0])
Ejemplo n.º 12
0
 def sync(self):
     """Request pending invalidations ('S' command) and return the
     invalidated oids as a list."""
     self._send_command('S')
     count = read_int4(self.socket)
     if count == 0:
         return split_oids('')
     return split_oids(read(self.socket, 8 * count))
Ejemplo n.º 13
0
 def sync(self):
     # Poll the server ('S') for invalidated oids; the reply is a
     # 4-byte count followed by count packed 8-byte oids.
     self.s.sendall('S')
     count = u32(recv(self.s, 4))
     packed = recv(self.s, count * 8) if count != 0 else ''
     return split_oids(packed)
Ejemplo n.º 14
0
def get_reference_index(storage):
    """(storage:Storage) -> {oid:str : [referring_oid:str]}
    Build a full index mapping each oid to the list of oids that
    refer to it.  This might be large.
    """
    index = {}
    for oid, record in storage.gen_oid_record():
        refdata = unpack_record(record)[2]
        for referenced in split_oids(refdata):
            index.setdefault(referenced, []).append(oid)
    return index
Ejemplo n.º 15
0
 def handle_bulk_read(self, client, db_name):
     # 'B' command: bulk read of object records for one database.
     log(20, 'Bulk read %s' % db_name)
     storage = self.storages[db_name]
     # Request body: 4-byte count, then count packed 8-byte oids.
     count = str_to_int4((yield client.read(4)))
     packed_oids = yield client.read(8 * count)
     for oid in split_oids(packed_oids):
         yield self._send_load_response(client, db_name, storage, oid)
Ejemplo n.º 16
0
 def new_oid(self):
     """Return a fresh oid, refilling the local pool from the
     server ('M' command) when it runs dry.  The oid is remembered
     in transaction_new_oids so it can be recycled on conflict."""
     if not self.oid_pool:
         batch = self.oid_pool_size
         write(self.s, 'M%s' % chr(batch))
         fresh = split_oids(read(self.s, 8 * batch))
         # Reverse so pop() hands oids out in server order.
         fresh.reverse()
         self.oid_pool = fresh
         assert len(self.oid_pool) == len(set(self.oid_pool))
     oid = self.oid_pool.pop()
     assert oid not in self.oid_pool
     self.transaction_new_oids.append(oid)
     return oid
Ejemplo n.º 17
0
 def new_oid(self):
     """Return an unused oid for a new object.

     Refills self.oid_pool by sending the 'M' command (with the
     batch size encoded as a single byte) when the pool is empty.
     The oid is recorded in self.transaction_new_oids so it can be
     returned to the pool if the transaction conflicts.
     """
     if not self.oid_pool:
         batch = self.oid_pool_size
         write(self.s, 'M%s' % chr(batch))
         self.oid_pool = split_oids(read(self.s, 8 * batch))
         # Reverse so pop() hands oids out in server order.
         self.oid_pool.reverse()
         assert len(self.oid_pool) == len(set(self.oid_pool))
     oid = self.oid_pool.pop()
     assert oid not in self.oid_pool
     self.transaction_new_oids.append(oid)
     return oid
Ejemplo n.º 18
0
 def new_oid(self):
     """Return a fresh oid, refilling the local pool from the
     server when it is empty."""
     if not self.oid_pool:
         batch = self.oid_pool_size
         # 'M' command followed by the batch size as one byte.
         self._send_command('M')
         write(self.socket, chr(batch))
         pool = split_oids(read(self.socket, 8 * batch))
         # Reverse so pop() hands oids out in server order.
         pool.reverse()
         self.oid_pool = pool
         assert len(self.oid_pool) == len(set(self.oid_pool))
     oid = self.oid_pool.pop()
     assert oid not in self.oid_pool
     self.transaction_new_oids.append(oid)
     return oid
Ejemplo n.º 19
0
 def gen_reachable_records():
     """Yield (oid, record) pairs for every object reachable from
     ROOT_OID (depth-first), then for any oids queued in
     self.pack_extra while the traversal was running.

     NOTE(review): `Set` is the pre-2.6 sets-module class; `self`
     comes from the enclosing scope.
     """
     todo = [ROOT_OID]
     seen = Set()
     while todo:
         oid = todo.pop()
         if oid in seen:
             continue
         seen.add(oid)
         record = self.load(oid)
         record_oid, data, refdata = unpack_record(record)
         assert oid == record_oid
         todo.extend(split_oids(refdata))
         yield oid, record
     # Objects queued during the pack are yielded unconditionally,
     # even if already seen above.
     while self.pack_extra:
         oid = self.pack_extra.pop()
         yield oid, self.load(oid)
Ejemplo n.º 20
0
 def gen_oid_record(self, start_oid=None, **other):
     """(start_oid:str = None) -> sequence((oid:str, record:str))
     With no start_oid, yield every item stored in self.shelf (not
     restricted to reachable objects).  Otherwise walk the object
     graph depth-first from start_oid, yielding each reachable
     record exactly once.
     """
     if start_oid is None:
         for item in iteritems(self.shelf):
             yield item
         return
     stack = [start_oid]
     seen = IntSet()  # This eventually contains them all.
     while stack:
         oid = stack.pop()
         key = str_to_int8(oid)
         if key in seen:
             continue
         seen.add(key)
         record = self.load(oid)
         record_oid, data, refdata = unpack_record(record)
         assert oid == record_oid
         stack.extend(split_oids(refdata))
         yield oid, record
Ejemplo n.º 21
0
 def gen_oid_record(self, start_oid=None, **other):
     """(start_oid:str = None) -> sequence((oid:str, record:str))

     With no start_oid, yield every item stored in self.shelf (not
     restricted to reachable objects).  Otherwise walk the object
     graph depth-first from start_oid, yielding each reachable
     record exactly once.
     """
     if start_oid is None:
         for item in iteritems(self.shelf):
             yield item
     else:
         todo = [start_oid]
         seen = IntSet()  # This eventually contains them all.
         while todo:
             oid = todo.pop()
             if str_to_int8(oid) in seen:
                 continue
             seen.add(str_to_int8(oid))
             record = self.load(oid)
             record_oid, data, refdata = unpack_record(record)
             assert oid == record_oid
             todo.extend(split_oids(refdata))
             yield oid, record
Ejemplo n.º 22
0
 def gen_oid_record(self, start_oid=None, seen=None, **other):
     """(start_oid:str = None, seen = None) ->
         sequence((oid:str, record:str))
     With no start_oid, yield every item stored in self.shelf.
     Otherwise traverse the object graph from start_oid, visiting
     oids in heap (smallest-first) order; `seen` tracks visited
     oids as ints and may be supplied by the caller.
     """
     if start_oid is None:
         for item in iteritems(self.shelf):
             yield item
     else:
         heap = [start_oid]
         if seen is None:
             seen = IntSet()  # This eventually contains them all.
         while heap:
             oid = heapq.heappop(heap)
             oid_int = str_to_int8(oid)
             if oid_int in seen:
                 continue
             seen.add(oid_int)
             record = self.load(oid)
             record_oid, data, refdata = unpack_record(record)
             assert oid == record_oid
             for ref_oid in split_oids(refdata):
                 heapq.heappush(heap, ref_oid)
             yield oid, record
Ejemplo n.º 23
0
 def gen_reachable_records():
     """Yield (oid, record) pairs for every object reachable from
     the root (visited in heap order), plus any oids queued in
     self.pack_extra during the pack.

     NOTE(review): `index` is a closure variable from the enclosing
     scope — presumably the set of oids already written to the pack
     file; confirm against the enclosing function.
     """
     pack_todo = [durus.connection.ROOT_OID]
     while pack_todo or self.pack_extra:
         if self.pack_extra:
             oid = self.pack_extra.pop()
             # note we don't check 'index' because it could be an
             # object that got updated since the pack began and in
             # that case we have to write the new record to the pack
             # file
         else:
             oid = heapq.heappop(pack_todo)
             if oid in index:
                 # we already wrote this object record
                 continue
         record = self.load(oid)
         oid2, data, refdata = unpack_record(record)
         assert oid == oid2
         # ensure we have records for objects referenced
         for ref_oid in split_oids(refdata):
             heapq.heappush(pack_todo, ref_oid)
         yield (oid, record)
Ejemplo n.º 24
0
 def end(self, handle_invalidations=None):
     """Commit the buffered records to the server ('C' command).

     Python 2 code (note the old raise syntax and iteritems()).
     The server first reports invalidated oids, which are passed to
     handle_invalidations; a ConflictError there tells the server we
     are done and re-raises.  Raises ProtocolError on a bad status.
     """
     self.s.sendall('C')
     n = u32(recv(self.s, 4))
     if n != 0:
         packed_oids = recv(self.s, n*8)
         try:
             handle_invalidations(split_oids(packed_oids))
         except ConflictError:
             self.s.sendall(p32(0)) # Tell server we are done.
             raise
     # Serialize pending records as (length, oid, record) chunks.
     tdata = []
     for oid, record in self.records.iteritems():
         tdata.append(p32(8 + len(record)))
         tdata.append(oid)
         tdata.append(record)
     tdata = ''.join(tdata)
     self.s.sendall(p32(len(tdata)))
     self.s.sendall(tdata)
     self.records.clear()
     status = recv(self.s, 1)
     if status != STATUS_OKAY:
         # Python 2 raise statement; do not port blindly to Python 3.
         raise ProtocolError, 'server returned invalid status %r' % status