Example #1
 def testBasicStore(self):
     cluster = NEOCluster()
     try:
         cluster.start()
         storage = cluster.getZODBStorage()
         data_info = {}
         compressible = 'x' * 20
         compressed = compress(compressible)
         oid_list = []
         if cluster.storage.getAdapter() == 'SQLite':
             big = None
             data = 'foo', '', 'foo', compressed, compressible
         else:
             big = os.urandom(65536) * 600
             assert len(big) < len(compress(big))
             data = ('foo', big, '', 'foo', big[:2**24-1], big,
                     compressed, compressible, big[:2**24])
             self.assertFalse(cluster.storage.sqlCount('bigdata'))
         self.assertFalse(cluster.storage.sqlCount('data'))
         for data in data:
             if data is compressible:
                 key = makeChecksum(compressed), 1
             else:
                 key = makeChecksum(data), 0
             oid = storage.new_oid()
             txn = transaction.Transaction()
             storage.tpc_begin(txn)
             r1 = storage.store(oid, None, data, '', txn)
             r2 = storage.tpc_vote(txn)
             data_info[key] = 1
             self.assertEqual(data_info, cluster.storage.getDataLockInfo())
             serial = storage.tpc_finish(txn)
             data_info[key] = 0
             self.tic()
             self.assertEqual(data_info, cluster.storage.getDataLockInfo())
             self.assertEqual((data, serial), storage.load(oid, ''))
             storage._cache.clear()
             self.assertEqual((data, serial), storage.load(oid, ''))
             self.assertEqual((data, serial), storage.load(oid, ''))
             oid_list.append((oid, data, serial))
         if big:
             self.assertTrue(cluster.storage.sqlCount('bigdata'))
         self.assertTrue(cluster.storage.sqlCount('data'))
         for i, (oid, data, serial) in enumerate(oid_list, 1):
             storage._cache.clear()
             cluster.storage.dm.deleteObject(oid)
             self.assertRaises(POSException.POSKeyError,
                 storage.load, oid, '')
             for oid, data, serial in oid_list[i:]:
                 self.assertEqual((data, serial), storage.load(oid, ''))
         if big:
             self.assertFalse(cluster.storage.sqlCount('bigdata'))
         self.assertFalse(cluster.storage.sqlCount('data'))
     finally:
         cluster.stop()
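
The test above keys its expectations by (makeChecksum(payload), compression). Judging from how the helper is used throughout the examples below, makeChecksum appears to be a plain SHA-1 digest over the raw (possibly compressed) bytes, and ZERO_HASH a digest-sized run of zero bytes marking "no data" records. A minimal, hedged sketch of that assumption (the real neo.lib.util implementation may differ):

    from hashlib import sha1

    ZERO_HASH = b'\0' * 20          # length of a SHA-1 digest

    def makeChecksum(data):
        """Binary SHA-1 digest of the payload."""
        return sha1(data).digest()

    assert len(makeChecksum(b'foo')) == len(ZERO_HASH)
    assert makeChecksum(b'foo') != ZERO_HASH
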
Example #2
    def _store(self,
               txn_context,
               oid,
               serial,
               data,
               data_serial=None,
               unlock=False):
        ttid = txn_context['ttid']
        if data is None:
            # This is some undo: either a no-data object (undoing object
            # creation) or a back-pointer to an earlier revision (going back to
            # an older object revision).
            compressed_data = ''
            compression = 0
            checksum = ZERO_HASH
        else:
            assert data_serial is None
            size = len(data)
            if self.compress:
                compressed_data = compress(data)
                if size < len(compressed_data):
                    compressed_data = data
                    compression = 0
                else:
                    compression = 1
            else:
                compression = 0
                compressed_data = data
            checksum = makeChecksum(compressed_data)
            txn_context['data_size'] += size
        on_timeout = partial(
            self.onStoreTimeout,
            txn_context=txn_context,
            oid=oid,
        )
        # Store object in tmp cache
        txn_context['data_dict'][oid] = data
        # Store data on each node
        txn_context['object_stored_counter_dict'][oid] = {}
        serial = serial or ZERO_TID
        txn_context['object_base_serial_dict'].setdefault(oid, serial)
        txn_context['object_serial_dict'][oid] = serial
        queue = txn_context['queue']
        involved_nodes = txn_context['involved_nodes']
        add_involved_nodes = involved_nodes.add
        packet = Packets.AskStoreObject(oid, serial, compression, checksum,
                                        compressed_data, data_serial, ttid,
                                        unlock)
        for node, conn in self.cp.iterateForObject(oid):
            try:
                conn.ask(packet, on_timeout=on_timeout, queue=queue)
                add_involved_nodes(node)
            except ConnectionClosed:
                continue
        if not involved_nodes:
            raise NEOStorageError("Store failed")

        while txn_context['data_size'] >= self._cache._max_size:
            self._waitAnyTransactionMessage(txn_context)
        self._waitAnyTransactionMessage(txn_context, False)
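
The _store method above only keeps the compressed form when compression actually pays off; otherwise the original bytes are sent with a compression flag of 0. A self-contained sketch of that decision, assuming zlib as the compressor:

    from zlib import compress

    def prepare_payload(data, want_compression=True):
        """Return (compression_flag, payload) following the rule used in _store above."""
        if not want_compression:
            return 0, data
        compressed = compress(data)
        if len(data) < len(compressed):
            # Compression would grow the payload (e.g. random data):
            # keep the original bytes and flag them as uncompressed.
            return 0, data
        return 1, compressed

    flag, payload = prepare_payload(b'x' * 1000)
    assert flag == 1 and len(payload) < 1000
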
Example #3
 def __dump(self, storage, sorted=sorted):
     return {
         u64(t.tid): sorted(
             (u64(o.oid), o.data_txn and u64(o.data_txn),
              None if o.data is None else makeChecksum(o.data)) for o in t)
         for t in storage.iterator()
     }
Example #4
    def _store(self, txn_context, oid, serial, data, data_serial=None):
        ttid = txn_context.ttid
        if data is None:
            # This is some undo: either a no-data object (undoing object
            # creation) or a back-pointer to an earlier revision (going back to
            # an older object revision).
            compressed_data = ''
            compression = 0
            checksum = ZERO_HASH
        else:
            assert data_serial is None
            size = len(data)
            if self.compress:
                compressed_data = compress(data)
                if size < len(compressed_data):
                    compressed_data = data
                    compression = 0
                else:
                    compression = 1
            else:
                compression = 0
                compressed_data = data
            checksum = makeChecksum(compressed_data)
            txn_context.data_size += size
        # Store object in tmp cache
        packet = Packets.AskStoreObject(oid, serial, compression, checksum,
                                        compressed_data, data_serial, ttid)
        txn_context.data_dict[oid] = data, serial, txn_context.write(self,
                                                                     packet,
                                                                     oid,
                                                                     oid=oid)

        while txn_context.data_size >= self._cache._max_size:
            self._waitAnyTransactionMessage(txn_context)
        self._waitAnyTransactionMessage(txn_context, False)
Example #5
 def askStorage(conn, packet):
     tid, next_tid, compression, checksum, data, data_tid \
         = self._askStorage(conn, packet)
     if data or checksum != ZERO_HASH:
         if checksum != makeChecksum(data):
             logging.error('wrong checksum from %s for oid %s',
                           conn, dump(oid))
             raise NEOStorageReadRetry(False)
         return (decompress_list[compression](data),
                 tid, next_tid, data_tid)
     raise NEOStorageCreationUndoneError(dump(oid))
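
On the read side, the checksum sent by the storage node is recomputed locally before the payload is trusted and decompressed; empty data together with an all-zero checksum means the object's creation was undone. A hedged, standalone sketch of that check (the ValueError messages are stand-ins for the NEO exceptions raised above):

    from hashlib import sha1
    from zlib import compress, decompress

    ZERO_HASH = b'\0' * 20

    def verify_and_unpack(compression, checksum, data):
        if not data and checksum == ZERO_HASH:
            raise ValueError('creation undone')   # stands in for NEOStorageCreationUndoneError
        if sha1(data).digest() != checksum:
            raise ValueError('wrong checksum')    # stands in for the read-retry path
        return decompress(data) if compression else data

    payload = compress(b'hello')
    assert verify_and_unpack(1, sha1(payload).digest(), payload) == b'hello'
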
Example #6
    def _store(self, txn_context, oid, serial, data, data_serial=None,
            unlock=False):
        ttid = txn_context['ttid']
        if data is None:
            # This is some undo: either a no-data object (undoing object
            # creation) or a back-pointer to an earlier revision (going back to
            # an older object revision).
            compressed_data = ''
            compression = 0
            checksum = ZERO_HASH
        else:
            assert data_serial is None
            size = len(data)
            if self.compress:
                compressed_data = compress(data)
                if size < len(compressed_data):
                    compressed_data = data
                    compression = 0
                else:
                    compression = 1
            else:
                compression = 0
                compressed_data = data
            checksum = makeChecksum(compressed_data)
            txn_context['data_size'] += size
        on_timeout = partial(
            self.onStoreTimeout,
            txn_context=txn_context,
            oid=oid,
        )
        # Store object in tmp cache
        txn_context['data_dict'][oid] = data
        # Store data on each node
        txn_context['object_stored_counter_dict'][oid] = {}
        serial = serial or ZERO_TID
        txn_context['object_base_serial_dict'].setdefault(oid, serial)
        txn_context['object_serial_dict'][oid] = serial
        queue = txn_context['queue']
        involved_nodes = txn_context['involved_nodes']
        add_involved_nodes = involved_nodes.add
        packet = Packets.AskStoreObject(oid, serial, compression,
            checksum, compressed_data, data_serial, ttid, unlock)
        for node, conn in self.cp.iterateForObject(oid):
            try:
                conn.ask(packet, on_timeout=on_timeout, queue=queue)
                add_involved_nodes(node)
            except ConnectionClosed:
                continue
        if not involved_nodes:
            raise NEOStorageError("Store failed")

        while txn_context['data_size'] >= self._cache._max_size:
            self._waitAnyTransactionMessage(txn_context)
        self._waitAnyTransactionMessage(txn_context, False)
Example #7
 def askStoreObject(self, conn, oid, serial,
         compression, checksum, data, data_serial, ttid, unlock):
     if 1 < compression:
         raise ProtocolError('invalid compression value')
     # register the transaction
     self.app.tm.register(conn.getUUID(), ttid)
     if data or checksum != ZERO_HASH:
         # TODO: return an appropriate error packet
         assert makeChecksum(data) == checksum
         assert data_serial is None
     else:
         checksum = data = None
     self._askStoreObject(conn, oid, serial, compression, checksum, data,
         data_serial, ttid, unlock, time.time())
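
On the storage node, askStoreObject validates the request before queuing it: the compression flag must be 0 or 1 and, for non-empty data, the announced checksum must match the payload; empty data with an all-zero checksum denotes an undo or back-pointer record that carries nothing to hash. A hedged sketch of that validation step (the error type and helper name are illustrative, not the NEO API):

    from hashlib import sha1

    ZERO_HASH = b'\0' * 20

    def validate_store(compression, checksum, data):
        if compression not in (0, 1):
            raise ValueError('invalid compression value')   # stands in for ProtocolError
        if data or checksum != ZERO_HASH:
            return sha1(data).digest() == checksum
        return True   # undo / back-pointer: no data to verify

    assert validate_store(0, sha1(b'foo').digest(), b'foo')
    assert not validate_store(0, ZERO_HASH, b'foo')
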
Example #8
 def getObject(self, oid, tid=None, before_tid=None):
     u64 = util.u64
     u_oid = u64(oid)
     u_tid = tid and u64(tid)
     u_before_tid = before_tid and u64(before_tid)
     db = self.db
     if self.zodb_tid < (u_before_tid -
                         1 if before_tid else u_tid or 0) <= self.zodb_ltid:
         o = None
     else:
         o = db.getObject(oid, tid, before_tid)
         if o and self.zodb_ltid < u64(o[0]) or \
            not self.inZodb(u_oid, u_tid, u_before_tid):
             return o
     p64 = util.p64
     zodb, z_oid = self.zodbFromOid(u_oid)
     try:
         value, serial, next_serial = zodb.loadBefore(
             p64(z_oid), before_tid
             or (util.p64(u_tid + 1) if tid else MAX_TID))
     except TypeError:  # loadBefore returned None
         return False
     except POSKeyError:
         assert not o, o
         return o
     if serial != tid:
         if tid:
             return False
         u_tid = u64(serial)
     if u_tid <= self.zodb_tid and o:
         return o
     if value:
         value = zodb.repickle(value)
         checksum = util.makeChecksum(value)
     else:
         # CAVEAT: Although we think loadBefore should not return an empty
         #         value for a deleted object (BBB: fixed in ZODB4),
         #         there's no need to distinguish this case in the above
         #         except clause because it would be crazy to import a
         #         NEO DB using this backend.
         checksum = None
     if not next_serial:
         next_serial = db._getNextTID(db._getPartition(u_oid), u_oid, u_tid)
         if next_serial:
             next_serial = p64(next_serial)
     return (serial, next_serial, 0, checksum, value,
             zodb.getDataTid(z_oid, u_tid))
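
This getObject belongs to the importer backend and first decides where the requested revision lives: transactions up to zodb_tid have already been copied into the native backend, those up to zodb_ltid still have to be read from the source ZODB, and anything newer was written directly to NEO. A hedged sketch of that routing test, reusing the attribute names from the code above:

    def lives_in_source_zodb(u_tid, zodb_tid, zodb_ltid):
        """True if the revision must still be read from the source ZODB."""
        return zodb_tid < u_tid <= zodb_ltid

    # Example: revisions up to tid 100 imported, source ZODB ends at tid 500.
    assert not lives_in_source_zodb(50, 100, 500)    # already migrated
    assert lives_in_source_zodb(200, 100, 500)       # still in the source ZODB
    assert not lives_in_source_zodb(600, 100, 500)   # written natively to NEO
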
Example #9
 def getObject(self, oid, tid=None, before_tid=None):
     u64 = util.u64
     u_oid = u64(oid)
     u_tid = tid and u64(tid)
     u_before_tid = before_tid and u64(before_tid)
     db = self.db
     if self.zodb_tid < (u_before_tid - 1 if before_tid else
                         u_tid or 0) <= self.zodb_ltid:
         o = None
     else:
         o = db.getObject(oid, tid, before_tid)
         if o and self.zodb_ltid < u64(o[0]) or \
            not self.inZodb(u_oid, u_tid, u_before_tid):
             return o
     p64 = util.p64
     zodb, z_oid = self.zodbFromOid(u_oid)
     try:
         value, serial, next_serial = zodb.loadBefore(p64(z_oid),
             before_tid or (util.p64(u_tid + 1) if tid else MAX_TID))
     except TypeError: # loadBefore returned None
         return False
     except POSKeyError:
         assert not o, o
         return o
     if serial != tid:
         if tid:
             return False
         u_tid = u64(serial)
     if u_tid <= self.zodb_tid and o:
         return o
     if value:
         value = zodb.repickle(value)
         checksum = util.makeChecksum(value)
     else:
         # CAVEAT: Although we think loadBefore should not return an empty
         #         value for a deleted object (see comment in NEO Storage),
         #         there's no need to distinguish this case in the above
         #         except clause because it would be crazy to import a
         #         NEO DB using this backend.
         checksum = None
     if not next_serial:
         next_serial = db._getNextTID(db._getPartition(u_oid), u_oid, u_tid)
         if next_serial:
             next_serial = p64(next_serial)
     return (serial, next_serial,
         0, checksum, value, zodb.getDataTid(z_oid, u_tid))
Example #10
    def testStorageDataLock(self):
        cluster = NEOCluster()
        try:
            cluster.start()
            storage = cluster.getZODBStorage()
            data_info = {}

            data = 'foo'
            key = makeChecksum(data), 0
            oid = storage.new_oid()
            txn = transaction.Transaction()
            storage.tpc_begin(txn)
            r1 = storage.store(oid, None, data, '', txn)
            r2 = storage.tpc_vote(txn)
            tid = storage.tpc_finish(txn)
            data_info[key] = 0
            storage.sync()

            txn = [transaction.Transaction() for x in xrange(3)]
            for t in txn:
                storage.tpc_begin(t)
                storage.store(tid and oid or storage.new_oid(),
                              tid, data, '', t)
                tid = None
            for t in txn:
                storage.tpc_vote(t)
            data_info[key] = 3
            self.assertEqual(data_info, cluster.storage.getDataLockInfo())

            storage.tpc_abort(txn[1])
            storage.sync()
            data_info[key] -= 1
            self.assertEqual(data_info, cluster.storage.getDataLockInfo())

            tid1 = storage.tpc_finish(txn[2])
            self.tic()
            data_info[key] -= 1
            self.assertEqual(data_info, cluster.storage.getDataLockInfo())

            storage.tpc_abort(txn[0])
            storage.sync()
            data_info[key] -= 1
            self.assertEqual(data_info, cluster.storage.getDataLockInfo())
        finally:
            cluster.stop()
Example #11
    def _loadFromStorage(self, oid, at_tid, before_tid):
        packet = Packets.AskObject(oid, at_tid, before_tid)
        for node, conn in self.cp.iterateForObject(oid, readable=True):
            try:
                tid, next_tid, compression, checksum, data, data_tid \
                    = self._askStorage(conn, packet)
            except ConnectionClosed:
                continue

            if data or checksum != ZERO_HASH:
                if checksum != makeChecksum(data):
                    logging.error('wrong checksum from %s for oid %s',
                                  conn, dump(oid))
                    continue
                return (decompress(data) if compression else data,
                        tid, next_tid, data_tid)
            raise NEOStorageCreationUndoneError(dump(oid))
        raise NEOStorageError("storage down or corrupted data")
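
_loadFromStorage tries every node that may hold the object: a dropped connection or a corrupted reply simply moves on to the next candidate, and the load only fails once every candidate has been tried. A generic sketch of that pattern (ConnectionClosed, ask and verify are placeholders here, not the NEO API):

    class ConnectionClosed(Exception):
        pass

    def load_from_any(nodes, ask, verify):
        """Return the first verified reply from any node, else give up."""
        for node in nodes:
            try:
                reply = ask(node)
            except ConnectionClosed:
                continue            # node went away: try the next one
            if verify(reply):
                return reply
            # wrong checksum: distrust this reply and keep trying
        raise RuntimeError('storage down or corrupted data')

    assert load_from_any([1, 2], lambda n: n * 10, lambda r: r == 20) == 20
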
Example #12
    def _loadFromStorage(self, oid, at_tid, before_tid):
        packet = Packets.AskObject(oid, at_tid, before_tid)
        for node, conn in self.cp.iterateForObject(oid, readable=True):
            try:
                tid, next_tid, compression, checksum, data, data_tid \
                    = self._askStorage(conn, packet)
            except ConnectionClosed:
                continue

            if data or checksum != ZERO_HASH:
                if checksum != makeChecksum(data):
                    logging.error('wrong checksum from %s for oid %s', conn,
                                  dump(oid))
                    continue
                return (decompress(data) if compression else data, tid,
                        next_tid, data_tid)
            raise NEOStorageCreationUndoneError(dump(oid))
        raise NEOStorageError("storage down or corrupted data")
Example #13
 def answerRebaseObject(self, conn, conflict, oid):
     if conflict:
         txn_context = self.app.getHandlerData()
         serial, conflict, data = conflict
         assert serial and serial < conflict, (serial, conflict)
         resolved = conflict <= txn_context.resolved_dict.get(oid, '')
         try:
             cached = txn_context.cache_dict.pop(oid)
         except KeyError:
             if resolved:
                 # We should still be waiting for an answer from this node,
                 # unless we lost connection.
                 assert conn.uuid in txn_context.data_dict[oid][2] or \
                        txn_context.conn_dict[conn.uuid] is None
                 return
             assert oid in txn_context.data_dict
             if serial <= txn_context.conflict_dict.get(oid, ''):
                 # Another node already reported this conflict or a newer,
                 # by answering to this rebase or to the previous store.
                 return
             # A node has not yet answered a previous store. Do not wait for
             # it to report the conflict, because it may fail before doing so.
         else:
             # The data for this oid are now back on client side.
             # Revert what was done in Transaction.written
             assert not resolved
             if data is None:  # undo or CHECKED_SERIAL
                 data = cached
             else:
                 compression, checksum, data = data
                 if checksum != makeChecksum(data):
                     raise NEOStorageError(
                         'wrong checksum while getting back data for'
                         ' object %s during rebase of transaction %s' %
                         (dump(oid), dump(txn_context.ttid)))
                 data = decompress_list[compression](data)
                 size = len(data)
                 txn_context.data_size += size
                 if cached:
                     assert cached == data
                     txn_context.cache_size -= size
             txn_context.data_dict[oid] = data, serial, []
         txn_context.conflict_dict[oid] = conflict
Example #14
 def askStoreObject(self, conn, oid, serial, compression, checksum, data,
                    data_serial, ttid):
     if 1 < compression:
         raise ProtocolError('invalid compression value')
     # register the transaction
     self.app.tm.register(conn, ttid)
     if data or checksum != ZERO_HASH:
         # TODO: return an appropriate error packet
         assert makeChecksum(data) == checksum
         assert data_serial is None
     else:
         checksum = data = None
     try:
         self._askStoreObject(conn, oid, serial, compression, checksum,
                              data, data_serial, ttid, None)
     except DelayEvent, e:
         # locked by a previous transaction, retry later
         self.app.tm.queueEvent(self._askStoreObject, conn,
                                (oid, serial, compression, checksum, data,
                                 data_serial, ttid, time.time()), *e.args)
Example #15
 def _iter_zodb(self, zodb_list):
     util.setproctitle('neostorage: import')
     p64 = util.p64
     u64 = util.u64
     zodb_list = list(zodb_list)
     if zodb_list:
         tid = None
         _compress = compress.getCompress(self.compress)
         while 1:
             zodb_list.sort()
             z = zodb_list[0]
             # Merge transactions with same tid. Only
             # user/desc/ext from first ZODB are kept.
             if tid != z.tid:
                 if tid:
                     yield txn
                 txn = transactionAsTuple(z.transaction)
                 tid = txn[-1]
             zodb = z.zodb
             for r in z.transaction:
                 oid = p64(u64(r.oid) + zodb.shift_oid)
                 data_tid = r.data_txn
                 if data_tid or r.data is None:
                     data = None
                 else:
                     _, compression, data = _compress(zodb.repickle(r.data))
                     data = util.makeChecksum(data), data, compression
                 yield oid, data, data_tid
             try:
                 z.next()
             except StopIteration:
                 del zodb_list[0]
                 if not zodb_list:
                     break
         yield txn
     yield
Example #16
 def __dump(self, storage):
     return {u64(t.tid): [(u64(o.oid), o.data_txn and u64(o.data_txn),
                           None if o.data is None else makeChecksum(o.data))
                          for o in t]
             for t in storage.iterator()}
Example #17
    def _import(self):
        p64 = util.p64
        u64 = util.u64
        tid = p64(self.zodb_tid + 1)
        zodb_list = []
        for zodb in self.zodb:
            try:
                zodb_list.append(ZODBIterator(zodb, tid, p64(self.zodb_ltid)))
            except StopIteration:
                pass
        tid = None

        def finish():
            if tid:
                self.storeTransaction(
                    tid, object_list,
                    ((x[0] for x in object_list), str(txn.user),
                     str(txn.description), cPickle.dumps(
                         txn.extension), False, tid), False)
                self.releaseData(data_id_list)
                logging.debug(
                    "TXN %s imported (user=%r, desc=%r, len(oid)=%s)",
                    util.dump(tid), txn.user, txn.description,
                    len(object_list))
                del object_list[:], data_id_list[:]
                if self._last_commit + 1 < time.time():
                    self.commit()
                self.zodb_tid = u64(tid)

        if self.compress:
            from zlib import compress
        else:
            compress = None
            compression = 0
        object_list = []
        data_id_list = []
        while zodb_list:
            zodb_list.sort()
            z = zodb_list[0]
            # Merge transactions with same tid. Only
            # user/desc/ext from first ZODB are kept.
            if tid != z.tid:
                finish()
                txn = z.transaction
                tid = txn.tid
                yield 1
            zodb = z.zodb
            for r in z.transaction:
                oid = p64(u64(r.oid) + zodb.shift_oid)
                data_tid = r.data_txn
                if data_tid or r.data is None:
                    data_id = None
                else:
                    data = zodb.repickle(r.data)
                    if compress:
                        compressed_data = compress(data)
                        compression = len(compressed_data) < len(data)
                        if compression:
                            data = compressed_data
                    checksum = util.makeChecksum(data)
                    data_id = self.holdData(checksum, data, compression)
                    data_id_list.append(data_id)
                object_list.append((oid, data_id, data_tid))
                # Give the main loop the opportunity to process requests
                # from other nodes. In particular, clients may commit. If the
                # storage node exits after such commit, and before we actually
                # update 'obj' with 'object_list', some rows in 'data' may be
                # unreferenced. This is not a problem because the leak is
                # solved when resuming the migration.
                yield 1
            try:
                z.next()
            except StopIteration:
                del zodb_list[0]
        self._last_commit = 0
        finish()
        logging.warning(
            "All data are imported. You should change"
            " your configuration to use the native backend and restart.")
        self._import = None
        for x in """getObject getReplicationTIDList
                 """.split():
            setattr(self, x, getattr(self.db, x))
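
The importer hands each payload to holdData(checksum, data, compression) and remembers the returned data_id so it can call releaseData once the transaction metadata is stored. Judging from this usage and from the data-lock tests above, data rows appear to be deduplicated by checksum and reference-counted; a hedged toy sketch of that contract (not the real backend):

    class ToyDataStore(object):
        def __init__(self):
            self.rows = {}      # checksum -> (data, compression)
            self.refs = {}      # checksum -> number of holders

        def holdData(self, checksum, data, compression):
            self.rows.setdefault(checksum, (data, compression))
            self.refs[checksum] = self.refs.get(checksum, 0) + 1
            return checksum     # plays the role of data_id

        def releaseData(self, data_id_list):
            for data_id in data_id_list:
                self.refs[data_id] -= 1

    store = ToyDataStore()
    data_id = store.holdData('h', 'payload', 0)
    assert store.holdData('h', 'payload', 0) == data_id   # deduplicated
    store.releaseData([data_id, data_id])
    assert store.refs['h'] == 0
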
Example #18
 def _getObject(self):
     oid = self.getOID(0)
     serial = self.getNextTID()
     data = 'DATA'
     return (oid, serial, 1, makeChecksum(data), data)
Example #19
 def _getObject(self):
     oid = self.getOID(0)
     serial = self.getNextTID()
     data = 'DATA'
     return (oid, serial, 1, makeChecksum(data), data)
Example #20
 def _import(self):
     p64 = util.p64
     u64 = util.u64
     tid = p64(self.zodb_tid + 1)
     zodb_list = []
     for zodb in self.zodb:
         try:
             zodb_list.append(ZODBIterator(zodb, tid, p64(self.zodb_ltid)))
         except StopIteration:
             pass
     tid = None
     def finish():
         if tid:
             self.storeTransaction(tid, object_list, (
                 (x[0] for x in object_list),
                 str(txn.user), str(txn.description),
                 cPickle.dumps(txn.extension), False, tid), False)
             self.releaseData(data_id_list)
             logging.debug("TXN %s imported (user=%r, desc=%r, len(oid)=%s)",
                 util.dump(tid), txn.user, txn.description, len(object_list))
             del object_list[:], data_id_list[:]
             if self._last_commit + 1 < time.time():
                 self.commit()
             self.zodb_tid = u64(tid)
     if self.compress:
         from zlib import compress
     else:
         compress = None
         compression = 0
     object_list = []
     data_id_list = []
     while zodb_list:
         zodb_list.sort()
         z = zodb_list[0]
         # Merge transactions with same tid. Only
         # user/desc/ext from first ZODB are kept.
         if tid != z.tid:
             finish()
             txn = z.transaction
             tid = txn.tid
             yield 1
         zodb = z.zodb
         for r in z.transaction:
             oid = p64(u64(r.oid) + zodb.shift_oid)
             data_tid = r.data_txn
             if data_tid or r.data is None:
                 data_id = None
             else:
                 data = zodb.repickle(r.data)
                 if compress:
                     compressed_data = compress(data)
                     compression = len(compressed_data) < len(data)
                     if compression:
                         data = compressed_data
                 checksum = util.makeChecksum(data)
                  data_id = self.holdData(checksum, data, compression)
                 data_id_list.append(data_id)
             object_list.append((oid, data_id, data_tid))
             # Give the main loop the opportunity to process requests
             # from other nodes. In particular, clients may commit. If the
             # storage node exits after such commit, and before we actually
             # update 'obj' with 'object_list', some rows in 'data' may be
             # unreferenced. This is not a problem because the leak is
             # solved when resuming the migration.
             yield 1
         try:
             z.next()
         except StopIteration:
             del zodb_list[0]
     self._last_commit = 0
     finish()
     logging.warning("All data are imported. You should change"
         " your configuration to use the native backend and restart.")
     self._import = None
     for x in """getObject getReplicationTIDList
              """.split():
         setattr(self, x, getattr(self.db, x))