示例#1
0
    def store(self, oid, serial, data, version, transaction):
        """Store a pickle for oid within the current transaction.

        serial is the tid the caller last read the object at; if a
        newer revision has been committed since, conflict resolution
        is attempted before raising ConflictError.
        """
        if self._is_read_only:
            raise POSException.ReadOnlyError()
        if transaction is not self._transaction:
            raise POSException.StorageTransactionError(self, transaction)
        if version:
            raise POSException.Unsupported("Versions aren't supported")

        serial = serial or z64
        prev_tid = z64

        last_commit = self._s3.findLastCommit(oid, self._wserial)
        if last_commit:
            prev_tid = tid_unrepr(last_commit['tid'])
            if prev_tid != serial:
                # Someone committed this oid after we read it; try to
                # merge their state with ours.
                resolved = self.tryToResolveConflict(
                    oid, prev_tid, serial, data)
                if resolved is None:
                    raise POSException.ConflictError(
                        message='conflict error in store',
                        oid=oid, serials=(prev_tid, serial), data=data)
                data = resolved

        self._s3.storePickle(oid, self._tid, prev_tid, data)
        self._dirty.add(oid)

        if prev_tid and serial != prev_tid:
            return ConflictResolution.ResolvedSerial
        return self._tid
示例#2
0
    def loadBefore(self, oid, tid):
        """Return the most recent revision of oid committed before tid.

        Returns a (data, start_tid, end_tid) triple.  end_tid is
        always None here because the "next revision" index needed to
        compute the end of this revision's validity window is not
        implemented yet (see TODO below).  Returns None when no
        revision of oid exists before tid, per the ZODB loadBefore
        contract.
        """
        # TODO check recent cache first
        max_serial = p64(u64(self._s3.getSerialForTid(tid)) - 1)
        keydata = self._s3.findLastCommit(oid, max_serial)
        if keydata is None:
            # No revision committed before tid (fix: the previous code
            # subscripted keydata unconditionally and raised TypeError).
            return None

        oid = oid_unrepr(keydata['oid'])
        serial = serial_unrepr(keydata['serial'])
        # get the pickle
        data = self._s3.loadPickle(oid, serial)

        # TODO: once a commit index keyed by oid exists, look up the
        # tid of the *next* revision and report it as `end`; until
        # then the revision is reported as still current.
        end = None
        start = tid_unrepr(keydata['tid'])

        return data, start, end
示例#3
0
 def load(self, oid, version):
     """Return (data, tid) for the newest revision of oid as of the
     read transaction's snapshot time."""
     if not self._lserial:
         # Lazily open a read transaction on first access.
         self._ltid, self._lserial = self._lastCommit()

     # TODO check recent cache first
     keydata = self._s3.findLastCommit(oid, self._lserial)
     if keydata is None:
         raise KeyError(oid)

     revision_tid = tid_unrepr(keydata['tid'])
     # Fetch the pickle for that revision.
     return self._s3.loadPickle(oid, revision_tid), revision_tid
示例#4
0
    def check_invalidations(self, last_tid, ignore_tid=None):
        """Find OIDs changed by transactions committed after last_tid.

        Returns ({oid: 1}, new_tid).  Once the invalidated objects
        have been flushed, the cache is current as of new_tid; new_tid
        can be None if no transactions have yet committed.  A last_tid
        of None yields an empty invalidation map.  Transactions whose
        tid equals ignore_tid are skipped.

        If last_tid can no longer be found (presumably it has been
        packed), returns (None, new_tid) to request a full cache flush.
        """
        new_tid, new_serial = self._lastCommit()

        # There should always be at least one committed transaction.
        assert new_tid is not None

        if new_tid == last_tid:
            # Nothing has been committed since last_tid.
            return {}, last_tid
        if last_tid is None:
            # Fresh cache: nothing can be stale yet.
            return {}, new_tid

        last_serial = self._s3.getSerialForTid(last_tid)
        if last_serial is None:
            # last_tid is gone (packed?); the whole cache is suspect.
            return None, new_tid

        # Collect OIDs touched by every transaction after last_serial.
        # Need to look at mc too
        dirty = {}
        for info, oids in self._s3.getTrailingTransactions(last_serial):
            if tid_unrepr(info['tid']) == ignore_tid:
                continue
            for changed_oid in oids:
                dirty[changed_oid] = 1

        return dirty, new_tid
示例#5
0
 def _vote(self):
     """Acquire the commit serialization lock and check for write
     conflicts: transactions committed since tpc_begin that touched
     an oid this transaction is about to commit.

     Raises ConflictError on the first conflicting oid found.
     NOTE(review): within the visible code the serialize_lock is
     released only on the conflict path; presumably the non-conflict
     release happens later in the commit sequence -- confirm against
     the rest of this class.
     """
     # get the commit lock
     # now need to check for serialization errors
     # that is someone else has committed an object since tpc_begin with the
     # same oid as we are about to commit. Could we try resolving again?
     self._sync_forward_log()
     self._memcache.serialize_lock.acquire()
     # Take our commit ticket, then re-sync so the forward log covers
     # everything committed up to the moment we hold the lock.
     self._serial = self._memcache.takeSerialTicket()
     self._sync_forward_log()
     for info, oids in self._fwdlog: # could be optimised
         conflicts = self._dirty.intersection(oids)
         if conflicts:
             self._memcache.serialize_lock.release() # release asap
             prev_tid = tid_unrepr(info['tid'])
             # NOTE(review): `serial` is assigned but never used below;
             # serials=(prev_tid, self._tid) is what gets reported.
             serial = self._lserial # what if we resolved??
             #import pdb; pdb.set_trace()
             raise POSException.ConflictError(
                 message = 'conflict error in vote',
                 oid=conflicts.pop(), serials=(prev_tid, self._tid),
                 data=None)