Example no. 1
    def _store_objects(self, writer, transaction):
        for obj in writer:
            oid = obj._p_oid
            serial = getattr(obj, "_p_serial", z64)

            if ((serial == z64)
                    and ((self._savepoint_storage is None) or
                         (oid not in self._savepoint_storage.creating)
                         or self._savepoint_storage.creating[oid])):

                # obj is a new object

                # Because obj was added, it is now in _creating, so it
                # can be removed from _added.  If oid wasn't in _added,
                # then we are adding it implicitly.

                implicitly_adding = self._added.pop(oid, None) is None

                self._creating[oid] = implicitly_adding

            else:
                if (oid in self._invalidated
                        and not hasattr(obj, '_p_resolveConflict')):
                    raise ConflictError(object=obj)
                self._modified.append(oid)
            p = writer.serialize(obj)  # This calls __getstate__ of obj

            if isinstance(obj, Blob):
                if not IBlobStorage.providedBy(self._storage):
                    raise Unsupported("Storing Blobs in %s is not supported." %
                                      repr(self._storage))
                if obj.opened():
                    raise ValueError("Can't commit with opened blobs.")
                s = self._storage.storeBlob(oid, serial, p, obj._uncommitted(),
                                            self._version, transaction)
                # We invalidate the object here to ensure that the next
                # attribute access unghostifies it, which causes its blob
                # data to be reattached "cleanly".
                obj._p_invalidate()
            else:
                s = self._storage.store(oid, serial, p, self._version,
                                        transaction)
            self._cache.update_object_size_estimation(oid, len(p))
            obj._p_estimated_size = len(p)
            self._store_count += 1
            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try:
                self._cache[oid] = obj
            except:
                # Dang, I bet it's wrapped:
                # TODO:  Deprecate, then remove, this.
                if hasattr(obj, 'aq_base'):
                    self._cache[oid] = obj.aq_base
                else:
                    raise

            self._handle_serial(s, oid)
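
A side note on the branch at the top of the loop: the "new object" test can be read as a standalone predicate. The following is a minimal sketch restating that condition; is_new is an illustrative name, and z64 is the eight-byte zero serial from ZODB.utils.

    from ZODB.utils import z64

    def is_new(obj, savepoint_storage):
        # An object is treated as new when it still carries the zero
        # serial and was not already created under a savepoint (or was
        # only created there implicitly).
        if getattr(obj, "_p_serial", z64) != z64:
            return False
        if savepoint_storage is None:
            return True
        creating = savepoint_storage.creating
        return obj._p_oid not in creating or creating[obj._p_oid]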
Example no. 2
    def loadBlob(self, oid, serial):
        """Return the filename where the blob file can be found."""
        if not IBlobStorage.providedBy(self._storage):
            raise Unsupported(
                "Blobs are not supported by the underlying storage %r." %
                self._storage)
        filename = self._getCleanFilename(oid, serial)
        if not os.path.exists(filename):
            return self._storage.loadBlob(oid, serial)
        return filename
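
The providedBy test above is the standard zope.interface capability query. A minimal sketch, assuming a hypothetical DummyBlobStorage class, of how a storage advertises blob support so this check passes:

    from zope.interface import implementer
    from ZODB.interfaces import IBlobStorage

    @implementer(IBlobStorage)
    class DummyBlobStorage(object):
        """Hypothetical storage: the declaration alone makes
        IBlobStorage.providedBy(instance) return True."""

    assert IBlobStorage.providedBy(DummyBlobStorage())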
Example no. 3
    def _store_objects(self, writer, transaction):
        for obj in writer:
            oid = obj._p_oid
            serial = getattr(obj, "_p_serial", z64)

            if ((serial == z64)
                    and ((self._savepoint_storage is None) or
                         (oid not in self._savepoint_storage.creating)
                         or self._savepoint_storage.creating[oid])):

                # obj is a new object

                # Because obj was added, it is now in _creating, so it
                # can be removed from _added.  If oid wasn't in _added,
                # then we are adding it implicitly.

                implicitly_adding = self._added.pop(oid, None) is None

                self._creating[oid] = implicitly_adding

            else:
                self._modified.append(oid)

            p = writer.serialize(obj)  # This calls __getstate__ of obj
            if len(p) >= self.large_record_size:
                warnings.warn(large_object_message % (obj.__class__, len(p)))

            if isinstance(obj, Blob):
                if not IBlobStorage.providedBy(self._storage):
                    raise Unsupported("Storing Blobs in %s is not supported." %
                                      repr(self._storage))
                if obj.opened():
                    raise ValueError("Can't commit with opened blobs.")
                blobfilename = obj._uncommitted()
                if blobfilename is None:
                    assert serial is not None  # See _uncommitted
                    self._modified.pop()  # not modified
                    continue
                s = self._storage.storeBlob(oid, serial, p, blobfilename, '',
                                            transaction)
                # We invalidate the object here to ensure that the next
                # attribute access unghostifies it, which causes its blob
                # data to be reattached "cleanly".
                obj._p_invalidate()
            else:
                s = self._storage.store(oid, serial, p, '', transaction)

            self._store_count += 1
            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try:
                self._cache[oid] = obj
            except:
                # Dang, I bet it's wrapped:
                # TODO:  Deprecate, then remove, this.
                if hasattr(obj, 'aq_base'):
                    self._cache[oid] = obj.aq_base
                else:
                    raise

            self._cache.update_object_size_estimation(oid, len(p))
            obj._p_estimated_size = len(p)

            # If we write an object, we don't want to check whether it
            # was read while current.  This is a convenient choke point
            # to do so.
            self._readCurrent.pop(oid, None)
            if s:
                # savepoint
                obj._p_changed = 0  # transition from changed to up-to-date
                obj._p_serial = s
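
Unlike Example no. 1, this version warns when a serialized record reaches large_record_size. That threshold comes from the database; as a hedged sketch (the value below is illustrative), it can be set when opening a DB:

    import ZODB

    # Passing None for the storage gives an in-memory MappingStorage;
    # records of at least large_record_size bytes trigger the warning
    # issued by _store_objects above.
    db = ZODB.DB(None, large_record_size=1 << 24)  # 16 MiB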
Example no. 4
    def restoreBlob(self, cursor, oid, serial, blobfilename):
        raise Unsupported("No blob directory is configured.")
Example no. 5
    def storeBlob(self, cursor, store_func,
                  oid, serial, data, blobfilename, version, txn):
        raise Unsupported("No blob directory is configured.")
Example no. 6
    def temporaryDirectory(self):
        raise Unsupported("No blob directory is configured.")
Example no. 7
    def openCommittedBlobFile(self, cursor, oid, serial, blob=None):
        raise Unsupported("No blob directory is configured.")
Example no. 8
    def loadBlob(self, cursor, oid, serial):
        raise Unsupported("No blob directory is configured.")
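
Examples no. 4 through no. 8 are the stub methods installed when no blob directory is configured: every blob entry point raises Unsupported. A hedged sketch of how a caller might degrade gracefully; blob_path_or_none and the adapter argument are illustrative names, while Unsupported comes from ZODB.POSException:

    from ZODB.POSException import Unsupported

    def blob_path_or_none(adapter, cursor, oid, serial):
        # Return the blob filename, or None when the storage was set up
        # without a blob directory.
        try:
            return adapter.loadBlob(cursor, oid, serial)
        except Unsupported:
            return None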
Example no. 9
class Poller(object):
    """Database change notification poller"""

    _list_changes_range_queries = ("""
        SELECT zoid, tid
        FROM current_object
        WHERE tid > %(min_tid)s
            AND tid <= %(max_tid)s
        """, """
        SELECT zoid, tid
        FROM object_state
        WHERE tid > %(min_tid)s
            AND tid <= %(max_tid)s
        """)

    _list_changes_range_query = formatted_query_property('_list_changes_range')

    _poll_inv_queries = ("""
        SELECT zoid, tid
        FROM current_object
        WHERE tid > %(tid)s
        """, """
        SELECT zoid, tid
        FROM object_state
        WHERE tid > %(tid)s
        """)

    _poll_inv_query = formatted_query_property('_poll_inv')

    _poll_inv_exc_query = formatted_query_property(
        '_poll_inv', extension=' AND tid != %(self_tid)s')

    _tran_exists_queries = (
        "SELECT 1 FROM transaction WHERE tid = %(tid)s",
        Unsupported("Transaction data not available without history"))

    _tran_exists_query = formatted_query_property('_tran_exists')

    def __init__(self, poll_query, keep_history, runner, revert_when_stale):
        self.poll_query = poll_query
        self.keep_history = keep_history
        self.runner = runner
        self.revert_when_stale = revert_when_stale

    def poll_invalidations(self, conn, cursor, prev_polled_tid, ignore_tid):
        """
        Polls for new transactions.

        conn and cursor must have been created previously by open_for_load().
        prev_polled_tid is the tid returned at the last poll, or None
        if this is the first poll.  If ignore_tid is not None, changes
        committed in that transaction will not be included in the list
        of changed OIDs.

        Returns (changes, new_polled_tid), where changes is either
        a list of (oid, tid) that have changed, or None to indicate
        that the changes are too complex to list.  new_polled_tid can be
        0 if there is no data in the database.
        """
        # pylint:disable=unused-argument
        # find out the tid of the most recent transaction.
        cursor.execute(self.poll_query)
        rows = list(cursor)
        if not rows or not rows[0][0]:
            # No data.
            return None, 0
        new_polled_tid = rows[0][0]

        if prev_polled_tid is None:
            # This is the first time the connection has polled.
            return None, new_polled_tid

        if new_polled_tid == prev_polled_tid:
            # No transactions have been committed since prev_polled_tid.
            return (), new_polled_tid

        if new_polled_tid <= prev_polled_tid:
            # The database connection is stale. This can happen after
            # reading an asynchronous slave that is not fully up to date.
            # (It may also suggest that transaction IDs are not being created
            # in order, which would be a serious bug leading to consistency
            # violations.)
            if self.revert_when_stale:
                # This client prefers to revert to the old state.
                log.warning(
                    "Reverting to stale transaction ID %d and clearing cache. "
                    "(prev_polled_tid=%d)", new_polled_tid, prev_polled_tid)
                # We have to invalidate the whole cPickleCache, otherwise
                # the cache would be inconsistent with the reverted state.
                return None, new_polled_tid

            # This client never wants to revert to stale data, so
            # raise ReadConflictError to trigger a retry.
            # We're probably just waiting for async replication
            # to catch up, so retrying could do the trick.
            raise ReadConflictError(
                "The database connection is stale: new_polled_tid=%d, "
                "prev_polled_tid=%d." % (new_polled_tid, prev_polled_tid))

        # New transaction(s) have been added.

        if self.keep_history:
            # If the previously polled transaction no longer exists,
            # the cache is too old and needs to be cleared.
            # XXX Do we actually need to detect this condition? I think
            # that if we deleted this block, all the unreachable objects
            # would be garbage collected anyway. As a test, there is no
            # equivalent of this block for history-free storage; if
            # something goes wrong there, we'll know there's another edge
            # condition to account for.
            cursor.execute(self._tran_exists_query, {'tid': prev_polled_tid})
            rows = cursor.fetchall()
            if not rows:
                # Transaction not found; perhaps it has been packed.
                # The connection cache should be cleared.
                return None, new_polled_tid

        # Get the list of changed OIDs and return it.
        stmt = self._poll_inv_query
        params = {'tid': prev_polled_tid}
        if ignore_tid is not None:
            stmt = self._poll_inv_exc_query
            params['self_tid'] = ignore_tid

        cursor.execute(stmt, params)
        changes = cursor.fetchall()

        return changes, new_polled_tid

    def list_changes(self, cursor, after_tid, last_tid):
        """Return the (oid, tid) values changed in a range of transactions.

        The returned iterable must include the latest changes in the range
        after_tid < tid <= last_tid.
        """
        params = {'min_tid': after_tid, 'max_tid': last_tid}
        cursor.execute(self._list_changes_range_query, params)
        return cursor.fetchall()
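
The contract in the poll_invalidations docstring maps onto three caller-side cases: None means "invalidate everything", an empty sequence means "nothing changed", and a non-empty list is invalidated entry by entry. A hedged usage sketch; poller, conn, cursor, and the cache object are assumed to exist, and invalidate/clear are illustrative cache methods:

    changes, new_tid = poller.poll_invalidations(
        conn, cursor, prev_polled_tid, None)
    if changes is None:
        cache.clear()                 # too complex to list: drop everything
    else:
        for oid, tid in changes:      # empty when nothing changed
            cache.invalidate(oid, tid)
    prev_polled_tid = new_tid         # remember the tid for the next poll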