Example 1
def delete_from(self, table, **kw):
    # Queue a row for deletion from ``table``. Rows are grouped by
    # (table, sorted column names) so they can later be flushed as
    # batched DELETE statements.
    if not kw:
        raise AssertionError("Need at least one column value")
    columns = tuple(sorted(iterkeys(kw)))
    key = (table, columns)
    rows = self.deletes.get(key)
    if rows is None:
        self.deletes[key] = rows = set()
    row = tuple(str(kw[column]) for column in columns)
    rows.add(row)
    self.rows_added += 1
    if self.rows_added >= self.row_limit:
        self.flush()
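
The method above only makes sense together with the surrounding batcher state (``self.deletes``, ``self.rows_added``, ``self.row_limit``, ``flush()``). The following is a minimal, self-contained sketch of that context: the class name, the ``flush()`` bookkeeping, and the demo data are illustrative assumptions rather than the original implementation, and ``iterkeys`` is stubbed with a small compatibility shim.

def iterkeys(d):
    # Stand-in for the Python-2/3 compatibility helper the example assumes.
    return iter(d.keys())


class _DeleteBatcher(object):  # hypothetical name, for illustration only
    def __init__(self, row_limit=2):
        self.deletes = {}      # {(table, (col, ...)): {(value, ...), ...}}
        self.rows_added = 0
        self.row_limit = row_limit
        self.flushed = []      # record of flushed batches, for the demo

    def delete_from(self, table, **kw):
        if not kw:
            raise AssertionError("Need at least one column value")
        columns = tuple(sorted(iterkeys(kw)))
        key = (table, columns)
        rows = self.deletes.get(key)
        if rows is None:
            self.deletes[key] = rows = set()
        rows.add(tuple(str(kw[column]) for column in columns))
        self.rows_added += 1
        if self.rows_added >= self.row_limit:
            self.flush()

    def flush(self):
        # A real batcher would emit one parameterized DELETE per key here.
        self.flushed.append(dict(self.deletes))
        self.deletes.clear()
        self.rows_added = 0


batcher = _DeleteBatcher(row_limit=2)
batcher.delete_from('object_state', zoid=1)
batcher.delete_from('object_state', zoid=2)   # reaches row_limit, triggers flush()
assert batcher.flushed == [{('object_state', ('zoid',)): {('1',), ('2',)}}]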
Example 2
class _TransactionRangeObjectIndex(OidTMap):
    """
    Holds the portion of the object index visible to transactions <=
    ``highest_visible_tid``.

    Initialized to be either empty, or with the *complete* index of
    objects changed in transactions <= ``highest_visible_tid`` and
    greater than ``complete_since_tid``. (That is, you cannot pass an
    'ignore_txn' value to the poller. TODO: Work on that. Maybe
    there's a way.)

    ``highest_visible_tid`` must always be given, but
    ``complete_since_tid`` may be null.

    These attributes, once set, will never change. We may add data
    prior to and including ``complete_since_tid`` as we access it, but
    we have no guarantee that it is complete.

    When ``complete_since_tid`` and ``highest_visible_tid`` are the
    same, the completed range is empty: we polled, but no transactions
    newer than ``complete_since_tid`` were visible, so the index holds
    no changes.
    """
    __slots__ = (
        'highest_visible_tid',
        'complete_since_tid',
        'accepts_writes',
    )

    # When the root node of a BTree splits (outgrows ``max_internal_size``),
    # it creates a new BTree object to be its child by calling ``type(self)()``.
    # That doesn't work if you have required arguments.

    def __init__(self,
                 highest_visible_tid=0,
                 complete_since_tid=None,
                 data=()):
        assert complete_since_tid is None or highest_visible_tid >= complete_since_tid
        self.highest_visible_tid = highest_visible_tid
        self.complete_since_tid = complete_since_tid
        self.accepts_writes = True

        OidTMap.__init__(self, data)

        if self:
            # Verify the data matches what they told us.
            # If we were constructed with data, we must be complete.
            # Otherwise we get built up bit by bit.
            assert self.complete_since_tid
            self.verify()
        else:
            # If we had no changes, then either we polled for the same tid
            # as we got, or we didn't try to poll for changes at all.
            assert complete_since_tid is None or complete_since_tid == highest_visible_tid, (
                complete_since_tid, highest_visible_tid)

    def verify(self, initial=True):
        # Check that our constraints are met
        if not self or not __debug__:
            return

        max_stored_tid = self.max_stored_tid()
        min_stored_tid = self.min_stored_tid()
        hvt = self.highest_visible_tid
        assert max_stored_tid <= hvt, (max_stored_tid, hvt, self)
        assert min_stored_tid > 0, min_stored_tid
        if initial:
            # This is only true at startup. Over time we can add older entries.
            assert self.complete_since_tid is None or min_stored_tid > self.complete_since_tid, (
                min_stored_tid, self.complete_since_tid)

    def complete_to(self, newer_bucket):
        """
        Given an incomplete bucket (this object) and a possibly-complete bucket for the
        same or a later TID, merge this one to hold the same data and be complete
        for the same transaction range.

        This bucket will be complete for the given bucket's completion, *if* the
        given bucket actually had a different tid than this one. If the given
        bucket was the same tid, then nothing changed and we can't presume
        to be complete.
        """
        assert not self.complete_since_tid
        assert newer_bucket.highest_visible_tid >= self.highest_visible_tid
        self.update(newer_bucket)
        if newer_bucket.highest_visible_tid > self.highest_visible_tid:
            self.highest_visible_tid = newer_bucket.highest_visible_tid
            self.complete_since_tid = newer_bucket.complete_since_tid

    def merge_same_tid(self, bucket):
        """
        Given an incoming complete bucket for the same highest tid as this bucket,
        merge the two into this object.
        """
        assert bucket.highest_visible_tid == self.highest_visible_tid
        self.update(bucket)
        if bucket.complete_since_tid < self.complete_since_tid:
            self.complete_since_tid = bucket.complete_since_tid

    def merge_older_tid(self, bucket):
        """
        Given an incoming complete bucket for an older tid than this bucket,
        merge the two into this object.

        Because we're newer, entries in this bucket supersede objects
        in the incoming data.

        If the *bucket* was complete to a transaction earlier than the transaction
        we're complete to, we become complete to that earlier transaction.

        Does not modify the *bucket*.
        """
        assert bucket.highest_visible_tid <= self.highest_visible_tid
        #debug('Diff between self', dict(self), "and", dict(bucket), items_not_in_self)
        # bring missing data into ourself, being careful not to overwrite
        # things we do have.
        self.update(bucket.items_not_in(self))
        if bucket.complete_since_tid and bucket.complete_since_tid < self.complete_since_tid:
            self.complete_since_tid = bucket.complete_since_tid

    # These raise ValueError if the map is empty.
    if not hasattr(OidTMap, 'maxKey'):
        maxKey = lambda self: max(iterkeys(self))

    if hasattr(OidTMap, 'itervalues'):  # BTrees, or Python 2 dict

        def maxValue(self):
            return max(self.itervalues())

        def minValue(self):
            return min(self.itervalues())
    else:

        def maxValue(self):
            return max(self.values())

        def minValue(self):
            return min(self.values())

    if not hasattr(OidTMap, 'iteritems'):
        iteritems = OidTMap.items

    def items_not_in(self, other):
        """
        Return the ``(k, v)`` pairs in self whose ``k`` is not found in *other*.
        """
        return OidTMap_difference(self, other)

    max_stored_tid = maxValue
    min_stored_tid = minValue

    def __repr__(self):
        return '<%s at 0x%x hvt=%s complete_after=%s len=%s readonly=%s>' % (
            self.__class__.__name__,
            id(self),
            self.highest_visible_tid,
            self.complete_since_tid,
            len(self),
            not self.accepts_writes,
        )
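
To make the merge semantics above concrete, here is a simplified, self-contained sketch. It stands a plain ``dict`` of ``{oid: tid}`` in for ``OidTMap``, keeps only the ``highest_visible_tid``/``complete_since_tid`` bookkeeping, and mirrors the ``complete_to`` and ``merge_older_tid`` logic; the class name and the demo data are illustrative assumptions, not the original types.

class RangeIndexSketch(dict):
    # {oid: tid}; a dict-based stand-in for the OidTMap-derived class above.
    def __init__(self, highest_visible_tid=0, complete_since_tid=None, data=()):
        dict.__init__(self, data)
        self.highest_visible_tid = highest_visible_tid
        self.complete_since_tid = complete_since_tid

    def items_not_in(self, other):
        # The (oid, tid) pairs whose oid is not already present in *other*.
        return {k: v for k, v in self.items() if k not in other}

    def complete_to(self, newer):
        # Adopt a newer, complete bucket's data; only take over its completion
        # range if it really is for a later tid.
        assert not self.complete_since_tid
        assert newer.highest_visible_tid >= self.highest_visible_tid
        self.update(newer)
        if newer.highest_visible_tid > self.highest_visible_tid:
            self.highest_visible_tid = newer.highest_visible_tid
            self.complete_since_tid = newer.complete_since_tid

    def merge_older_tid(self, bucket):
        # We are newer, so our entries win; only oids we lack come from *bucket*.
        assert bucket.highest_visible_tid <= self.highest_visible_tid
        self.update(bucket.items_not_in(self))
        if bucket.complete_since_tid and bucket.complete_since_tid < self.complete_since_tid:
            self.complete_since_tid = bucket.complete_since_tid


# An incomplete index built up lazily for transactions <= 10 ...
current = RangeIndexSketch(highest_visible_tid=10, data={1: 9})
# ... and a complete poll result covering transactions in (10, 12].
newer = RangeIndexSketch(highest_visible_tid=12, complete_since_tid=10,
                         data={1: 11, 2: 12})
current.complete_to(newer)
assert (current.highest_visible_tid, current.complete_since_tid) == (12, 10)
assert current[1] == 11 and current[2] == 12

# Merging in an older, complete bucket fills gaps without overwriting newer data.
older = RangeIndexSketch(highest_visible_tid=8, complete_since_tid=5,
                         data={1: 7, 3: 6})
current.merge_older_tid(older)
assert current[1] == 11            # the newer entry wins
assert current[3] == 6             # the missing oid was filled in
assert current.complete_since_tid == 5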