Example #1
    def set_multi(self, d, allow_replace=True):
        if not self._bucket_limit:
            # don't bother
            return
        compress = self._compress
        self._lock_acquire()
        try:
            for key, value in iteritems(d):
                if isinstance(value, compressible):
                    if len(value) >= self._value_limit:
                        # This value is too big, so don't cache it.
                        continue
                    if compress is not None:
                        cvalue = compress(value)
                    else:
                        cvalue = value

                else:
                    cvalue = value

                if key in self._bucket0:
                    if not allow_replace:
                        continue
                    del self._bucket0[key]

                if key in self._bucket1:
                    if not allow_replace:
                        continue
                    del self._bucket1[key]

                self._set_one(key, cvalue)
        finally:
            self._lock_release()
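
The method above relies on several collaborators the snippet does not show: `iteritems` (a Python 2/3 compatibility helper), the `compressible` type tuple, the two cache buckets, and the lock. A minimal self-contained sketch of that surrounding machinery follows; every name and value here is an illustrative assumption (a zlib compressor, simple dict buckets), not RelStorage's actual wiring.

# Sketch of the context set_multi runs in. Everything below is an
# illustrative assumption, not RelStorage's real implementation.
import threading
import zlib

def iteritems(d):
    # RelStorage pulls this from a py2/py3 compatibility module.
    return d.items()

compressible = (bytes,)  # value types eligible for compression

class LocalClientSketch(object):
    def __init__(self, bucket_limit=1000, value_limit=16384):
        self._bucket_limit = bucket_limit   # 0 disables caching entirely
        self._value_limit = value_limit     # max cacheable value size
        self._compress = zlib.compress      # or None to skip compression
        self._bucket0 = {}                  # newer entries
        self._bucket1 = {}                  # older entries
        self._lock = threading.Lock()

    def _lock_acquire(self):
        self._lock.acquire()

    def _lock_release(self):
        self._lock.release()

    def _set_one(self, key, cvalue):
        # New and replaced entries land in the newer bucket.
        self._bucket0[key] = cvalue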
Example #2
    def abort(self):
        if self._txn_blobs:
            for oid, filename in iteritems(self._txn_blobs):
                if os.path.exists(filename):
                    ZODB.blob.remove_committed(filename)
                    if self.shared_blob_dir:
                        dirname = os.path.dirname(filename)
                        if not _has_files(dirname):
                            ZODB.blob.remove_committed_dir(dirname)
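
The `_has_files` helper is referenced but not defined in this snippet. A plausible sketch, assuming its job is to report whether a blob directory still contains any non-hidden entries so an emptied directory can be removed (the real helper may differ in detail):

import os

def _has_files(dirname):
    # Assumed behavior: True if the directory holds anything besides
    # hidden entries, so an emptied blob directory can be cleaned up.
    for name in os.listdir(dirname):
        if not name.startswith('.'):
            return True
    return False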
Example #3
    def __init__(self, **kwoptions):
        self.name = None
        self.read_only = False
        self.blob_dir = None
        self.shared_blob_dir = True
        self.blob_cache_size = None
        self.blob_cache_size_check = 10
        self.blob_chunk_size = 1 << 20
        self.keep_history = True
        self.replica_conf = None
        self.ro_replica_conf = None
        self.replica_timeout = 600.0
        self.revert_when_stale = False
        self.poll_interval = 0
        self.pack_gc = True
        self.pack_prepack_only = False
        self.pack_skip_prepack = False
        self.pack_batch_timeout = 1.0
        self.pack_commit_busy_delay = 5.0
        self.cache_servers = ()  # ['127.0.0.1:11211']
        self.cache_module_name = 'relstorage.pylibmc_wrapper'
        self.cache_prefix = ''
        self.cache_local_mb = 10
        self.cache_local_object_max = 16384
        self.cache_local_compression = 'zlib'
        self.cache_delta_size_limit = 10000
        self.commit_lock_timeout = 30
        self.commit_lock_id = 0
        self.create_schema = True
        self.strict_tpc = default_strict_tpc

        # If share_local_cache is off, each storage instance has a private
        # cache rather than a shared cache.  This option exists mainly for
        # simulating disconnected caches in tests.
        self.share_local_cache = True

        for key, value in iteritems(kwoptions):
            if key in self.__dict__:
                setattr(self, key, value)
            else:
                raise TypeError("Unknown parameter: %s" % key)
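
A short usage sketch of this keyword-options pattern, assuming the constructor belongs to a class named `Options` (as in relstorage.options); the option values chosen here are arbitrary:

# Valid keywords become attributes; unknown ones fail fast.
options = Options(read_only=True, cache_local_mb=50)
assert options.read_only is True
assert options.cache_local_mb == 50

try:
    Options(no_such_option=1)
except TypeError as exc:
    print(exc)  # Unknown parameter: no_such_option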
Example #4
    def after_poll(self, cursor, prev_tid_int, new_tid_int, changes):
        """Update checkpoint data after a database poll.

        cursor is connected to a load connection.

        changes lists all [(oid_int, tid_int)] changed after
        prev_tid_int, up to and including new_tid_int, excluding the
        changes last committed by the associated storage instance.
        changes can be None to indicate too many objects changed
        to list them all.

        prev_tid_int can be None, in which case the changes
        parameter will be ignored.  new_tid_int cannot be None.
        """
        new_checkpoints = None
        for client in self.clients_global_first:
            s = client.get(self.checkpoints_key)
            if s:
                try:
                    c0, c1 = s.split()
                    c0 = int(c0)
                    c1 = int(c1)
                except ValueError:
                    # Invalid checkpoint cache value; ignore it.
                    pass
                else:
                    if c0 >= c1:
                        new_checkpoints = (c0, c1)
                        break

        if not new_checkpoints:
            new_checkpoints = (new_tid_int, new_tid_int)

            if not self.checkpoints:
                # Initialize the checkpoints.
                cache_data = '%d %d' % new_checkpoints
                log.debug("Initializing checkpoints: %s", cache_data)
            else:
                # Suggest reinstatement of the former checkpoints, but
                # use new checkpoints for this instance. Using new
                # checkpoints ensures that we don't build up
                # self.delta_after0 in case the cache is offline.
                cache_data = '%d %d' % self.checkpoints
                log.debug("Reinstating checkpoints: %s", cache_data)
            for client in self.clients_global_first:
                client.set(self.checkpoints_key, cache_data)

            self.checkpoints = new_checkpoints
            self.delta_after0 = {}
            self.delta_after1 = {}
            self.current_tid = new_tid_int
            return

        allow_shift = True
        if new_checkpoints[0] > new_tid_int:
            # checkpoint0 is in a future that this instance can't
            # yet see.  Ignore the checkpoint change for now.
            new_checkpoints = self.checkpoints
            if not new_checkpoints:
                new_checkpoints = (new_tid_int, new_tid_int)
            allow_shift = False

        # We want to keep the current checkpoints for speed, but we
        # have to replace them (to avoid consistency violations)
        # if certain conditions happen (like emptying the ZODB cache).
        if (new_checkpoints == self.checkpoints and changes is not None
                and prev_tid_int and prev_tid_int <= self.current_tid
                and new_tid_int >= self.current_tid):
            # All the conditions for keeping the checkpoints were met,
            # so just update self.delta_after0 and self.current_tid.
            m = self.delta_after0
            m_get = m.get
            for oid_int, tid_int in changes:
                my_tid_int = m_get(oid_int)
                if my_tid_int is None or tid_int > my_tid_int:
                    m[oid_int] = tid_int
            self.current_tid = new_tid_int
        else:
            # We have to replace the checkpoints.
            cp0, cp1 = new_checkpoints
            log.debug("Using new checkpoints: %d %d", cp0, cp1)
            # Use the checkpoints specified by the cache.
            # Rebuild delta_after0 and delta_after1.
            new_delta_after0 = {}
            new_delta_after1 = {}
            if cp1 < new_tid_int:
                # poller.list_changes provides an iterator of
                # (oid, tid) where tid > after_tid and tid <= last_tid.
                change_list = self.adapter.poller.list_changes(
                    cursor, cp1, new_tid_int)

                # Make a dictionary that contains, for each oid, the most
                # recent tid listed in changes.
                change_dict = {}
                if not isinstance(change_list, list):
                    change_list = list(change_list)
                change_list.sort()
                for oid_int, tid_int in change_list:
                    change_dict[oid_int] = tid_int

                # Put the changes in new_delta_after*.
                for oid_int, tid_int in iteritems(change_dict):
                    if tid_int > cp0:
                        new_delta_after0[oid_int] = tid_int
                    elif tid_int > cp1:
                        new_delta_after1[oid_int] = tid_int

            self.checkpoints = new_checkpoints
            self.delta_after0 = new_delta_after0
            self.delta_after1 = new_delta_after1
            self.current_tid = new_tid_int

        if allow_shift and len(self.delta_after0) >= self.delta_size_limit:
            # delta_after0 has reached its limit.  The way to
            # shrink it is to shift the checkpoints.  Suggest
            # shifted checkpoints for future polls.
            # If delta_after0 is far over the limit (caused by a large
            # transaction), suggest starting new checkpoints instead of
            # shifting.
            oversize = (len(self.delta_after0) >= self.delta_size_limit * 2)
            self._suggest_shifted_checkpoints(new_tid_int, oversize)
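
The checkpoints travel through the cache as a plain string of two integers, "<tid0> <tid1>", with the invariant c0 >= c1 enforced when reading. A minimal round-trip sketch of that format, mirroring the serialization and validation shown above (the helper names are illustrative):

# Round trip of the 'c0 c1' checkpoint cache value used in after_poll.
def serialize_checkpoints(c0, c1):
    return '%d %d' % (c0, c1)

def parse_checkpoints(s):
    # Mirrors the validation above: two integers, and c0 must be >= c1.
    try:
        c0, c1 = s.split()
        c0 = int(c0)
        c1 = int(c1)
    except ValueError:
        return None
    if c0 >= c1:
        return (c0, c1)
    return None

assert parse_checkpoints(serialize_checkpoints(8, 5)) == (8, 5)
assert parse_checkpoints('garbage') is None   # malformed value
assert parse_checkpoints('3 9') is None       # violates c0 >= c1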