Example #1
    def create_and_populate_client():
        client = LocalClient(cache_options, cache_pfx)

        # Only need to populate in the workers.
        size_dists = [100] * 800 + [300] * 500 + [1024] * 300 + [2048] * 200 + [4096] * 150

        with open('/dev/urandom', 'rb') as rnd:
            data = [rnd.read(x) for x in size_dists]
        data_iter = itertools.cycle(data)
        keys_and_values = []
        len_values = 0
        j = 0
        for j, datum in enumerate(data_iter):
            if len(datum) > client.limit or len_values + len(datum) > client.limit:
                break
            len_values += len(datum)
            # To ensure the pickle memo cache doesn't just write out "use object X",
            # but distinct copies of the strings, we need to copy them
            keys_and_values.append(
                ((j, j), (datum[:-1] + b'x', j))
            )

            # # We need to get the item so its frequency goes up enough to be written
            # # (this is while we're doing an aging at write time, which may go away).
            # # Using an assert statement causes us to write nothing if -O is used.
            # if bucket[(j, j)] is datum:
            #     raise AssertionError()
        mem_before = get_memory_usage()
        client._bulk_update(keys_and_values, mem_usage_before=mem_before)
        del keys_and_values
        #print("Len", len(bucket), "size", bucket.size, "checkpoints", client.get_checkpoints())
        return client
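
The copy-before-store step above matters because pickle memoizes repeated object references: pickling the same bytes object twice writes a back-reference, not a second copy. A minimal standard-library demonstration of the effect:

    import pickle

    blob = b'x' * 1000

    # Pickling the same object twice emits a memo back-reference...
    same = pickle.dumps([blob, blob])
    # ...while two distinct (equally sized) byte strings are written in full.
    distinct = pickle.dumps([blob, blob[:-1] + b'y'])

    assert len(same) < len(distinct)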
Example #2
    def _simulate_local(self, records, cache_local_mb, f):
        from relstorage.cache.local_client import LocalClient
        options = MockOptions()
        options.cache_local_mb = cache_local_mb
        options.cache_local_compression = 'none'
        client = LocalClient(options)

        now = time.time()
        for record in records:
            key = record.lba

            if record.opcode == 'r':
                data = client[key]
                if data is None:
                    # Fill it in from the backend
                    client[key] = b'r' * record.size
            else:
                assert record.opcode == 'w'
                client[key] = b'x' * record.size

        done = time.time()
        stats = client.stats()
        self._report_one(stats, f, cache_local_mb, now, done)

        return stats
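
A sketch of how the simulation might be driven; the `Record` shape (fields `lba`, `opcode`, `size`) is inferred from the attribute accesses above, and the trace data is invented for illustration:

    from collections import namedtuple

    # Hypothetical trace-record shape, inferred from record.lba,
    # record.opcode, and record.size above.
    Record = namedtuple('Record', ['lba', 'opcode', 'size'])

    trace = [
        Record(lba=1, opcode='w', size=4096),  # write 4KB at key 1
        Record(lba=1, opcode='r', size=4096),  # read it back: a hit
        Record(lba=2, opcode='r', size=1024),  # miss, filled from the "backend"
    ]
    # stats = self._simulate_local(trace, cache_local_mb=100, f=report_file)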
Example #3
    def __init__(self,
                 adapter,
                 options,
                 prefix,
                 local_client=None,
                 _tracer=None):
        self.adapter = adapter
        self.options = options
        self.prefix = prefix or ''

        # checkpoints_key holds the current checkpoints.
        self.checkpoints_key = '%s:checkpoints' % self.prefix
        assert isinstance(self.checkpoints_key, str)  # no unicode on Py2

        # delta_after0 contains {oid: tid} after checkpoint 0
        # and before or at self.current_tid.
        self.delta_after0 = self._delta_map_type()

        # delta_after1 contains {oid: tid} after checkpoint 1 and
        # before or at checkpoint 0. The content of delta_after1 only
        # changes when checkpoints move.
        self.delta_after1 = self._delta_map_type()

        # delta_size_limit places an approximate limit on the number of
        # entries in the delta_after maps.
        self.delta_size_limit = options.cache_delta_size_limit

        self.clients_local_first = []
        if local_client is None:
            self.clients_local_first.append(LocalClient(options, self.prefix))
        else:
            self.clients_local_first.append(local_client)

        if options.cache_servers:
            module_name = options.cache_module_name
            module = importlib.import_module(module_name)
            servers = options.cache_servers
            if isinstance(servers, string_types):
                servers = servers.split()
            self.clients_local_first.append(module.Client(servers))

        # self.clients_local_first is in order from local to global caches,
        # while self.clients_global_first is in order from global to local.
        self.clients_global_first = list(reversed(self.clients_local_first))

        if local_client is None:
            self.restore()

        if _tracer is None:
            tracefile = persistence.trace_file(options, self.prefix)
            if tracefile:
                _tracer = ZEOTracer(tracefile)
                _tracer.trace(0x00)

        self._tracer = _tracer
        if hasattr(self._tracer, 'trace_store_current'):
            self._trace = self._tracer.trace
            self._trace_store_current = self._tracer.trace_store_current
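
Taken together, the two maps support a two-step lookup for an oid's most recent change; a minimal sketch of that logic under the invariants stated in the comments above (a hypothetical helper, not an actual StorageCache method):

    def latest_delta_tid(cache, oid):
        # Hypothetical helper illustrating the delta-map invariants.
        tid = cache.delta_after0.get(oid)      # changed after cp0, at or before current_tid
        if tid is None:
            tid = cache.delta_after1.get(oid)  # changed after cp1, at or before cp0
        # None means the oid is unchanged since checkpoint 1, so any
        # cached state at or before cp1 is still current.
        return tid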
Example #4
    def __init__(self, adapter, options, prefix, _parent=None):
        super(StorageCache, self).__init__()
        self.adapter = adapter
        self.options = options
        self.keep_history = options.keep_history
        self.prefix = prefix or ''

        if _parent is None:
            # I must be the master!

            # This is shared between all instances of a cache in a tree,
            # including the master, so that they can share information about
            # polling.
            self.polling_state = MVCCDatabaseCoordinator(self.options)
            self.local_client = LocalClient(options, self.prefix)

            shared_cache = MemcacheStateCache.from_options(
                options, self.prefix)
            if shared_cache is not None:
                self.cache = MultiStateCache(self.local_client, shared_cache)
            else:
                self.cache = self.local_client

            tracefile = persistence.trace_file(options, self.prefix)
            if tracefile:
                tracer = ZEOTracer(tracefile)
                tracer.trace(0x00)
                self.cache = TracingStateCache(self.cache, tracer)
        else:
            self.polling_state = _parent.polling_state  # type: MVCCDatabaseCoordinator
            self.local_client = _parent.local_client.new_instance()
            self.cache = _parent.cache.new_instance()

        # Once we have registered with the MVCCDatabaseCoordinator,
        # we cannot make any changes to our own MVCC state without
        # letting it know about them. In particular, we must not
        # simply assign to this attribute (except in careful
        # circumstances where we're sure to be single-threaded).
        # This object may be None.
        self.object_index = None

        # It is also important not to register with the coordinator until
        # we are fully initialized; we could be constructing a new_instance
        # in one thread while polling is going on in other threads, and
        # exposing a partially constructed instance can produce strange
        # AttributeErrors.
        self.polling_state.register(self)

        if _parent is None:
            self.restore()
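
The `_parent` argument implies a master/child pattern: the master creates the shared MVCCDatabaseCoordinator and LocalClient once, and each per-connection instance derives from it. A usage sketch, assuming `adapter`, `options`, and `prefix` are already in hand:

    # Master instance, created once for the storage:
    master = StorageCache(adapter, options, prefix)

    # Per-connection instance: shares the coordinator and derives its
    # own view of the local client and cache via new_instance().
    child = StorageCache(adapter, options, prefix, _parent=master)
    assert child.polling_state is master.polling_state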
Example #5
    def makeOne(populate=True, data=None):
        mem_before = get_memory_usage()
        gc.collect()
        gc.collect()
        objects_before = len(gc.get_objects())

        client = LocalClient(options, 'pfx')
        client.b_mem_before = mem_before
        client.b_objects_before = objects_before
        if populate:
            client._bulk_update([
                (t[0], (t[1][0], t[1][1], False, 1))
                for t in data or _make_data(random_data, KEY_GROUP_SIZE)
            ])
        return client
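
The `b_mem_before` and `b_objects_before` attributes stashed on the client allow a before/after comparison once the cache is populated; a usage sketch reusing the same `gc` and `get_memory_usage` helpers:

    client = makeOne()
    gc.collect()
    print("memory delta:", get_memory_usage() - client.b_mem_before)
    print("object delta:", len(gc.get_objects()) - client.b_objects_before)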
Example #6
    def __init__(self,
                 adapter,
                 options,
                 prefix,
                 local_client=None,
                 _tracer=None):
        self.adapter = adapter
        self.options = options
        self.prefix = prefix or ''

        # delta_after0 contains {oid: tid} *after* checkpoint 0
        # and before or at self.current_tid.
        self.delta_after0 = self._delta_map_type()

        # delta_after1 contains {oid: tid} *after* checkpoint 1 and
        # *before* or at checkpoint 0. The content of delta_after1 only
        # changes when checkpoints shift and we rebuild it.
        self.delta_after1 = self._delta_map_type()

        # delta_size_limit places an approximate limit on the number of
        # entries in the delta_after maps.
        self.delta_size_limit = options.cache_delta_size_limit

        if local_client is None:
            self.local_client = LocalClient(options, self.prefix)
        else:
            self.local_client = local_client

        shared_cache = MemcacheStateCache.from_options(options, self.prefix)
        if shared_cache is not None:
            self.cache = MultiStateCache(self.local_client, shared_cache)
        else:
            self.cache = self.local_client

        if local_client is None:
            self.restore()

        if _tracer is None:
            tracefile = persistence.trace_file(options, self.prefix)
            if tracefile:
                _tracer = ZEOTracer(tracefile)
                _tracer.trace(0x00)

        self._tracer = _tracer
        if hasattr(self._tracer, 'trace_store_current'):
            self.cache = TracingStateCache(self.cache, _tracer)
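
The tracer hookup in Examples #3 and #6 is a plain decorator pattern: a wrapper logs each cache operation and then delegates to the wrapped cache. A generic sketch of that shape; the method names and event codes below are placeholders, not the real TracingStateCache interface:

    class SketchTracingCache(object):
        # Illustrative wrapper only; not the real TracingStateCache.
        def __init__(self, cache, tracer):
            self._cache = cache
            self._tracer = tracer

        def __getitem__(self, key):
            value = self._cache[key]
            # 0x01/0x02 are placeholder hit/miss codes.
            self._tracer.trace(0x01 if value is not None else 0x02)
            return value

        def __setitem__(self, key, value):
            self._tracer.trace(0x03)  # placeholder store code
            self._cache[key] = value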
Example #7
    def read_client():
        begin = perf_counter()
        c2 = LocalClient(cache_options, cache_pfx)
        c2.restore()
        end = perf_counter()
        return end - begin
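
A single timing like this is noisy; a benchmark harness would typically repeat it and keep the minimum. A usage sketch:

    times = [read_client() for _ in range(5)]
    print("best of 5: %.3fs" % min(times))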
Example #8
    def makeOne(bucket_kind, populate=True):
        options.cache_local_storage = bucket_kind
        client = LocalClient(options, 'pfx')
        if populate:
            client._bucket0.bulk_update(ALL_DATA)
        return client
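
A usage sketch exercising several storage implementations; the kind names here are hypothetical, and `stats()` is the same reporting call used in Example #2:

    for kind in ('ring', 'dict'):  # hypothetical storage kinds
        client = makeOne(kind)
        print(kind, client.stats())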
Example #9
    def populate_empty():
        c = LocalClient(options)
        for k, v in ALL_DATA:
            c.set(k, v)
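
To contrast this item-at-a-time population with the bulk path used in Examples #5 and #8, the function can be timed directly; a sketch using only the standard library:

    import timeit

    print("one-at-a-time population: %.3fs"
          % timeit.timeit(populate_empty, number=1))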