Example #1
    def __init__(self, hs):
        self.hs = hs
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401

        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
        register_cache("token_cache", self.token_cache)
Example #2
    def __init__(self, hs):
        self.hs = hs
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401

        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
        register_cache("cache", "token_cache", self.token_cache)
Example #3
    def __init__(self, hs):
        self.hs = hs
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()

        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
        register_cache("cache", "token_cache", self.token_cache)

        self._account_validity = hs.config.account_validity
Example #4
File: auth.py Project: mjvaldez/synapse
    def __init__(self, hs):
        self.hs = hs
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()

        self.token_cache = LruCache(10000)
        register_cache("cache", "token_cache", self.token_cache)

        self._auth_blocking = AuthBlocking(self.hs)

        self._account_validity = hs.config.account_validity
        self._track_appservice_user_ips = hs.config.track_appservice_user_ips
        self._macaroon_secret_key = hs.config.macaroon_secret_key
Example #5
    def __init__(self, cache_name, clock, max_len=0, expiry_ms=0,
                 reset_expiry_on_get=False):
        """
        Args:
            cache_name (str): Name of this cache, used for logging.
            clock (Clock)
            max_len (int): Max size of dict. If the dict grows larger than this
                then the oldest items get automatically evicted. Default is 0,
                which indicates there is no max limit.
            expiry_ms (int): How long before an item is evicted from the cache
                in milliseconds. Default is 0, indicating items never get
                evicted based on time.
            reset_expiry_on_get (bool): If true, will reset the expiry time for
                an item on access. Defaults to False.

        """
        self._cache_name = cache_name

        self._clock = clock

        self._max_len = max_len
        self._expiry_ms = expiry_ms

        self._reset_expiry_on_get = reset_expiry_on_get

        self._cache = {}

        self.metrics = register_cache(cache_name, self._cache)
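The docstring spells the behaviour out completely, so an illustrative version can be reconstructed from it. The sketch below is hypothetical (the names are mine, not the Synapse class): an OrderedDict of `(value, timestamp_ms)` entries with FIFO eviction past `max_len`, time-based expiry after `expiry_ms`, and an optional expiry reset on `get`.

import time
from collections import OrderedDict


class ExpiringDictSketch:
    """Illustrative cache with the semantics documented above (0 = no limit)."""

    def __init__(self, max_len=0, expiry_ms=0, reset_expiry_on_get=False):
        self._max_len = max_len
        self._expiry_ms = expiry_ms
        self._reset_expiry_on_get = reset_expiry_on_get
        self._cache = OrderedDict()  # key -> (value, inserted_ms)

    @staticmethod
    def _now_ms():
        return time.time() * 1000

    def set(self, key, value):
        self._cache[key] = (value, self._now_ms())
        # Evict the oldest entries once we grow past max_len (if set).
        while self._max_len and len(self._cache) > self._max_len:
            self._cache.popitem(last=False)

    def get(self, key, default=None):
        entry = self._cache.get(key)
        if entry is None:
            return default
        value, ts = entry
        if self._expiry_ms and self._now_ms() - ts > self._expiry_ms:
            del self._cache[key]  # expired; drop it on access
            return default
        if self._reset_expiry_on_get:
            self._cache[key] = (value, self._now_ms())
        return value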
Example #6
    def __init__(
        self,
        name: str,
        current_stream_pos: int,
        max_size=10000,
        prefilled_cache: Optional[Mapping[EntityType, int]] = None,
    ):
        self._original_max_size = max_size
        self._max_size = math.floor(max_size)
        self._entity_to_key = {}  # type: Dict[EntityType, int]

        # map from stream id to the set of entities which changed at that stream id.
        self._cache = SortedDict()  # type: SortedDict[int, Set[EntityType]]

        # the earliest stream_pos for which we can reliably answer
        # get_all_entities_changed. In other words, one less than the earliest
        # stream_pos for which we know _cache is valid.
        #
        self._earliest_known_stream_pos = current_stream_pos
        self.name = name
        self.metrics = caches.register_cache(
            "cache", self.name, self._cache, resize_callback=self.set_cache_factor
        )

        if prefilled_cache:
            for entity, stream_pos in prefilled_cache.items():
                self.entity_has_changed(entity, stream_pos)
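The comments in Example #6 describe the data structure precisely: a SortedDict from stream position to the set of entities that changed there, an inverse `_entity_to_key` map, and an `_earliest_known_stream_pos` below which no reliable answer exists. A minimal sketch of that idea (simplified and hypothetical, not the full Synapse class):

from sortedcontainers import SortedDict


class StreamChangeSketch:
    def __init__(self, current_stream_pos):
        self._cache = SortedDict()  # stream_pos -> set of entities changed there
        self._entity_to_key = {}    # entity -> latest stream_pos it changed at
        self._earliest_known_stream_pos = current_stream_pos

    def entity_has_changed(self, entity, stream_pos):
        old = self._entity_to_key.get(entity)
        if old is not None and old >= stream_pos:
            return  # we already know about a change at least this recent
        if old is not None:
            self._cache[old].discard(entity)
        self._cache.setdefault(stream_pos, set()).add(entity)
        self._entity_to_key[entity] = stream_pos

    def has_entity_changed(self, entity, stream_pos):
        # Below the earliest known position we cannot answer reliably,
        # so we must conservatively assume a change.
        if stream_pos < self._earliest_known_stream_pos:
            return True
        last = self._entity_to_key.get(entity)
        return last is not None and last > stream_pos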
Example #7
    def __init__(self, hs, name, timeout_ms=0):
        self.pending_result_cache = {}  # Requests that haven't finished yet.

        self.clock = hs.get_clock()
        self.timeout_sec = timeout_ms / 1000.0

        self._name = name
        self._metrics = register_cache("response_cache", name, self)
Example #8
    def __init__(self, clock: Clock, name: str, timeout_ms: float = 0):
        self._result_cache: Dict[KV, ResponseCacheEntry] = {}

        self.clock = clock
        self.timeout_sec = timeout_ms / 1000.0

        self._name = name
        self._metrics = register_cache("response_cache", name, self, resizable=False)
Example #9
    def __init__(self, clock: Clock, name: str, timeout_ms: float = 0):
        # Requests that haven't finished yet.
        self.pending_result_cache = {}  # type: Dict[T, ObservableDeferred]

        self.clock = clock
        self.timeout_sec = timeout_ms / 1000.0

        self._name = name
        self._metrics = register_cache("response_cache", name, self, resizable=False)
Example #10
    def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}):
        self._max_size = int(max_size * CACHE_SIZE_FACTOR)
        self._entity_to_key = {}
        self._cache = sorteddict()
        self._earliest_known_stream_pos = current_stream_pos
        self.name = name
        self.metrics = register_cache(self.name, self._cache)

        for entity, stream_pos in prefilled_cache.items():
            self.entity_has_changed(entity, stream_pos)
Example #11
    def __init__(self, cache_name, timer=time.time):
        # map from key to _CacheEntry
        self._data = {}

        # the _CacheEntries, sorted by expiry time
        self._expiry_list = SortedList()

        self._timer = timer

        self._metrics = register_cache("ttl", cache_name, self)
Example #12
    def __init__(self, cache_name, timer=time.time):
        # map from key to _CacheEntry
        self._data = {}

        # the _CacheEntries, sorted by expiry time
        self._expiry_list = SortedList()

        self._timer = timer

        self._metrics = register_cache("ttl", cache_name, self)
Example #13
    def __init__(self, hs):
        self.hs = hs
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()

        self.room_push_rule_cache_metrics = register_cache(
            "cache",
            "room_push_rule_cache",
            cache=[],  # Meaningless size, as this isn't a cache that stores values
        )
Example #14
    def __init__(self, name, current_stream_pos, max_size=10000, prefilled_cache={}):
        self._max_size = int(max_size * CACHE_SIZE_FACTOR)
        self._entity_to_key = {}
        self._cache = sorteddict()
        self._earliest_known_stream_pos = current_stream_pos
        self.name = name
        self.metrics = register_cache(self.name, self._cache)

        for entity, stream_pos in prefilled_cache.items():
            self.entity_has_changed(entity, stream_pos)
Example #15
    def __init__(self, cache_name: str, timer: Callable[[], float] = time.time):
        # map from key to _CacheEntry
        self._data: Dict[KT, _CacheEntry] = {}

        # the _CacheEntries, sorted by expiry time
        self._expiry_list: SortedList[_CacheEntry] = SortedList()

        self._timer = timer

        self._metrics = register_cache("ttl", cache_name, self, resizable=False)
Example #16
    def __init__(self, hs, name, timeout_ms=0):
        self.pending_result_cache = {}  # Requests that haven't finished yet.

        self.clock = hs.get_clock()
        self.timeout_sec = timeout_ms / 1000.

        self._name = name
        self._metrics = register_cache(
            "response_cache", name, self
        )
Example #17
    def __init__(
        self,
        cache_name: str,
        clock: Clock,
        max_len: int = 0,
        expiry_ms: int = 0,
        reset_expiry_on_get: bool = False,
        iterable: bool = False,
    ):
        """
        Args:
            cache_name: Name of this cache, used for logging.
            clock
            max_len: Max size of dict. If the dict grows larger than this
                then the oldest items get automatically evicted. Default is 0,
                which indicates there is no max limit.
            expiry_ms: How long before an item is evicted from the cache
                in milliseconds. Default is 0, indicating items never get
                evicted based on time.
            reset_expiry_on_get: If true, will reset the expiry time for
                an item on access. Defaults to False.
            iterable: If true, the size is calculated by summing the
                sizes of all entries, rather than the number of entries.
        """
        self._cache_name = cache_name

        self._original_max_size = max_len

        self._max_size = int(max_len * cache_config.properties.default_factor_size)

        self._clock = clock

        self._expiry_ms = expiry_ms
        self._reset_expiry_on_get = reset_expiry_on_get

        self._cache: OrderedDict[KT, _CacheEntry] = OrderedDict()

        self.iterable = iterable

        self.metrics = register_cache("expiring", cache_name, self)

        if not self._expiry_ms:
            # Don't bother starting the loop if things never expire
            return

        def f():
            return run_as_background_process(
                "prune_cache_%s" % self._cache_name, self._prune_cache)

        self._clock.looping_call(f, self._expiry_ms / 2)
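The prune loop runs at half the expiry interval, so an expired entry lingers for at most `expiry_ms / 2` before the next pass removes it. Synapse schedules this via `clock.looping_call` wrapped in `run_as_background_process`; below is a hypothetical asyncio equivalent, assuming the cache exposes a synchronous `prune()` method:

import asyncio


async def prune_loop_sketch(cache, expiry_ms):
    # Hypothetical analogue of clock.looping_call(f, expiry_ms / 2);
    # cache.prune() is an assumed method, not Synapse's _prune_cache.
    if not expiry_ms:
        return  # nothing ever expires, so don't bother with the loop
    while True:
        await asyncio.sleep(expiry_ms / 2 / 1000)
        cache.prune()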
Example #18
    def __init__(self, clock: Clock, name: str, timeout_ms: float = 0):
        # This is poorly-named: it includes both complete and incomplete results.
        # We keep complete results rather than switching to absolute values because
        # that makes it easier to cache Failure results.
        self.pending_result_cache: Dict[KV, ObservableDeferred] = {}

        self.clock = clock
        self.timeout_sec = timeout_ms / 1000.0

        self._name = name
        self._metrics = register_cache("response_cache",
                                       name,
                                       self,
                                       resizable=False)
Example #19
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.store = hs.get_datastore()
        self._event_auth_handler = hs.get_event_auth_handler()

        # Used by `RulesForRoom` to ensure only one thing mutates the cache at a
        # time. Keyed off room_id.
        self._rules_linearizer = Linearizer(name="rules_for_room")

        self.room_push_rule_cache_metrics = register_cache(
            "cache",
            "room_push_rule_cache",
            cache=[],  # Meaningless size, as this isn't a cache that stores values
            resizable=False,
        )
Example #20
    def __init__(self, cache_name, clock, max_len=0, expiry_ms=0,
                 reset_expiry_on_get=False, iterable=False):
        """
        Args:
            cache_name (str): Name of this cache, used for logging.
            clock (Clock)
            max_len (int): Max size of dict. If the dict grows larger than this
                then the oldest items get automatically evicted. Default is 0,
                which indicates there is no max limit.
            expiry_ms (int): How long before an item is evicted from the cache
                in milliseconds. Default is 0, indicating items never get
                evicted based on time.
            reset_expiry_on_get (bool): If true, will reset the expiry time for
                an item on access. Defaults to False.
            iterable (bool): If true, the size is calculated by summing the
                sizes of all entries, rather than the number of entries.

        """
        self._cache_name = cache_name

        self._clock = clock

        self._max_len = max_len
        self._expiry_ms = expiry_ms

        self._reset_expiry_on_get = reset_expiry_on_get

        self._cache = OrderedDict()

        self.iterable = iterable

        self.metrics = register_cache("expiring", cache_name, self)

        if not self._expiry_ms:
            # Don't bother starting the loop if things never expire
            return

        def f():
            return run_as_background_process(
                "prune_cache_%s" % self._cache_name,
                self._prune_cache,
            )

        self._clock.looping_call(f, self._expiry_ms / 2)
Example #21
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.store = hs.get_datastores().main
        self.clock = hs.get_clock()
        self._event_auth_handler = hs.get_event_auth_handler()

        # Used by `RulesForRoom` to ensure only one thing mutates the cache at a
        # time. Keyed off room_id.
        self._rules_linearizer = Linearizer(name="rules_for_room")

        self.room_push_rule_cache_metrics = register_cache(
            "cache",
            "room_push_rule_cache",
            cache=[],  # Meaningless size, as this isn't a cache that stores values
            resizable=False,
        )

        # Whether support for MSC3772 is enabled.
        self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled
Example #22
logger = logging.getLogger(__name__)


rules_by_room = {}


push_rules_invalidation_counter = Counter(
    "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter", "")
push_rules_state_size_counter = Counter(
    "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", "")

# Measures whether we use the fast path of using state deltas, or if we have to
# recalculate from scratch
push_rules_delta_state_cache_metric = register_cache(
    "cache",
    "push_rules_delta_state_cache_metric",
    cache=[],  # Meaningless size, as this isn't a cache that stores values
)


class BulkPushRuleEvaluator(object):
    """Calculates the outcome of push rules for an event for all users in the
    room at once.
    """

    def __init__(self, hs):
        self.hs = hs
        self.store = hs.get_datastore()
        self.auth = hs.get_auth()

        self.room_push_rule_cache_metrics = register_cache(
Example #23
    def __init__(
        self,
        max_size: int,
        cache_name: Optional[str] = None,
        cache_type: Type[Union[dict, TreeCache]] = dict,
        size_callback: Optional[Callable[[VT], int]] = None,
        metrics_collection_callback: Optional[Callable[[], None]] = None,
        apply_cache_factor_from_config: bool = True,
        clock: Optional[Clock] = None,
        prune_unread_entries: bool = True,
    ):
        """
        Args:
            max_size: The maximum amount of entries the cache can hold

            cache_name: The name of this cache, for the prometheus metrics. If unset,
                no metrics will be reported on this cache.

            cache_type (type):
                type of underlying cache to be used. Typically one of dict
                or TreeCache.

            size_callback (func(V) -> int | None):

            metrics_collection_callback:
                metrics collection callback. This is called early in the metrics
                collection process, before any of the metrics registered with the
                prometheus Registry are collected, so can be used to update any dynamic
                metrics.

                Ignored if cache_name is None.

            apply_cache_factor_from_config (bool): If true, `max_size` will be
                multiplied by a cache factor derived from the homeserver config

            clock:

            prune_unread_entries: If True, cache entries that haven't been read recently
                will be evicted from the cache in the background. Set to False to
                opt-out of this behaviour.
        """
        # Default `clock` to something sensible. Note that we rename it to
        # `real_clock` so that mypy doesn't think it's still `Optional`.
        if clock is None:
            real_clock = Clock(cast(IReactorTime, reactor))
        else:
            real_clock = clock

        cache: Union[Dict[KT, _Node[KT, VT]], TreeCache] = cache_type()
        self.cache = cache  # Used for introspection.
        self.apply_cache_factor_from_config = apply_cache_factor_from_config

        # Save the original max size, and apply the default size factor.
        self._original_max_size = max_size
        # We previously didn't apply the cache factor here, and as such some caches were
        # not affected by the global cache factor. Add an option here to disable applying
        # the cache factor when a cache is created
        if apply_cache_factor_from_config:
            self.max_size = int(max_size * cache_config.properties.default_factor_size)
        else:
            self.max_size = int(max_size)

        # register_cache might call our "set_cache_factor" callback; there's nothing to
        # do yet when we get resized.
        self._on_resize: Optional[Callable[[], None]] = None

        if cache_name is not None:
            metrics: Optional[CacheMetric] = register_cache(
                "lru_cache",
                cache_name,
                self,
                collect_callback=metrics_collection_callback,
            )
        else:
            metrics = None

        # this is exposed for access from outside this class
        self.metrics = metrics

        # We create a single weakref to self here so that we don't need to keep
        # creating more each time we create a `_Node`.
        weak_ref_to_self = weakref.ref(self)

        list_root = ListNode[_Node[KT, VT]].create_root_node()

        lock = threading.Lock()

        def evict() -> None:
            while cache_len() > self.max_size:
                # Get the last node in the list (i.e. the oldest node).
                todelete = list_root.prev_node

                # The list root should always have a valid `prev_node` if the
                # cache is not empty.
                assert todelete is not None

                # The node should always have a reference to a cache entry, as
                # we only drop the cache entry when we remove the node from the
                # list.
                node = todelete.get_cache_entry()
                assert node is not None

                evicted_len = delete_node(node)
                cache.pop(node.key, None)
                if metrics:
                    metrics.inc_evictions(EvictionReason.size, evicted_len)

        def synchronized(f: FT) -> FT:
            @wraps(f)
            def inner(*args: Any, **kwargs: Any) -> Any:
                with lock:
                    return f(*args, **kwargs)

            return cast(FT, inner)

        cached_cache_len = [0]
        if size_callback is not None:

            def cache_len() -> int:
                return cached_cache_len[0]

        else:

            def cache_len() -> int:
                return len(cache)

        self.len = synchronized(cache_len)

        def add_node(
            key: KT, value: VT,
            callbacks: Collection[Callable[[], None]] = ()) -> None:
            node: _Node[KT, VT] = _Node(
                list_root,
                key,
                value,
                weak_ref_to_self,
                real_clock,
                callbacks,
                prune_unread_entries,
            )
            cache[key] = node

            if size_callback:
                cached_cache_len[0] += size_callback(node.value)

            if caches.TRACK_MEMORY_USAGE and metrics:
                metrics.inc_memory_usage(node.memory)

        def move_node_to_front(node: _Node[KT, VT]) -> None:
            node.move_to_front(real_clock, list_root)

        def delete_node(node: _Node[KT, VT]) -> int:
            node.drop_from_lists()

            deleted_len = 1
            if size_callback:
                deleted_len = size_callback(node.value)
                cached_cache_len[0] -= deleted_len

            node.run_and_clear_callbacks()

            if caches.TRACK_MEMORY_USAGE and metrics:
                metrics.dec_memory_usage(node.memory)

            return deleted_len

        @overload
        def cache_get(
            key: KT,
            default: Literal[None] = None,
            callbacks: Collection[Callable[[], None]] = ...,
            update_metrics: bool = ...,
        ) -> Optional[VT]:
            ...

        @overload
        def cache_get(
            key: KT,
            default: T,
            callbacks: Collection[Callable[[], None]] = ...,
            update_metrics: bool = ...,
        ) -> Union[T, VT]:
            ...

        @synchronized
        def cache_get(
            key: KT,
            default: Optional[T] = None,
            callbacks: Collection[Callable[[], None]] = (),
            update_metrics: bool = True,
        ) -> Union[None, T, VT]:
            node = cache.get(key, None)
            if node is not None:
                move_node_to_front(node)
                node.add_callbacks(callbacks)
                if update_metrics and metrics:
                    metrics.inc_hits()
                return node.value
            else:
                if update_metrics and metrics:
                    metrics.inc_misses()
                return default

        @synchronized
        def cache_set(
            key: KT, value: VT,
            callbacks: Collection[Callable[[], None]] = ()) -> None:
            node = cache.get(key, None)
            if node is not None:
                # We sometimes store large objects, e.g. dicts, which cause
                # the inequality check to take a long time. So let's only do
                # the check if we have some callbacks to call.
                if value != node.value:
                    node.run_and_clear_callbacks()

                # We don't bother to protect this by value != node.value as
                # generally size_callback will be cheap compared with equality
                # checks. (For example, taking the size of two dicts is quicker
                # than comparing them for equality.)
                if size_callback:
                    cached_cache_len[0] -= size_callback(node.value)
                    cached_cache_len[0] += size_callback(value)

                node.add_callbacks(callbacks)

                move_node_to_front(node)
                node.value = value
            else:
                add_node(key, value, set(callbacks))

            evict()

        @synchronized
        def cache_set_default(key: KT, value: VT) -> VT:
            node = cache.get(key, None)
            if node is not None:
                return node.value
            else:
                add_node(key, value)
                evict()
                return value

        @overload
        def cache_pop(key: KT, default: Literal[None] = None) -> Optional[VT]:
            ...

        @overload
        def cache_pop(key: KT, default: T) -> Union[T, VT]:
            ...

        @synchronized
        def cache_pop(key: KT,
                      default: Optional[T] = None) -> Union[None, T, VT]:
            node = cache.get(key, None)
            if node:
                evicted_len = delete_node(node)
                cache.pop(node.key, None)
                if metrics:
                    metrics.inc_evictions(EvictionReason.invalidation,
                                          evicted_len)
                return node.value
            else:
                return default

        @synchronized
        def cache_del_multi(key: KT) -> None:
            """Delete an entry, or tree of entries

            If the LruCache is backed by a regular dict, then "key" must be of
            the right type for this cache

            If the LruCache is backed by a TreeCache, then "key" must be a tuple, but
            may be of lower cardinality than the TreeCache - in which case the whole
            subtree is deleted.
            """
            popped = cache.pop(key, None)
            if popped is None:
                return
            # for each deleted node, we now need to remove it from the linked list
            # and run its callbacks.
            for leaf in iterate_tree_cache_entry(popped):
                delete_node(leaf)

        @synchronized
        def cache_clear() -> None:
            for node in cache.values():
                node.run_and_clear_callbacks()
                node.drop_from_lists()

            assert list_root.next_node == list_root
            assert list_root.prev_node == list_root

            cache.clear()
            if size_callback:
                cached_cache_len[0] = 0

            if caches.TRACK_MEMORY_USAGE and metrics:
                metrics.clear_memory_usage()

        @synchronized
        def cache_contains(key: KT) -> bool:
            return key in cache

        # make sure that we clear out any excess entries after we get resized.
        self._on_resize = evict

        self.get = cache_get
        self.set = cache_set
        self.setdefault = cache_set_default
        self.pop = cache_pop
        self.del_multi = cache_del_multi
        # `invalidate` is exposed for consistency with DeferredCache, so that it can be
        # invalidated by the cache invalidation replication stream.
        self.invalidate = cache_del_multi
        self.len = synchronized(cache_len)
        self.contains = cache_contains
        self.clear = cache_clear
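Example #23 assembles the public API from closures: `get`, `set`, `setdefault`, `pop`, `del_multi`, `contains` and `clear`, all serialized through one lock. The recently-used ordering those closures maintain can be demonstrated with a toy OrderedDict version (illustrative, not the Synapse class):

from collections import OrderedDict


class MiniLru:
    """Toy LRU with the same get/set/pop surface as the example above."""

    def __init__(self, max_size):
        self.max_size = max_size
        self._cache = OrderedDict()

    def get(self, key, default=None):
        if key in self._cache:
            self._cache.move_to_end(key, last=False)  # most-recent at the front
            return self._cache[key]
        return default

    def set(self, key, value):
        self._cache[key] = value
        self._cache.move_to_end(key, last=False)
        while len(self._cache) > self.max_size:
            self._cache.popitem(last=True)  # evict the least-recent entry

    def pop(self, key, default=None):
        return self._cache.pop(key, default)


cache = MiniLru(max_size=2)
cache.set("a", 1)
cache.set("b", 2)
cache.get("a")      # touches "a", so "b" is now the oldest entry
cache.set("c", 3)   # pushes the cache over max_size, evicting "b"
assert cache.get("b") is None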
Example #24
        if not display_name:
            return False

        body = self._event.content.get("body", None)
        if not body:
            return False

        return _glob_matches(display_name, body, word_boundary=True)

    def _get_value(self, dotted_key):
        return self._value_cache.get(dotted_key, None)


# Caches (glob, word_boundary) -> regex for push. See _glob_matches
regex_cache = LruCache(50000 * CACHE_SIZE_FACTOR)
register_cache("cache", "regex_push_cache", regex_cache)


def _glob_matches(glob, value, word_boundary=False):
    """Tests if value matches glob.

    Args:
        glob (string)
        value (string): String to test against glob.
        word_boundary (bool): Whether to match against word boundaries or entire
            string. Defaults to False.

    Returns:
        bool
    """
Example #25
        r = regex_cache.get((display_name, False, True), None)
        if not r:
            r = re.escape(display_name)
            r = _re_word_boundary(r)
            r = re.compile(r, flags=re.IGNORECASE)
            regex_cache[(display_name, False, True)] = r

        return r.search(body)

    def _get_value(self, dotted_key: str) -> Optional[str]:
        return self._value_cache.get(dotted_key, None)


# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
regex_cache = LruCache(50000 * CACHE_SIZE_FACTOR)
register_cache("cache", "regex_push_cache", regex_cache)


def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
    """Tests if value matches glob.

    Args:
        glob
        value: String to test against glob.
        word_boundary: Whether to match against word boundaries or entire
            string. Defaults to False.
    """

    try:
        r = regex_cache.get((glob, True, word_boundary), None)
        if not r:
Example #26
    def __init__(
        self,
        max_size: int,
        cache_name: Optional[str] = None,
        keylen: int = 1,
        cache_type: Type[Union[dict, TreeCache]] = dict,
        size_callback: Optional[Callable] = None,
        metrics_collection_callback: Optional[Callable[[], None]] = None,
        apply_cache_factor_from_config: bool = True,
    ):
        """
        Args:
            max_size: The maximum amount of entries the cache can hold

            cache_name: The name of this cache, for the prometheus metrics. If unset,
                no metrics will be reported on this cache.

            keylen: The length of the tuple used as the cache key. Ignored unless
                cache_type is `TreeCache`.

            cache_type (type):
                type of underlying cache to be used. Typically one of dict
                or TreeCache.

            size_callback (func(V) -> int | None):

            metrics_collection_callback:
                metrics collection callback. This is called early in the metrics
                collection process, before any of the metrics registered with the
                prometheus Registry are collected, so can be used to update any dynamic
                metrics.

                Ignored if cache_name is None.

            apply_cache_factor_from_config (bool): If true, `max_size` will be
                multiplied by a cache factor derived from the homeserver config
        """
        cache = cache_type()
        self.cache = cache  # Used for introspection.
        self.apply_cache_factor_from_config = apply_cache_factor_from_config

        # Save the original max size, and apply the default size factor.
        self._original_max_size = max_size
        # We previously didn't apply the cache factor here, and as such some caches were
        # not affected by the global cache factor. Add an option here to disable applying
        # the cache factor when a cache is created
        if apply_cache_factor_from_config:
            self.max_size = int(max_size * cache_config.properties.default_factor_size)
        else:
            self.max_size = int(max_size)

        # register_cache might call our "set_cache_factor" callback; there's nothing to
        # do yet when we get resized.
        self._on_resize = None  # type: Optional[Callable[[],None]]

        if cache_name is not None:
            metrics = register_cache(
                "lru_cache",
                cache_name,
                self,
                collect_callback=metrics_collection_callback,
            )  # type: Optional[CacheMetric]
        else:
            metrics = None

        # this is exposed for access from outside this class
        self.metrics = metrics

        list_root = _Node(None, None, None, None)
        list_root.next_node = list_root
        list_root.prev_node = list_root

        lock = threading.Lock()

        def evict():
            while cache_len() > self.max_size:
                todelete = list_root.prev_node
                evicted_len = delete_node(todelete)
                cache.pop(todelete.key, None)
                if metrics:
                    metrics.inc_evictions(evicted_len)

        def synchronized(f: FT) -> FT:
            @wraps(f)
            def inner(*args, **kwargs):
                with lock:
                    return f(*args, **kwargs)

            return cast(FT, inner)

        cached_cache_len = [0]
        if size_callback is not None:

            def cache_len():
                return cached_cache_len[0]

        else:

            def cache_len():
                return len(cache)

        self.len = synchronized(cache_len)

        def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()):
            prev_node = list_root
            next_node = prev_node.next_node
            node = _Node(prev_node, next_node, key, value, callbacks)
            prev_node.next_node = node
            next_node.prev_node = node
            cache[key] = node

            if size_callback:
                cached_cache_len[0] += size_callback(node.value)

            if caches.TRACK_MEMORY_USAGE and metrics:
                metrics.inc_memory_usage(node.memory)

        def move_node_to_front(node):
            prev_node = node.prev_node
            next_node = node.next_node
            prev_node.next_node = next_node
            next_node.prev_node = prev_node
            prev_node = list_root
            next_node = prev_node.next_node
            node.prev_node = prev_node
            node.next_node = next_node
            prev_node.next_node = node
            next_node.prev_node = node

        def delete_node(node):
            prev_node = node.prev_node
            next_node = node.next_node
            prev_node.next_node = next_node
            next_node.prev_node = prev_node

            deleted_len = 1
            if size_callback:
                deleted_len = size_callback(node.value)
                cached_cache_len[0] -= deleted_len

            node.run_and_clear_callbacks()

            if caches.TRACK_MEMORY_USAGE and metrics:
                metrics.dec_memory_usage(node.memory)

            return deleted_len

        @overload
        def cache_get(
            key: KT,
            default: Literal[None] = None,
            callbacks: Collection[Callable[[], None]] = ...,
            update_metrics: bool = ...,
        ) -> Optional[VT]:
            ...

        @overload
        def cache_get(
            key: KT,
            default: T,
            callbacks: Collection[Callable[[], None]] = ...,
            update_metrics: bool = ...,
        ) -> Union[T, VT]:
            ...

        @synchronized
        def cache_get(
            key: KT,
            default: Optional[T] = None,
            callbacks: Collection[Callable[[], None]] = (),
            update_metrics: bool = True,
        ):
            node = cache.get(key, None)
            if node is not None:
                move_node_to_front(node)
                node.add_callbacks(callbacks)
                if update_metrics and metrics:
                    metrics.inc_hits()
                return node.value
            else:
                if update_metrics and metrics:
                    metrics.inc_misses()
                return default

        @synchronized
        def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()):
            node = cache.get(key, None)
            if node is not None:
                # We sometimes store large objects, e.g. dicts, which cause
                # the inequality check to take a long time. So let's only do
                # the check if we have some callbacks to call.
                if value != node.value:
                    node.run_and_clear_callbacks()

                # We don't bother to protect this by value != node.value as
                # generally size_callback will be cheap compared with equality
                # checks. (For example, taking the size of two dicts is quicker
                # than comparing them for equality.)
                if size_callback:
                    cached_cache_len[0] -= size_callback(node.value)
                    cached_cache_len[0] += size_callback(value)

                node.add_callbacks(callbacks)

                move_node_to_front(node)
                node.value = value
            else:
                add_node(key, value, set(callbacks))

            evict()

        @synchronized
        def cache_set_default(key: KT, value: VT) -> VT:
            node = cache.get(key, None)
            if node is not None:
                return node.value
            else:
                add_node(key, value)
                evict()
                return value

        @overload
        def cache_pop(key: KT, default: Literal[None] = None) -> Optional[VT]:
            ...

        @overload
        def cache_pop(key: KT, default: T) -> Union[T, VT]:
            ...

        @synchronized
        def cache_pop(key: KT, default: Optional[T] = None):
            node = cache.get(key, None)
            if node:
                delete_node(node)
                cache.pop(node.key, None)
                return node.value
            else:
                return default

        @synchronized
        def cache_del_multi(key: KT) -> None:
            """
            This will only work if constructed with cache_type=TreeCache
            """
            popped = cache.pop(key)
            if popped is None:
                return
            for leaf in enumerate_leaves(popped, keylen - len(cast(tuple, key))):
                delete_node(leaf)

        @synchronized
        def cache_clear() -> None:
            list_root.next_node = list_root
            list_root.prev_node = list_root
            for node in cache.values():
                node.run_and_clear_callbacks()
            cache.clear()
            if size_callback:
                cached_cache_len[0] = 0

            if caches.TRACK_MEMORY_USAGE and metrics:
                metrics.clear_memory_usage()

        @synchronized
        def cache_contains(key: KT) -> bool:
            return key in cache

        self.sentinel = object()

        # make sure that we clear out any excess entries after we get resized.
        self._on_resize = evict

        self.get = cache_get
        self.set = cache_set
        self.setdefault = cache_set_default
        self.pop = cache_pop
        # `invalidate` is exposed for consistency with DeferredCache, so that it can be
        # invalidated by the cache invalidation replication stream.
        self.invalidate = cache_pop
        if cache_type is TreeCache:
            self.del_multi = cache_del_multi
        self.len = synchronized(cache_len)
        self.contains = cache_contains
        self.clear = cache_clear
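The `cache_del_multi` docstring above captures the TreeCache behaviour: popping a tuple key of lower cardinality than the full key removes the whole subtree, after which every leaf must be unlinked and have its callbacks run. A self-contained sketch of the subtree semantics using nested dicts (illustrative only, not Synapse's TreeCache):

def tree_pop(tree, key):
    """Pop `key` (a tuple, possibly shorter than the full key length) from a
    nested-dict tree, returning the removed subtree or None."""
    *prefix, last = key
    node = tree
    for part in prefix:
        node = node.get(part)
        if node is None:
            return None
    return node.pop(last, None)


def iter_leaves(subtree):
    """Yield every leaf value under a popped subtree."""
    if isinstance(subtree, dict):
        for child in subtree.values():
            yield from iter_leaves(child)
    else:
        yield subtree


cache = {"room1": {"alice": 1, "bob": 2}, "room2": {"carol": 3}}
popped = tree_pop(cache, ("room1",))          # lower cardinality than the full key
assert sorted(iter_leaves(popped)) == [1, 2]  # both leaves come out together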