def test_evict(self):
    setup_expire_lru_cache_entries(self.hs)
    cache = LruCache(5, clock=self.hs.get_clock())

    # Check that we evict entries we haven't accessed for 30 minutes.
    cache["key1"] = 1
    cache["key2"] = 2

    self.reactor.advance(20 * 60)

    self.assertEqual(cache.get("key1"), 1)

    self.reactor.advance(20 * 60)

    # We have only touched `key1` in the last 30m, so we expect that to
    # still be in the cache while `key2` should have been evicted.
    self.assertEqual(cache.get("key1"), 1)
    self.assertEqual(cache.get("key2"), None)

    # Check that re-adding an expired key works correctly.
    cache["key2"] = 3
    self.assertEqual(cache.get("key2"), 3)

    self.reactor.advance(20 * 60)

    self.assertEqual(cache.get("key2"), 3)

    self.reactor.advance(20 * 60)

    self.assertEqual(cache.get("key1"), None)
    self.assertEqual(cache.get("key2"), 3)
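# The test above relies on the homeserver's cache-expiry background job being
# configured for a 30-minute TTL. A minimal sketch of the test-case scaffolding
# it assumes: the class name and the exact config key ("expiry_time") are
# assumptions for illustration, not confirmed by the excerpt above.
from synapse.util.caches.lrucache import LruCache, setup_expire_lru_cache_entries

from tests import unittest


class TimeEvictionTestCase(unittest.HomeserverTestCase):
    """Assumed harness: entries untouched for 30 minutes get evicted."""

    def default_config(self):
        config = super().default_config()
        # Assumed config key; the intent is a 30-minute cache entry TTL so
        # that `setup_expire_lru_cache_entries(hs)` starts the eviction loop.
        config.setdefault("caches", {}).update({"expiry_time": "30m"})
        return config

    # `test_evict` from above would live here.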
def test_evict_memory(self, jemalloc_interface) -> None:
    mock_jemalloc_class = Mock(spec=JemallocStats)
    jemalloc_interface.return_value = mock_jemalloc_class

    # set the return value of get_stat() to be greater than max_cache_memory_usage
    mock_jemalloc_class.get_stat.return_value = 924288000

    setup_expire_lru_cache_entries(self.hs)
    cache = LruCache(4, clock=self.hs.get_clock())

    cache["key1"] = 1
    cache["key2"] = 2

    # advance the reactor less than the min_cache_ttl
    self.reactor.advance(60 * 2)

    # our items should still be in the cache
    self.assertEqual(cache.get("key1"), 1)
    self.assertEqual(cache.get("key2"), 2)

    # advance the reactor past the min_cache_ttl
    self.reactor.advance(60 * 6)

    # the items should be cleared from cache
    self.assertEqual(cache.get("key1"), None)
    self.assertEqual(cache.get("key2"), None)

    # add more stuff to caches
    cache["key1"] = 1
    cache["key2"] = 2

    # set the return value of get_stat() to be lower than target_cache_memory_usage
    mock_jemalloc_class.get_stat.return_value = 10000

    # advance the reactor past the min_cache_ttl
    self.reactor.advance(60 * 6)

    # the items should still be in the cache
    self.assertEqual(cache.get("key1"), 1)
    self.assertEqual(cache.get("key2"), 2)
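# The memory-eviction test above receives a `jemalloc_interface` argument,
# which implies the jemalloc stats getter is patched in and that cache
# autotuning is configured with memory thresholds and a minimum TTL. A hedged
# sketch of the assumed decorators — the patch target, class name, and the
# exact threshold values are assumptions, not confirmed by the excerpt:
from unittest.mock import Mock, patch

from tests import unittest


class MemoryEvictionTestCase(unittest.HomeserverTestCase):
    @unittest.override_config(
        {
            "caches": {
                "cache_autotuning": {
                    "max_cache_memory_usage": "700M",
                    "target_cache_memory_usage": "500M",
                    "min_cache_ttl": "5m",
                }
            }
        }
    )
    @patch("synapse.util.caches.lrucache.get_jemalloc_stats")  # assumed patch target
    def test_evict_memory(self, jemalloc_interface) -> None:
        ...  # body as above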
async def start(hs: "HomeServer") -> None:
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running.

    Will start the main HTTP listeners and do some other startup tasks, and
    then notify systemd.

    Args:
        hs: homeserver instance
    """
    reactor = hs.get_reactor()

    # We want to use a separate thread pool for the resolver so that large
    # numbers of DNS requests don't starve out other users of the threadpool.
    resolver_threadpool = ThreadPool(name="gai_resolver")
    resolver_threadpool.start()
    reactor.addSystemEventTrigger("during", "shutdown", resolver_threadpool.stop)
    reactor.installNameResolver(
        GAIResolver(reactor, getThreadPool=lambda: resolver_threadpool)
    )

    # Register the threadpools with our metrics.
    register_threadpool("default", reactor.getThreadPool())
    register_threadpool("gai_resolver", resolver_threadpool)

    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):

        @wrap_as_background_process("sighup")
        async def handle_sighup(*args: Any, **kwargs: Any) -> None:
            # Tell systemd our state, if we're using it. This will silently fail if
            # we're not using systemd.
            sdnotify(b"RELOADING=1")

            for i, args, kwargs in _sighup_callbacks:
                i(*args, **kwargs)

            sdnotify(b"READY=1")

        # We defer running the sighup handlers until next reactor tick. This
        # is so that we're in a sane state, e.g. flushing the logs may fail
        # if the sighup happens in the middle of writing a log entry.
        def run_sighup(*args: Any, **kwargs: Any) -> None:
            # `callFromThread` should be "signal safe" as well as thread
            # safe.
            reactor.callFromThread(handle_sighup, *args, **kwargs)

        signal.signal(signal.SIGHUP, run_sighup)

        register_sighup(refresh_certificate, hs)
        register_sighup(reload_cache_config, hs.config)

    # Apply the cache config.
    hs.config.caches.resize_all_caches()

    # Load the certificate from disk.
    refresh_certificate(hs)

    # Start the tracer
    init_tracer(hs)  # noqa

    # Instantiate the modules so they can register their web resources to the module API
    # before we start the listeners.
    module_api = hs.get_module_api()
    for module, config in hs.config.modules.loaded_modules:
        m = module(config=config, api=module_api)
        logger.info("Loaded module %s", m)

    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)
    load_legacy_password_auth_providers(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastores().main.db_pool.start_profiling()
    hs.get_pusherpool().start()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger(
        "before", "shutdown", logger.info, "Shutting down..."
    )

    setup_sentry(hs)
    setup_sdnotify(hs)

    # If background tasks are running on the main process, start collecting the
    # phone home stats.
    if hs.config.worker.run_background_tasks:
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hopes that (almost)
    # everything currently allocated are things that will be used for the
    # rest of time. Doing so means less work each GC (hopefully).
    #
    # PyPy does not (yet?) implement gc.freeze()
    if hasattr(gc, "freeze"):
        gc.collect()
        gc.freeze()

        # Speed up shutdowns by freezing all allocated objects. This moves everything
        # into the permanent generation and excludes them from the final GC.
        atexit.register(gc.freeze)
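# `start` above records SIGHUP handlers via `register_sighup` and replays them
# from `_sighup_callbacks` inside `handle_sighup`. A minimal sketch of that
# registry, assuming it simply stores (callback, args, kwargs) tuples — the
# names mirror the usage above, but the exact signature is an assumption:
from typing import Any, Callable, Dict, List, Tuple

_sighup_callbacks: List[
    Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]
] = []


def register_sighup(func: Callable[..., None], *args: Any, **kwargs: Any) -> None:
    """Register a function to be called when a SIGHUP is received.

    Args:
        func: Function to be called when sent a SIGHUP signal.
        *args, **kwargs: args and kwargs to be passed to the target function.
    """
    _sighup_callbacks.append((func, args, kwargs))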
async def start(hs: "HomeServer"):
    """
    Start a Synapse server or worker.

    Should be called once the reactor is running.

    Will start the main HTTP listeners and do some other startup tasks, and
    then notify systemd.

    Args:
        hs: homeserver instance
    """
    # Set up the SIGHUP machinery.
    if hasattr(signal, "SIGHUP"):
        reactor = hs.get_reactor()

        @wrap_as_background_process("sighup")
        def handle_sighup(*args, **kwargs):
            # Tell systemd our state, if we're using it. This will silently fail if
            # we're not using systemd.
            sdnotify(b"RELOADING=1")

            for i, args, kwargs in _sighup_callbacks:
                i(*args, **kwargs)

            sdnotify(b"READY=1")

        # We defer running the sighup handlers until next reactor tick. This
        # is so that we're in a sane state, e.g. flushing the logs may fail
        # if the sighup happens in the middle of writing a log entry.
        def run_sighup(*args, **kwargs):
            # `callFromThread` should be "signal safe" as well as thread
            # safe.
            reactor.callFromThread(handle_sighup, *args, **kwargs)

        signal.signal(signal.SIGHUP, run_sighup)

    register_sighup(refresh_certificate, hs)

    # Load the certificate from disk.
    refresh_certificate(hs)

    # Start the tracer
    synapse.logging.opentracing.init_tracer(hs)  # type: ignore[attr-defined] # noqa

    # Instantiate the modules so they can register their web resources to the module API
    # before we start the listeners.
    module_api = hs.get_module_api()
    for module, config in hs.config.modules.loaded_modules:
        module(config=config, api=module_api)

    load_legacy_spam_checkers(hs)
    load_legacy_third_party_event_rules(hs)
    load_legacy_presence_router(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastore().db_pool.start_profiling()
    hs.get_pusherpool().start()

    # Log when we start the shut down process.
    hs.get_reactor().addSystemEventTrigger(
        "before", "shutdown", logger.info, "Shutting down..."
    )

    setup_sentry(hs)
    setup_sdnotify(hs)

    # If background tasks are running on the main process, start collecting the
    # phone home stats.
    if hs.config.run_background_tasks:
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hopes that (almost)
    # everything currently allocated are things that will be used for the
    # rest of time. Doing so means less work each GC (hopefully).
    #
    # This only works on Python 3.7
    if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
        gc.collect()
        gc.freeze()
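# Both versions of `start` call `sdnotify(...)` to report state to systemd. A
# best-effort sketch of such a helper, following the sd_notify protocol (send a
# datagram to the socket named in NOTIFY_SOCKET, where a leading "@" denotes an
# abstract-namespace socket); this is an assumed implementation for
# illustration, not the project's own:
import logging
import os
import socket

logger = logging.getLogger(__name__)


def sdnotify(state: bytes) -> None:
    """Send a notification to systemd, if NOTIFY_SOCKET is set; otherwise no-op."""
    addr = os.environ.get("NOTIFY_SOCKET")
    if addr is None:
        return

    try:
        with socket.socket(family=socket.AF_UNIX, type=socket.SOCK_DGRAM) as sock:
            if addr[0] == "@":
                # Abstract namespace socket: replace the "@" with a NUL byte.
                addr = "\0" + addr[1:]
            sock.connect(addr)
            sock.sendall(state)
    except Exception as e:
        # Notification is best-effort; never let it break startup or shutdown.
        logger.warning("Unable to send notification to systemd: %s", e)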