@defer.inlineCallbacks
def test_sleep(self):
    clock = Clock(reactor)

    @defer.inlineCallbacks
    def competing_callback():
        with LoggingContext("competing"):
            yield clock.sleep(0)
            self._check_test_key("competing")

    reactor.callLater(0, competing_callback)

    with LoggingContext("one"):
        yield clock.sleep(0)
        self._check_test_key("one")
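# A minimal sketch (not from the tests above) of the pattern they exercise:
# Clock.sleep() returns an awaitable that yields to the reactor, and the
# caller's logging context is restored once it completes. `do_periodic_work`
# is a hypothetical caller; `clock` is a synapse.util.Clock as above.
async def do_periodic_work(clock):
    with LoggingContext("periodic"):
        await clock.sleep(1)  # yield to the reactor for one second
        # execution resumes here with the "periodic" context reinstated
        ...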
def __init__(
    self, reactor, agent, well_known_cache=None, had_well_known_cache=None
):
    self._reactor = reactor
    self._clock = Clock(reactor)

    if well_known_cache is None:
        well_known_cache = _well_known_cache

    if had_well_known_cache is None:
        had_well_known_cache = _had_valid_well_known_cache

    self._well_known_cache = well_known_cache
    self._had_valid_well_known_cache = had_well_known_cache
    self._well_known_agent = RedirectAgent(agent)
def __init__(self, hostname: str, config: HomeServerConfig, reactor=None, **kwargs):
    """
    Args:
        hostname: The hostname for the server.
        config: The full config for the homeserver.
    """
    if not reactor:
        from twisted.internet import reactor

    self._reactor = reactor
    self.hostname = hostname
    self.config = config
    self._building = {}
    self._listening_services = []
    self.start_time = None

    self._instance_id = random_string(5)
    self._instance_name = config.worker_name or "master"

    self.clock = Clock(reactor)
    self.distributor = Distributor()
    self.ratelimiter = Ratelimiter()
    self.admin_redaction_ratelimiter = Ratelimiter()
    self.registration_ratelimiter = Ratelimiter()

    self.datastores = None

    # Other kwargs are explicit dependencies
    for depname in kwargs:
        setattr(self, depname, kwargs[depname])
async def testfunc():
    self._check_test_key("one")
    d = Clock(reactor).sleep(0)
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
    await d
    self._check_test_key("one")
def __init__(
    self,
    name: Optional[str] = None,
    max_count: int = 1,
    clock: Optional[Clock] = None,
):
    """
    Args:
        max_count: The maximum number of concurrent accesses
    """
    if name is None:
        self.name: Union[str, int] = id(self)
    else:
        self.name = name

    if not clock:
        from twisted.internet import reactor

        assert isinstance(reactor, ReactorBase)
        clock = Clock(reactor)
    self._clock = clock
    self.max_count = max_count

    # key_to_defer is a map from the key to a _LinearizerEntry.
    self.key_to_defer: Dict[Hashable, _LinearizerEntry] = {}
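# A minimal usage sketch for the constructor above, assuming it belongs to
# Synapse's Linearizer (as the `queue()` call later in this file suggests).
# `fetch_room_state` and `do_fetch` are hypothetical names.
linearizer = Linearizer(name="fetch_room_state", max_count=1)

@defer.inlineCallbacks
def fetch_room_state(room_id):
    # At most max_count callers hold the key at once; the rest queue here.
    with (yield linearizer.queue(room_id)):
        result = yield do_fetch(room_id)
    defer.returnValue(result)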
async def make_homeserver(reactor, config=None):
    """
    Make a Homeserver suitable for running benchmarks against.

    Args:
        reactor: A Twisted reactor to run under.
        config: A HomeServerConfig to use, or None.
    """
    cleanup_tasks = []
    clock = Clock(reactor)

    if not config:
        config = default_config("test")

    config_obj = HomeServerConfig()
    config_obj.parse_config_dict(config, "", "")

    hs = await setup_test_homeserver(
        cleanup_tasks.append, config=config_obj, reactor=reactor, clock=clock
    )
    stor = hs.get_datastore()

    # Run the database background updates.
    if hasattr(stor.db.updates, "do_next_background_update"):
        while not await stor.db.updates.has_completed_background_updates():
            await stor.db.updates.do_next_background_update(1)

    def cleanup():
        for i in cleanup_tasks:
            i()

    return hs, clock.sleep, cleanup
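# A minimal sketch (not from the source) of driving make_homeserver from a
# benchmark harness: it hands back the homeserver, an awaitable sleep, and a
# cleanup callable to run once the benchmark finishes. `run_benchmark` is a
# hypothetical entry point.
async def run_benchmark(reactor):
    hs, sleep, cleanup = await make_homeserver(reactor)
    try:
        # ... exercise `hs` here ...
        await sleep(0)  # let queued reactor work run between iterations
    finally:
        cleanup()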
def setUp(self):
    self.clock = MemoryReactorClock()
    self.hs_clock = Clock(self.clock)
    self.hs = setup_test_homeserver(
        self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
    )

    self.auth = self.hs.get_auth()

    def get_user_by_access_token(token=None, allow_guest=False):
        return {
            "user": UserID.from_string(self.USER_ID),
            "token_id": 1,
            "is_guest": False,
        }

    def get_user_by_req(request, allow_guest=False, rights="access"):
        return synapse.types.create_requester(
            UserID.from_string(self.USER_ID), 1, False, None
        )

    self.auth.get_user_by_access_token = get_user_by_access_token
    self.auth.get_user_by_req = get_user_by_req

    self.store = self.hs.get_datastore()
    self.filtering = self.hs.get_filtering()
    self.resource = JsonResource(self.hs)

    for r in self.TO_REGISTER:
        r.register_servlets(self.hs, self.resource)
def __init__(
    self,
    reactor,
    tls_client_options_factory,
    _well_known_tls_policy=None,
    _srv_resolver=None,
    _well_known_cache=well_known_cache,
):
    self._reactor = reactor
    self._clock = Clock(reactor)

    self._tls_client_options_factory = tls_client_options_factory
    if _srv_resolver is None:
        _srv_resolver = SrvResolver()
    self._srv_resolver = _srv_resolver

    self._pool = HTTPConnectionPool(reactor)
    self._pool.retryAutomatically = False
    self._pool.maxPersistentPerHost = 5
    self._pool.cachedConnectionTimeout = 2 * 60

    agent_args = {}
    if _well_known_tls_policy is not None:
        # the param is called 'contextFactory', but actually passing a
        # contextFactory is deprecated, and it expects an IPolicyForHTTPS.
        agent_args["contextFactory"] = _well_known_tls_policy
    _well_known_agent = RedirectAgent(
        Agent(self._reactor, pool=self._pool, **agent_args)
    )
    self._well_known_agent = _well_known_agent

    # our cache of .well-known lookup results, mapping from server name
    # to delegated name. The values can be:
    #     `bytes`: a valid server-name
    #     `None`: there is no (valid) .well-known here
    self._well_known_cache = _well_known_cache
def __init__(
    self,
    reactor,
    tls_client_options_factory,
    user_agent,
    _srv_resolver=None,
    _well_known_resolver=None,
):
    self._reactor = reactor
    self._clock = Clock(reactor)

    self._pool = HTTPConnectionPool(reactor)
    self._pool.retryAutomatically = False
    self._pool.maxPersistentPerHost = 5
    self._pool.cachedConnectionTimeout = 2 * 60

    self._agent = Agent.usingEndpointFactory(
        self._reactor,
        MatrixHostnameEndpointFactory(
            reactor, tls_client_options_factory, _srv_resolver
        ),
        pool=self._pool,
    )
    self.user_agent = user_agent

    if _well_known_resolver is None:
        _well_known_resolver = WellKnownResolver(
            self._reactor,
            agent=Agent(
                self._reactor,
                pool=self._pool,
                contextFactory=tls_client_options_factory,
            ),
            user_agent=self.user_agent,
        )

    self._well_known_resolver = _well_known_resolver
def prepare(self, reactor, clock, hs):
    self.clock = MemoryReactorClock()
    self.hs_clock = Clock(self.clock)
    self.url = "/_matrix/client/r0/register"
    self.registration_handler = Mock()
    self.auth_handler = Mock()
    self.device_handler = Mock()
def setUp(self):
    self.reactor = ThreadedMemoryReactorClock()
    self.hs_clock = Clock(self.reactor)
    self.homeserver = setup_test_homeserver(
        self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
    )
def setUp(self):
    self.clock = ThreadedMemoryReactorClock()
    self.hs_clock = Clock(self.clock)
    self.url = "/_matrix/client/r0/admin/register"

    self.registration_handler = Mock()
    self.identity_handler = Mock()
    self.login_handler = Mock()
    self.device_handler = Mock()
    self.device_handler.check_device_registered = Mock(return_value="FAKE")

    self.datastore = Mock(return_value=Mock())
    self.datastore.get_current_state_deltas = Mock(return_value=[])

    self.secrets = Mock()

    self.hs = setup_test_homeserver(
        self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
    )

    self.hs.config.registration_shared_secret = u"shared"

    self.hs.get_media_repository = Mock()
    self.hs.get_deactivate_account_handler = Mock()

    self.resource = JsonResource(self.hs)
    register_servlets(self.hs, self.resource)
def __init__(
    self,
    reactor,
    tls_client_options_factory,
    _srv_resolver=None,
    _well_known_cache=None,
):
    self._reactor = reactor
    self._clock = Clock(reactor)

    self._tls_client_options_factory = tls_client_options_factory
    if _srv_resolver is None:
        _srv_resolver = SrvResolver()
    self._srv_resolver = _srv_resolver

    self._pool = HTTPConnectionPool(reactor)
    self._pool.retryAutomatically = False
    self._pool.maxPersistentPerHost = 5
    self._pool.cachedConnectionTimeout = 2 * 60

    self._well_known_resolver = WellKnownResolver(
        self._reactor,
        agent=Agent(
            self._reactor,
            pool=self._pool,
            contextFactory=tls_client_options_factory,
        ),
        well_known_cache=_well_known_cache,
    )
@defer.inlineCallbacks
def test_sleep(self):
    clock = Clock(reactor)

    @defer.inlineCallbacks
    def competing_callback():
        with LoggingContext() as competing_context:
            competing_context.request = "competing"
            yield clock.sleep(0)
            self._check_test_key("competing")

    reactor.callLater(0, competing_callback)

    with LoggingContext() as context_one:
        context_one.request = "one"
        yield clock.sleep(0)
        self._check_test_key("one")
def __init__(
    self,
    hostname: str,
    config: HomeServerConfig,
    reactor=None,
    version_string="Synapse",
):
    """
    Args:
        hostname: The hostname for the server.
        config: The full config for the homeserver.
    """
    if not reactor:
        from twisted.internet import reactor as _reactor

        reactor = _reactor

    self._reactor = reactor
    self.hostname = hostname
    # the key we use to sign events and requests
    self.signing_key = config.key.signing_key[0]
    self.config = config
    self._listening_services = []  # type: List[twisted.internet.tcp.Port]
    self.start_time = None  # type: Optional[int]

    self._instance_id = random_string(5)
    self._instance_name = config.worker.instance_name

    self.clock = Clock(reactor)
    self.version_string = version_string

    self.datastores = None  # type: Optional[Databases]
def __init__(self, hostname, reactor=None, **kwargs):
    """
    Args:
        hostname: The hostname for the server.
    """
    if not reactor:
        from twisted.internet import reactor

    self._reactor = reactor
    self.hostname = hostname
    self._building = {}
    self._listening_services = []
    self.start_time = None

    self.clock = Clock(reactor)
    self.distributor = Distributor()
    self.ratelimiter = Ratelimiter()
    self.admin_redaction_ratelimiter = Ratelimiter()
    self.registration_ratelimiter = Ratelimiter()

    self.datastores = None

    # Other kwargs are explicit dependencies
    for depname in kwargs:
        setattr(self, depname, kwargs[depname])
def setUp(self):
    self.reactor = ThreadedMemoryReactorClock()
    self.clock = Clock(self.reactor)

    self.hs = setup_test_homeserver(
        self.addCleanup,
        "red",
        http_client=None,
        clock=self.clock,
        reactor=self.reactor,
        federation_client=Mock(),
        ratelimiter=NonCallableMock(spec_set=["send_message"]),
    )

    self.store = self.hs.get_datastore()

    self.hs.config.registrations_require_3pid = []
    self.hs.config.enable_registration_captcha = False
    self.hs.config.recaptcha_public_key = []

    self.hs.config.limit_usage_by_mau = True
    self.hs.config.hs_disabled = False
    self.hs.config.max_mau_value = 2
    self.hs.config.mau_trial_days = 0
    self.hs.config.server_notices_mxid = "@server:red"
    self.hs.config.server_notices_mxid_display_name = None
    self.hs.config.server_notices_mxid_avatar_url = None
    self.hs.config.server_notices_room_name = "Test Server Notice Room"

    self.resource = JsonResource(self.hs)
    register.register_servlets(self.hs, self.resource)
    sync.register_servlets(self.hs, self.resource)
@defer.inlineCallbacks
def func(i, sleep=False):
    with LoggingContext("func(%s)" % i) as lc:
        with (yield linearizer.queue("")):
            self.assertEqual(LoggingContext.current_context(), lc)
            if sleep:
                yield Clock(reactor).sleep(0)

        self.assertEqual(LoggingContext.current_context(), lc)
def __init__(
    self,
    reactor: ISynapseReactor,
    tls_client_options_factory: Optional[FederationPolicyForHTTPS],
    user_agent: bytes,
    ip_blacklist: IPSet,
    proxy_reactor: Optional[ISynapseReactor] = None,
    _srv_resolver: Optional[SrvResolver] = None,
    _well_known_resolver: Optional[WellKnownResolver] = None,
):
    self._reactor = reactor
    self._clock = Clock(reactor)
    self._pool = HTTPConnectionPool(reactor)
    self._pool.retryAutomatically = False
    self._pool.maxPersistentPerHost = 5
    self._pool.cachedConnectionTimeout = 2 * 60

    if proxy_reactor is None:
        self.proxy_reactor = reactor
    else:
        self.proxy_reactor = proxy_reactor

    proxies = getproxies()
    https_proxy = proxies["https"].encode() if "https" in proxies else None

    self._agent = Agent.usingEndpointFactory(
        self._reactor,
        MatrixHostnameEndpointFactory(
            reactor,
            self.proxy_reactor,
            tls_client_options_factory,
            _srv_resolver,
            https_proxy,
        ),
        pool=self._pool,
    )
    self.user_agent = user_agent

    if _well_known_resolver is None:
        # Note that the name resolver has already been wrapped in an
        # IPBlacklistingResolver by MatrixFederationHttpClient.
        _well_known_resolver = WellKnownResolver(
            self._reactor,
            agent=BlacklistingAgentWrapper(
                ProxyAgent(
                    self._reactor,
                    self.proxy_reactor,
                    pool=self._pool,
                    contextFactory=tls_client_options_factory,
                    use_proxy=True,
                ),
                ip_blacklist=ip_blacklist,
            ),
            user_agent=self.user_agent,
        )

    self._well_known_resolver = _well_known_resolver
class _PullToPushProducer:
    """A push producer that wraps a pull producer."""

    def __init__(
        self, reactor: IReactorTime, producer: IPullProducer, consumer: IConsumer
    ):
        self._clock = Clock(reactor)
        self._producer = producer
        self._consumer = consumer

        # While running we use a looping call with a zero delay to call
        # resumeProducing on given producer.
        self._looping_call = None  # type: Optional[LoopingCall]

        # We start writing next reactor tick.
        self._start_loop()

    def _start_loop(self):
        """Start the looping call to resume producing."""
        if not self._looping_call:
            # Start a looping call which runs every tick.
            self._looping_call = self._clock.looping_call(self._run_once, 0)

    def stop(self):
        """Stops calling resumeProducing."""
        if self._looping_call:
            self._looping_call.stop()
            self._looping_call = None

    def pauseProducing(self):
        """Implements IPushProducer"""
        self.stop()

    def resumeProducing(self):
        """Implements IPushProducer"""
        self._start_loop()

    def stopProducing(self):
        """Implements IPushProducer"""
        self.stop()
        self._producer.stopProducing()

    def _run_once(self):
        """Calls resumeProducing on producer once."""
        try:
            self._producer.resumeProducing()
        except Exception:
            logger.exception("Failed to call resumeProducing")
            try:
                self._consumer.unregisterProducer()
            except Exception:
                pass

            self.stopProducing()
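# A minimal sketch (not from the source) of wiring the wrapper above between a
# pull producer and a consumer. `my_pull_producer` and `my_consumer` are
# hypothetical IPullProducer / IConsumer implementations; the wrapper starts
# driving resumeProducing() on the next reactor tick.
from twisted.internet import reactor

producer = _PullToPushProducer(reactor, my_pull_producer, my_consumer)
# streaming=True: the wrapper presents itself to the consumer as a push
# producer, pausing and resuming via the IPushProducer methods above.
my_consumer.registerProducer(producer, True)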
def prepare(self, reactor, clock, hs):
    self.clock = MemoryReactorClock()
    self.hs_clock = Clock(self.clock)
    self.url = "/_matrix/client/r0/register"

    self.registration_handler = Mock()
    self.auth_handler = Mock()
    self.device_handler = Mock()

    hs.config.enable_registration = True
    hs.config.registrations_require_3pid = []
    hs.config.auto_join_rooms = []
    hs.config.enable_registration_captcha = False
def test_overlapping_spans(self) -> None:
    """Overlapping spans which are not neatly nested should work"""
    reactor = MemoryReactorClock()
    clock = Clock(reactor)

    scopes = []

    async def task(i: int):
        scope = start_active_span(
            f"task{i}",
            tracer=self._tracer,
        )
        scopes.append(scope)

        self.assertEqual(self._tracer.active_span, scope.span)
        await clock.sleep(4)
        self.assertEqual(self._tracer.active_span, scope.span)
        scope.close()

    async def root():
        with start_active_span("root span", tracer=self._tracer) as root_scope:
            self.assertEqual(self._tracer.active_span, root_scope.span)
            scopes.append(root_scope)

            d1 = run_in_background(task, 1)
            await clock.sleep(2)
            d2 = run_in_background(task, 2)

            # because we did run_in_background, the active span should still be
            # the root.
            self.assertEqual(self._tracer.active_span, root_scope.span)

            await make_deferred_yieldable(
                defer.gatherResults([d1, d2], consumeErrors=True)
            )

            self.assertEqual(self._tracer.active_span, root_scope.span)

    with LoggingContext("root context"):
        # start the test off
        d1 = defer.ensureDeferred(root())

    # let the tasks complete
    reactor.pump((2,) * 8)

    self.successResultOf(d1)
    self.assertIsNone(self._tracer.active_span)

    # the spans should be reported in order of their finishing: task 1, task 2,
    # root.
    self.assertEqual(
        self._reporter.get_spans(),
        [scopes[1].span, scopes[2].span, scopes[0].span],
    )
def __init__(self, name=None, clock=None):
    if name is None:
        self.name = id(self)
    else:
        self.name = name
    self.key_to_defer = {}

    if not clock:
        from twisted.internet import reactor

        clock = Clock(reactor)
    self._clock = clock
def __init__(
    self,
    reactor: ISynapseReactor,
    tls_client_options_factory: Optional[FederationPolicyForHTTPS],
    user_agent: bytes,
    ip_whitelist: IPSet,
    ip_blacklist: IPSet,
    _srv_resolver: Optional[SrvResolver] = None,
    _well_known_resolver: Optional[WellKnownResolver] = None,
):
    # proxy_reactor is not blacklisted
    proxy_reactor = reactor

    # We need to use a DNS resolver which filters out blacklisted IP
    # addresses, to prevent DNS rebinding.
    reactor = BlacklistingReactorWrapper(reactor, ip_whitelist, ip_blacklist)

    self._clock = Clock(reactor)
    self._pool = HTTPConnectionPool(reactor)
    self._pool.retryAutomatically = False
    self._pool.maxPersistentPerHost = 5
    self._pool.cachedConnectionTimeout = 2 * 60

    self._agent = Agent.usingEndpointFactory(
        reactor,
        MatrixHostnameEndpointFactory(
            reactor,
            proxy_reactor,
            tls_client_options_factory,
            _srv_resolver,
        ),
        pool=self._pool,
    )
    self.user_agent = user_agent

    if _well_known_resolver is None:
        _well_known_resolver = WellKnownResolver(
            reactor,
            agent=BlacklistingAgentWrapper(
                ProxyAgent(
                    reactor,
                    proxy_reactor,
                    pool=self._pool,
                    contextFactory=tls_client_options_factory,
                    use_proxy=True,
                ),
                ip_blacklist=ip_blacklist,
            ),
            user_agent=self.user_agent,
        )

    self._well_known_resolver = _well_known_resolver
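# A minimal sketch (not from the source) of using the agent above. It follows
# Twisted's IAgent interface, so request() takes a method and a URI as bytes
# and fires with an IResponse; the matrix:// scheme and the /version path are
# assumptions based on how Synapse's federation client uses this agent.
from twisted.web.client import readBody

@defer.inlineCallbacks
def fetch_server_version(agent):
    response = yield agent.request(
        b"GET", b"matrix://remote.example.com/_matrix/federation/v1/version"
    )
    body = yield readBody(response)
    defer.returnValue(body)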
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
    super().prepare(reactor, clock, hs)

    # poke the other server's signing key into the key store, so that we don't
    # make requests for it
    verify_key = signedjson.key.get_verify_key(self.OTHER_SERVER_SIGNATURE_KEY)
    verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version)

    self.get_success(
        hs.get_datastores().main.store_server_verify_keys(
            from_server=self.OTHER_SERVER_NAME,
            ts_added_ms=clock.time_msec(),
            verify_keys=[
                (
                    self.OTHER_SERVER_NAME,
                    verify_key_id,
                    FetchKeyResult(
                        verify_key=verify_key,
                        valid_until_ts=clock.time_msec() + 1000,
                    ),
                )
            ],
        )
    )
def __init__(
    self,
    reactor: IReactorTime,
    agent: IAgent,
    user_agent: bytes,
    well_known_cache: Optional[TTLCache] = None,
    had_well_known_cache: Optional[TTLCache] = None,
):
    self._reactor = reactor
    self._clock = Clock(reactor)

    if well_known_cache is None:
        well_known_cache = _well_known_cache

    if had_well_known_cache is None:
        had_well_known_cache = _had_valid_well_known_cache

    self._well_known_cache = well_known_cache
    self._had_valid_well_known_cache = had_well_known_cache
    self._well_known_agent = RedirectAgent(agent)
    self.user_agent = user_agent
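# A minimal sketch (not from the source) of querying the resolver above. The
# get_well_known() method name and the result's delegated_server attribute
# match Synapse's WellKnownResolver but are assumptions here; the resolver is
# expected to consult its TTL caches before touching the network.
async def resolve_delegation(resolver):
    result = await resolver.get_well_known(b"example.com")
    return result.delegated_server  # bytes, or None if no valid .well-known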
def __init__(self, hostname, **kwargs):
    """
    Args:
        hostname: The hostname for the server.
    """
    self.hostname = hostname
    self._building = {}

    self.clock = Clock()
    self.distributor = Distributor()
    self.ratelimiter = Ratelimiter()

    # Other kwargs are explicit dependencies
    for depname in kwargs:
        setattr(self, depname, kwargs[depname])
def setUp(self):
    self.registration_handler = Mock()

    self.appservice = Mock(sender="@as:test")
    self.datastore = Mock(
        get_app_service_by_token=Mock(return_value=self.appservice)
    )

    handlers = Mock(registration_handler=self.registration_handler)
    self.clock = MemoryReactorClock()
    self.hs_clock = Clock(self.clock)

    self.hs = setup_test_homeserver(
        http_client=None, clock=self.hs_clock, reactor=self.clock
    )
    self.hs.get_datastore = Mock(return_value=self.datastore)
    self.hs.get_handlers = Mock(return_value=handlers)
def setUp(self):
    self.clock = MemoryReactorClock()
    self.hs_clock = Clock(self.clock)
    self.url = b"/_matrix/client/r0/register"

    self.appservice = None
    self.auth = Mock(
        get_appservice_by_req=Mock(side_effect=lambda x: self.appservice)
    )

    self.auth_result = failure.Failure(InteractiveAuthIncompleteError(None))
    self.auth_handler = Mock(
        check_auth=Mock(side_effect=lambda x, y, z: self.auth_result),
        get_session_data=Mock(return_value=None),
    )
    self.registration_handler = Mock()
    self.identity_handler = Mock()
    self.login_handler = Mock()
    self.device_handler = Mock()
    self.device_handler.check_device_registered = Mock(return_value="FAKE")
    self.datastore = Mock(return_value=Mock())
    self.datastore.get_current_state_deltas = Mock(return_value=[])

    # do the dance to hook it up to the hs global
    self.handlers = Mock(
        registration_handler=self.registration_handler,
        identity_handler=self.identity_handler,
        login_handler=self.login_handler,
    )
    self.hs = setup_test_homeserver(
        self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.clock
    )
    self.hs.get_auth = Mock(return_value=self.auth)
    self.hs.get_handlers = Mock(return_value=self.handlers)
    self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
    self.hs.get_device_handler = Mock(return_value=self.device_handler)
    self.hs.get_datastore = Mock(return_value=self.datastore)
    self.hs.config.enable_registration = True
    self.hs.config.registrations_require_3pid = []
    self.hs.config.auto_join_rooms = []

    self.resource = JsonResource(self.hs)
    register_servlets(self.hs, self.resource)
async def _expire_old_entries(clock: Clock, expiry_seconds: int) -> None:
    """Walks the global cache list to find cache entries that haven't been
    accessed in the given number of seconds.
    """
    now = int(clock.time())
    node = GLOBAL_ROOT.prev_node
    assert node is not None

    i = 0

    logger.debug("Searching for stale caches")

    while node is not GLOBAL_ROOT:
        # Only the root node isn't a `_TimedListNode`.
        assert isinstance(node, _TimedListNode)

        if node.last_access_ts_secs > now - expiry_seconds:
            break

        cache_entry = node.get_cache_entry()
        next_node = node.prev_node

        # The node should always have a reference to a cache entry and a valid
        # `prev_node`, as we only drop them when we remove the node from the
        # list.
        assert next_node is not None
        assert cache_entry is not None
        cache_entry.drop_from_cache()

        # If we do lots of work at once we yield to allow other stuff to happen.
        if (i + 1) % 10000 == 0:
            logger.debug("Waiting during drop")
            await clock.sleep(0)
            logger.debug("Waking during drop")

        node = next_node

        # If we've yielded then our current node may have been evicted, so we
        # need to check that it's still valid.
        if node.prev_node is None:
            break

        i += 1

    logger.info("Dropped %d items from caches", i)
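# A minimal sketch (not the source's actual wiring) of scheduling the reaper
# above: Clock.looping_call takes a callable and an interval in milliseconds,
# so the coroutine is wrapped with defer.ensureDeferred. The
# `start_cache_reaper` name and the 30-second cadence are hypothetical.
def start_cache_reaper(clock: Clock, expiry_seconds: int = 30 * 60) -> None:
    clock.looping_call(
        lambda: defer.ensureDeferred(_expire_old_entries(clock, expiry_seconds)),
        30 * 1000,  # run every 30 seconds
    )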
@defer.inlineCallbacks
def test_verify_json_objects_for_server_awaits_previous_requests(self):
    clock = Clock(reactor)
    key1 = signedjson.key.generate_signing_key(1)

    kr = keyring.Keyring(self.hs)
    json1 = {}
    signedjson.sign.sign_json(json1, "server10", key1)

    persp_resp = {
        "server_keys": [
            self.mock_perspective_server.get_signed_key(
                "server10", signedjson.key.get_verify_key(key1)
            )
        ]
    }
    persp_deferred = defer.Deferred()

    @defer.inlineCallbacks
    def get_perspectives(**kwargs):
        self.assertEquals(LoggingContext.current_context().request, "11")
        with logcontext.PreserveLoggingContext():
            yield persp_deferred
        defer.returnValue(persp_resp)

    self.http_client.post_json.side_effect = get_perspectives

    with LoggingContext("11") as context_11:
        context_11.request = "11"

        # start off a first set of lookups
        res_deferreds = kr.verify_json_objects_for_server(
            [("server10", json1), ("server11", {})]
        )

        # the unsigned json should be rejected pretty quickly
        self.assertTrue(res_deferreds[1].called)
        try:
            yield res_deferreds[1]
            self.assertFalse("unsigned json didn't cause a failure")
        except SynapseError:
            pass

        self.assertFalse(res_deferreds[0].called)
        res_deferreds[0].addBoth(self.check_context, None)

        # wait a tick for it to send the request to the perspectives server
        # (it first tries the datastore)
        yield clock.sleep(1)  # XXX find out why this takes so long!
        self.http_client.post_json.assert_called_once()

        self.assertIs(LoggingContext.current_context(), context_11)

        context_12 = LoggingContext("12")
        context_12.request = "12"
        with logcontext.PreserveLoggingContext(context_12):
            # a second request for a server with outstanding requests
            # should block rather than start a second call
            self.http_client.post_json.reset_mock()
            self.http_client.post_json.return_value = defer.Deferred()

            res_deferreds_2 = kr.verify_json_objects_for_server(
                [("server10", json1)]
            )
            yield clock.sleep(1)
            self.http_client.post_json.assert_not_called()

        res_deferreds_2[0].addBoth(self.check_context, None)

        # complete the first request
        with logcontext.PreserveLoggingContext():
            persp_deferred.callback(persp_resp)
        self.assertIs(LoggingContext.current_context(), context_11)

        with logcontext.PreserveLoggingContext():
            yield res_deferreds[0]
            yield res_deferreds_2[0]