def prepare(self, reactor, clock, hs):
    """Wire up a master replication server and a slaved-store client,
    connected via in-memory fake transports.

    Sets `self.server_to_client_transport` so tests can later interfere
    with (e.g. disconnect) the server->client direction.
    """
    self.master_store = self.hs.get_datastore()
    self.storage = hs.get_storage()
    # Build the slaved store over the same underlying database as the master.
    self.slaved_store = self.STORE_TYPE(Database(hs), self.hs.get_db_conn(), self.hs)
    self.event_id = 0

    # Server side of the replication connection.
    server_factory = ReplicationStreamProtocolFactory(self.hs)
    self.streamer = server_factory.streamer

    # Client side: handler is backed by the slaved store; the factory is
    # mocked out since we never reconnect in these tests.
    handler_factory = Mock()
    self.replication_handler = ReplicationClientHandler(self.slaved_store)
    self.replication_handler.factory = handler_factory

    client_factory = ReplicationClientFactory(
        self.hs, "client_name", self.replication_handler
    )

    server = server_factory.buildProtocol(None)
    client = client_factory.buildProtocol(None)

    # NOTE(review): the client is connected first; the order presumably
    # matters for the initial handshake — confirm before reordering.
    client.makeConnection(FakeTransport(server, reactor))

    self.server_to_client_transport = FakeTransport(client, reactor)
    server.makeConnection(self.server_to_client_transport)
def prepare(self, reactor, clock, hs):
    """Build the master-side replication server and a separate worker
    homeserver with a replication client, but do not connect them yet
    (`self._client_transport`/`self._server_transport` stay None until a
    test wires them up).
    """
    # build a replication server
    server_factory = ReplicationStreamProtocolFactory(hs)
    self.streamer = hs.get_replication_streamer()
    self.server = server_factory.buildProtocol(None)

    # Make a new HomeServer object for the worker
    self.reactor.lookups["testserv"] = "1.2.3.4"
    self.worker_hs = self.setup_test_homeserver(
        http_client=None,
        homeserver_to_use=GenericWorkerServer,
        config=self._get_worker_hs_config(),
        reactor=self.reactor,
    )

    # Since we use sqlite in memory databases we need to make sure the
    # databases objects are the same.
    self.worker_hs.get_datastore().db_pool = hs.get_datastore().db_pool

    # Replace the worker's replication data handler with the test double
    # so tests can observe what rows arrive.
    self.test_handler = self._build_replication_data_handler()
    self.worker_hs.replication_data_handler = self.test_handler

    repl_handler = ReplicationCommandHandler(self.worker_hs)
    self.client = ClientReplicationStreamProtocol(
        self.worker_hs, "client", "test", clock, repl_handler,
    )

    # Transports are created lazily when the test connects the two sides.
    self._client_transport = None
    self._server_transport = None
def prepare(self, reactor, clock, hs):
    """Connect a replication client based on a slaved store to the master
    replication server, over fake in-memory transports.
    """
    db_config = hs.config.database.get_single_database()
    self.master_store = self.hs.get_datastore()
    self.storage = hs.get_storage()
    # Give the slaved store its own connection to the (single) database.
    database = hs.get_datastores().databases[0]
    self.slaved_store = self.STORE_TYPE(
        database, make_conn(db_config, database.engine), self.hs
    )
    self.event_id = 0

    server_factory = ReplicationStreamProtocolFactory(self.hs)
    self.streamer = hs.get_replication_streamer()

    # We now do some gut wrenching so that we have a client that is based
    # off of the slave store rather than the main store.
    self.replication_handler = ReplicationCommandHandler(self.hs)
    self.replication_handler._instance_name = "worker"
    self.replication_handler._replication_data_handler = ReplicationDataHandler(
        self.slaved_store
    )

    client_factory = DirectTcpReplicationClientFactory(
        self.hs, "client_name", self.replication_handler
    )
    client_factory.handler = self.replication_handler

    server = server_factory.buildProtocol(None)
    client = client_factory.buildProtocol(None)

    # Client connects first; keep the server->client transport around so
    # tests can tamper with that direction later.
    client.makeConnection(FakeTransport(server, reactor))

    self.server_to_client_transport = FakeTransport(client, reactor)
    server.makeConnection(self.server_to_client_transport)
def prepare(self, reactor, clock, hs):
    """Create a replication server and a dummy-handler client, and wire
    them together with fake in-memory transports.
    """
    # build a replication server
    server_factory = ReplicationStreamProtocolFactory(self.hs)
    self.streamer = server_factory.streamer
    server = server_factory.buildProtocol(None)

    # build a replication client, with a dummy handler
    self.test_handler = TestReplicationClientHandler()
    self.client = ClientReplicationStreamProtocol(
        "client", "test", clock, self.test_handler
    )

    # wire them together
    self.client.makeConnection(FakeTransport(server, reactor))
    server.makeConnection(FakeTransport(self.client, reactor))
def prepare(self, reactor, clock, hs):
    """Set up a server/client replication pair joined by in-memory transports."""
    # Server side: a replication stream protocol backed by the test homeserver.
    factory = ReplicationStreamProtocolFactory(self.hs)
    self.streamer = factory.streamer
    server_proto = factory.buildProtocol(None)

    # Client side: driven by a dummy handler so tests can inspect what arrives.
    self.test_handler = TestReplicationClientHandler()
    self.client = ClientReplicationStreamProtocol(
        "client", "test", clock, self.test_handler
    )

    # Cross-wire the two protocols: each side writes into a fake transport
    # that feeds the other side.
    self.client.makeConnection(FakeTransport(server_proto, reactor))
    server_proto.makeConnection(FakeTransport(self.client, reactor))
def start_listening(self):
    """Start all listeners declared in the config.

    Supports "http", "manhole" and "replication" listener types; any
    other type is logged and ignored.  Replication listeners register a
    shutdown trigger so the port is closed cleanly.
    """
    config = self.get_config()

    for listener in config.listeners:
        if listener["type"] == "http":
            self._listener_http(config, listener)
        elif listener["type"] == "manhole":
            bind_addresses = listener["bind_addresses"]

            for address in bind_addresses:
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="******",
                        password="******",
                        globals={"hs": self},
                    ),
                    interface=address,
                )
        elif listener["type"] == "replication":
            bind_addresses = listener["bind_addresses"]
            for address in bind_addresses:
                factory = ReplicationStreamProtocolFactory(self)
                server_listener = reactor.listenTCP(
                    listener["port"], factory, interface=address
                )
                # Ensure the listening port is shut down cleanly on exit.
                reactor.addSystemEventTrigger(
                    "before", "shutdown", server_listener.stopListening,
                )
        else:
            # Fix: Logger.warn() is a deprecated alias of Logger.warning().
            logger.warning("Unrecognized listener type: %s", listener["type"])
def start_listening(self, listeners):
    """Start the given listeners.

    Args:
        listeners: list of listener config dicts (keys include "type",
            "port", "bind_addresses").

    Supports "http", "manhole", "replication" and "metrics" listener
    types; any other type is logged and ignored.
    """
    config = self.get_config()

    for listener in listeners:
        if listener["type"] == "http":
            self._listening_services.extend(self._listener_http(config, listener))
        elif listener["type"] == "manhole":
            listen_tcp(
                listener["bind_addresses"],
                listener["port"],
                manhole(username="******", password="******", globals={"hs": self}),
            )
        elif listener["type"] == "replication":
            services = listen_tcp(
                listener["bind_addresses"],
                listener["port"],
                ReplicationStreamProtocolFactory(self),
            )
            # Close each replication port cleanly on shutdown.
            for s in services:
                reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                # Fix: Logger.warn() is a deprecated alias of Logger.warning().
                logger.warning(
                    ("Metrics listener configured, but " "enable_metrics is not True!")
                )
            else:
                _base.listen_metrics(listener["bind_addresses"], listener["port"])
        else:
            # Fix: Logger.warn() is a deprecated alias of Logger.warning().
            logger.warning("Unrecognized listener type: %s", listener["type"])
def setUp(self):
    """Create a test homeserver plus master/slave stores, and connect a
    replication client over a Linux abstract-namespace UNIX socket.

    NOTE(review): the leading "\\0" in the socket name makes it an
    abstract socket (no filesystem entry) — Linux-only; confirm the test
    suite only runs there.
    """
    self.hs = yield setup_test_homeserver(
        "blue",
        http_client=None,
        replication_layer=Mock(),
        ratelimiter=NonCallableMock(spec_set=[
            "send_message",
        ]),
    )
    # Allow all messages through the (mocked) ratelimiter.
    self.hs.get_ratelimiter().send_message.return_value = (True, 0)

    self.master_store = self.hs.get_datastore()
    self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs)
    self.event_id = 0

    server_factory = ReplicationStreamProtocolFactory(self.hs)
    listener = reactor.listenUNIX("\0xxx", server_factory)
    self.addCleanup(listener.stopListening)
    self.streamer = server_factory.streamer

    self.replication_handler = ReplicationClientHandler(self.slaved_store)
    client_factory = ReplicationClientFactory(
        self.hs, "client_name", self.replication_handler
    )
    client_connector = reactor.connectUNIX("\0xxx", client_factory)
    # Stop reconnection attempts before disconnecting, so teardown is quiet.
    self.addCleanup(client_factory.stopTrying)
    self.addCleanup(client_connector.disconnect)
def start_listening(self):
    """Start the configured listeners, plus outbound redis replication if
    enabled.

    Listener configs here are objects with attributes (listener.type,
    listener.port, ...) rather than dicts.
    """
    if self.config.redis_enabled:
        # If redis is enabled we connect via the replication command handler
        # in the same way as the workers (since we're effectively a client
        # rather than a server).
        self.get_tcp_replication().start_replication(self)

    for listener in self.config.server.listeners:
        if listener.type == "http":
            self._listening_services.extend(
                self._listener_http(self.config, listener)
            )
        elif listener.type == "manhole":
            _base.listen_manhole(
                listener.bind_addresses, listener.port, manhole_globals={"hs": self}
            )
        elif listener.type == "replication":
            services = listen_tcp(
                listener.bind_addresses,
                listener.port,
                ReplicationStreamProtocolFactory(self),
            )
            # Close replication ports cleanly at shutdown.
            for s in services:
                reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
        elif listener.type == "metrics":
            if not self.config.enable_metrics:
                logger.warning(
                    "Metrics listener configured, but " "enable_metrics is not True!"
                )
            else:
                _base.listen_metrics(listener.bind_addresses, listener.port)
        else:
            # this shouldn't happen, as the listener type should have been checked
            # during parsing
            logger.warning("Unrecognized listener type: %s", listener.type)
def setUp(self):
    """Create a test homeserver plus master/slave stores, and connect a
    replication client over a temporary UNIX socket path.
    """
    self.hs = yield setup_test_homeserver(
        "blue",
        http_client=None,
        federation_client=Mock(),
        ratelimiter=NonCallableMock(spec_set=[
            "send_message",
        ]),
    )
    # Allow all messages through the (mocked) ratelimiter.
    self.hs.get_ratelimiter().send_message.return_value = (True, 0)

    self.master_store = self.hs.get_datastore()
    self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs)
    self.event_id = 0

    server_factory = ReplicationStreamProtocolFactory(self.hs)
    # XXX: mktemp is unsafe and should never be used. but we're just a test.
    path = tempfile.mktemp(prefix="base_slaved_store_test_case_socket")
    listener = reactor.listenUNIX(path, server_factory)
    self.addCleanup(listener.stopListening)
    self.streamer = server_factory.streamer

    self.replication_handler = TestReplicationClientHandler(self.slaved_store)
    client_factory = ReplicationClientFactory(
        self.hs, "client_name", self.replication_handler
    )
    client_connector = reactor.connectUNIX(path, client_factory)
    # Stop reconnection attempts before disconnecting, so teardown is quiet.
    self.addCleanup(client_factory.stopTrying)
    self.addCleanup(client_connector.disconnect)
def prepare(self, reactor, clock, hs):
    """Wire a slaved-store replication client to the master server using a
    locally-defined FakeTransport that shuttles bytes between protocols.
    """
    self.master_store = self.hs.get_datastore()
    self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs)
    self.event_id = 0

    server_factory = ReplicationStreamProtocolFactory(self.hs)
    self.streamer = server_factory.streamer
    self.replication_handler = ReplicationClientHandler(self.slaved_store)
    client_factory = ReplicationClientFactory(
        self.hs, "client_name", self.replication_handler
    )

    server = server_factory.buildProtocol(None)
    client = client_factory.buildProtocol(None)

    @attr.s
    class FakeTransport(object):
        """Minimal ITransport stand-in: writes are buffered and delivered
        straight to the peer protocol's dataReceived.
        """

        other = attr.ib()          # the peer protocol to deliver bytes to
        disconnecting = False
        buffer = attr.ib(default=b'')

        def registerProducer(self, producer, streaming):
            self.producer = producer

            # Poll the producer on the reactor clock rather than on demand.
            def _produce():
                self.producer.resumeProducing()
                reactor.callLater(0.1, _produce)

            reactor.callLater(0.0, _produce)

        def write(self, byt):
            self.buffer = self.buffer + byt

            # Only deliver once the peer is fully connected (has a transport).
            if getattr(self.other, "transport") is not None:
                self.other.dataReceived(self.buffer)
                self.buffer = b""

        def writeSequence(self, seq):
            for x in seq:
                self.write(x)

    client.makeConnection(FakeTransport(server))
    server.makeConnection(FakeTransport(client))
def setUp(self): super().setUp() # build a replication server self.server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = self.hs.get_replication_streamer() # Fake in memory Redis server that servers can connect to. self._redis_server = FakeRedisPubSubServer() # We may have an attempt to connect to redis for the external cache already. self.connect_any_redis_attempts() store = self.hs.get_datastores().main self.database_pool = store.db_pool self.reactor.lookups["testserv"] = "1.2.3.4" self.reactor.lookups["localhost"] = "127.0.0.1" # A map from a HS instance to the associated HTTP Site to use for # handling inbound HTTP requests to that instance. self._hs_to_site = {self.hs: self.site} if self.hs.config.redis.redis_enabled: # Handle attempts to connect to fake redis server. self.reactor.add_tcp_client_callback( "localhost", 6379, self.connect_any_redis_attempts, ) self.hs.get_replication_command_handler().start_replication( self.hs) # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't # manually have to go and explicitly set it up each time (plus sometimes # it is impossible to write the handling explicitly in the tests). # # Register the master replication listener: self.reactor.add_tcp_client_callback( "1.2.3.4", 8765, lambda: self._handle_http_replication_attempt(self.hs, 8765), )
def prepare(self, reactor, clock, hs):
    """Wire a slaved-store replication client up to an in-process server."""
    self.master_store = self.hs.get_datastore()
    self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs)
    self.event_id = 0

    # Server end of the replication connection.
    stream_factory = ReplicationStreamProtocolFactory(self.hs)
    self.streamer = stream_factory.streamer

    # Client end, backed by the slaved store.
    self.replication_handler = ReplicationClientHandler(self.slaved_store)
    conn_factory = ReplicationClientFactory(
        self.hs, "client_name", self.replication_handler
    )

    server_proto = stream_factory.buildProtocol(None)
    client_proto = conn_factory.buildProtocol(None)

    # Join the two ends with fake in-memory transports.
    client_proto.makeConnection(FakeTransport(server_proto, reactor))
    server_proto.makeConnection(FakeTransport(client_proto, reactor))
def prepare(self, reactor, clock, hs):
    """Build the master-side replication server and a worker homeserver
    with a replication client; the two are connected later by the test
    (`_client_transport`/`_server_transport` start as None).
    """
    # build a replication server
    server_factory = ReplicationStreamProtocolFactory(hs)
    self.streamer = hs.get_replication_streamer()
    self.server: ServerReplicationStreamProtocol = server_factory.buildProtocol(
        IPv4Address("TCP", "127.0.0.1", 0)
    )

    # Make a new HomeServer object for the worker
    self.reactor.lookups["testserv"] = "1.2.3.4"
    self.worker_hs = self.setup_test_homeserver(
        federation_http_client=None,
        homeserver_to_use=GenericWorkerServer,
        config=self._get_worker_hs_config(),
        reactor=self.reactor,
    )

    # Since we use sqlite in memory databases we need to make sure the
    # databases objects are the same.
    self.worker_hs.get_datastores().main.db_pool = hs.get_datastores().main.db_pool

    # Normally we'd pass in the handler to `setup_test_homeserver`, which would
    # eventually hit "Install @cache_in_self attributes" in tests/utils.py.
    # Unfortunately our handler wants a reference to the homeserver. That leaves
    # us with a chicken-and-egg problem.
    # We can workaround this: create the homeserver first, create the handler
    # and bodge it in after the fact. The bodging requires us to know the
    # dirty details of how `cache_in_self` works. We politely ask mypy to
    # ignore our dirty dealings.
    self.test_handler = self._build_replication_data_handler()
    self.worker_hs._replication_data_handler = self.test_handler  # type: ignore[attr-defined]

    repl_handler = ReplicationCommandHandler(self.worker_hs)
    self.client = ClientReplicationStreamProtocol(
        self.worker_hs, "client", "test", clock, repl_handler,
    )

    self._client_transport = None
    self._server_transport = None
def setUp(self): super().setUp() # build a replication server self.server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = self.hs.get_replication_streamer() store = self.hs.get_datastore() self.database_pool = store.db_pool self.reactor.lookups["testserv"] = "1.2.3.4" self._worker_hs_to_resource = {} # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't # manually have to go and explicitly set it up each time (plus sometimes # it is impossible to write the handling explicitly in the tests). self.reactor.add_tcp_client_callback( "1.2.3.4", 8765, self._handle_http_replication_attempt)
def start_listening(self, listeners):
    """Start the given listeners, plus outbound redis replication if
    enabled.

    Args:
        listeners: list of listener config dicts (keys include "type",
            "port", "bind_addresses").
    """
    config = self.get_config()

    if config.redis_enabled:
        # If redis is enabled we connect via the replication command handler
        # in the same way as the workers (since we're effectively a client
        # rather than a server).
        self.get_tcp_replication().start_replication(self)

    for listener in listeners:
        if listener["type"] == "http":
            self._listening_services.extend(self._listener_http(config, listener))
        elif listener["type"] == "manhole":
            listen_tcp(
                listener["bind_addresses"],
                listener["port"],
                manhole(
                    username="******", password="******", globals={"hs": self}
                ),
            )
        elif listener["type"] == "replication":
            services = listen_tcp(
                listener["bind_addresses"],
                listener["port"],
                ReplicationStreamProtocolFactory(self),
            )
            # Close replication ports cleanly at shutdown.
            for s in services:
                reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
        elif listener["type"] == "metrics":
            if not self.get_config().enable_metrics:
                logger.warning(
                    (
                        "Metrics listener configured, but "
                        "enable_metrics is not True!"
                    )
                )
            else:
                _base.listen_metrics(listener["bind_addresses"], listener["port"])
        else:
            logger.warning("Unrecognized listener type: %s", listener["type"])
class RemoteServerUpTestCase(HomeserverTestCase):
    """Tests how the replication server fans out REMOTE_SERVER_UP commands
    to its connected clients.
    """

    def prepare(self, reactor, clock, hs):
        self.factory = ReplicationStreamProtocolFactory(hs)

    def _make_client(self) -> Tuple[IProtocol, StringTransport]:
        """Create a new direct TCP replication connection
        """
        proto = self.factory.buildProtocol(("127.0.0.1", 0))
        transport = StringTransport()
        proto.makeConnection(transport)

        # We can safely ignore the commands received during connection.
        self.pump()
        transport.clear()

        return proto, transport

    def test_relay(self):
        """Test that Synapse will relay REMOTE_SERVER_UP commands to all
        other connections, but not the one that sent it.
        """

        proto1, transport1 = self._make_client()

        # We shouldn't receive an echo.
        proto1.dataReceived(b"REMOTE_SERVER_UP example.com\n")
        self.pump()
        self.assertEqual(transport1.value(), b"")

        # But we should see an echo if we connect another client
        proto2, transport2 = self._make_client()
        proto1.dataReceived(b"REMOTE_SERVER_UP example.com\n")

        self.pump()
        self.assertEqual(transport1.value(), b"")
        self.assertEqual(transport2.value(), b"REMOTE_SERVER_UP example.com\n")
class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
    """Base class for tests running multiple workers.

    Automatically handles HTTP replication requests from workers to master,
    unlike `BaseStreamTestCase`.
    """

    servlets = []  # type: List[Callable[[HomeServer, JsonResource], None]]

    def setUp(self):
        super().setUp()

        # build a replication server
        self.server_factory = ReplicationStreamProtocolFactory(self.hs)
        self.streamer = self.hs.get_replication_streamer()

        # Fake in memory Redis server that servers can connect to.
        self._redis_server = FakeRedisPubSubServer()

        store = self.hs.get_datastore()
        self.database_pool = store.db_pool

        self.reactor.lookups["testserv"] = "1.2.3.4"
        self.reactor.lookups["localhost"] = "127.0.0.1"

        # A map from a HS instance to the associated HTTP Site to use for
        # handling inbound HTTP requests to that instance.
        self._hs_to_site = {self.hs: self.site}

        if self.hs.config.redis.redis_enabled:
            # Handle attempts to connect to fake redis server.
            self.reactor.add_tcp_client_callback(
                "localhost", 6379, self.connect_any_redis_attempts,
            )

            self.hs.get_tcp_replication().start_replication(self.hs)

        # When we see a connection attempt to the master replication listener we
        # automatically set up the connection. This is so that tests don't
        # manually have to go and explicitly set it up each time (plus sometimes
        # it is impossible to write the handling explicitly in the tests).
        #
        # Register the master replication listener:
        self.reactor.add_tcp_client_callback(
            "1.2.3.4",
            8765,
            lambda: self._handle_http_replication_attempt(self.hs, 8765),
        )

    def create_test_json_resource(self):
        """Overrides `HomeserverTestCase.create_test_json_resource`.
        """
        # We override this so that it automatically registers all the HTTP
        # replication servlets, without having to explicitly do that in all
        # subclasses.
        resource = ReplicationRestResource(self.hs)

        for servlet in self.servlets:
            servlet(self.hs, resource)

        return resource

    def make_worker_hs(
        self, worker_app: str, extra_config: dict = {}, **kwargs
    ) -> HomeServer:
        """Make a new worker HS instance, correctly connecting the replication
        stream to the master HS.

        Args:
            worker_app: Type of worker, e.g. `synapse.app.federation_sender`.
            extra_config: Any extra config to use for this instance.
            **kwargs: Options that get passed to `self.setup_test_homeserver`,
                useful to e.g. pass some mocks for things like `http_client`

        Returns:
            The new worker HomeServer instance.
        """
        config = self._get_worker_hs_config()
        config["worker_app"] = worker_app
        config.update(extra_config)

        worker_hs = self.setup_test_homeserver(
            homeserver_to_use=GenericWorkerServer,
            config=config,
            reactor=self.reactor,
            **kwargs,
        )

        # If the instance is in the `instance_map` config then workers may try
        # and send HTTP requests to it, so we register it with
        # `_handle_http_replication_attempt` like we do with the master HS.
        instance_name = worker_hs.get_instance_name()
        instance_loc = worker_hs.config.worker.instance_map.get(instance_name)
        if instance_loc:
            # Ensure the host is one that has a fake DNS entry.
            if instance_loc.host not in self.reactor.lookups:
                raise Exception(
                    "Host does not have an IP for instance_map[%r].host = %r"
                    % (instance_name, instance_loc.host,)
                )

            self.reactor.add_tcp_client_callback(
                self.reactor.lookups[instance_loc.host],
                instance_loc.port,
                lambda: self._handle_http_replication_attempt(
                    worker_hs, instance_loc.port
                ),
            )

        # Share the master's sqlite db pool with the worker.
        store = worker_hs.get_datastore()
        store.db_pool._db_pool = self.database_pool._db_pool

        # Set up TCP replication between master and the new worker if we don't
        # have Redis support enabled.
        if not worker_hs.config.redis_enabled:
            repl_handler = ReplicationCommandHandler(worker_hs)
            client = ClientReplicationStreamProtocol(
                worker_hs, "client", "test", self.clock, repl_handler,
            )
            server = self.server_factory.buildProtocol(None)

            client_transport = FakeTransport(server, self.reactor)
            client.makeConnection(client_transport)

            server_transport = FakeTransport(client, self.reactor)
            server.makeConnection(server_transport)

        # Set up a resource for the worker
        resource = ReplicationRestResource(worker_hs)

        for servlet in self.servlets:
            servlet(worker_hs, resource)

        self._hs_to_site[worker_hs] = SynapseSite(
            logger_name="synapse.access.http.fake",
            site_tag="{}-{}".format(
                worker_hs.config.server.server_name, worker_hs.get_instance_name()
            ),
            config=worker_hs.config.server.listeners[0],
            resource=resource,
            server_version_string="1",
        )

        if worker_hs.config.redis.redis_enabled:
            worker_hs.get_tcp_replication().start_replication(worker_hs)

        return worker_hs

    def _get_worker_hs_config(self) -> dict:
        # Point workers at the (fake) master replication listener.
        config = self.default_config()
        config["worker_replication_host"] = "testserv"
        config["worker_replication_http_port"] = "8765"
        return config

    def render_on_worker(self, worker_hs: HomeServer, request: SynapseRequest):
        # Dispatch the request against the Site registered for that worker.
        render(request, self._hs_to_site[worker_hs].resource, self.reactor)

    def replicate(self):
        """Tell the master side of replication that something has happened, and then
        wait for the replication to occur.
        """
        self.streamer.on_notifier_poke()
        self.pump()

    def _handle_http_replication_attempt(self, hs, repl_port):
        """Handles a connection attempt to the given HS replication HTTP
        listener on the given port.
        """

        # We should have at least one outbound connection attempt, where the
        # last is one to the HTTP replication IP/port.
        clients = self.reactor.tcpClients
        self.assertGreaterEqual(len(clients), 1)
        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, repl_port)

        # Set up client side protocol
        client_protocol = client_factory.buildProtocol(None)

        request_factory = OneShotRequestFactory()

        # Set up the server side protocol
        channel = _PushHTTPChannel(self.reactor)
        channel.requestFactory = request_factory
        channel.site = self._hs_to_site[hs]

        # Connect client to server and vice versa.
        client_to_server_transport = FakeTransport(
            channel, self.reactor, client_protocol
        )
        client_protocol.makeConnection(client_to_server_transport)

        server_to_client_transport = FakeTransport(
            client_protocol, self.reactor, channel
        )
        channel.makeConnection(server_to_client_transport)

        # Note: at this point we've wired everything up, but we need to return
        # before the data starts flowing over the connections as this is called
        # inside `connectTCP` before the connection has been passed back to the
        # code that requested the TCP connection.

    def connect_any_redis_attempts(self):
        """If redis is enabled we need to deal with workers connecting to a
        redis server. We don't want to use a real Redis server so we use a
        fake one.
        """
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
        self.assertEqual(host, "localhost")
        self.assertEqual(port, 6379)

        client_protocol = client_factory.buildProtocol(None)
        server_protocol = self._redis_server.buildProtocol(None)

        client_to_server_transport = FakeTransport(
            server_protocol, self.reactor, client_protocol
        )
        client_protocol.makeConnection(client_to_server_transport)
        server_to_client_transport = FakeTransport(
            client_protocol, self.reactor, server_protocol
        )
        server_protocol.makeConnection(server_to_client_transport)

        return client_to_server_transport, server_to_client_transport
class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
    """Base class for tests running multiple workers.

    Automatically handles HTTP replication requests from workers to master,
    unlike `BaseStreamTestCase`.
    """

    servlets = []  # type: List[Callable[[HomeServer, JsonResource], None]]

    def setUp(self):
        super().setUp()

        # build a replication server
        self.server_factory = ReplicationStreamProtocolFactory(self.hs)
        self.streamer = self.hs.get_replication_streamer()

        store = self.hs.get_datastore()
        self.database = store.db

        self.reactor.lookups["testserv"] = "1.2.3.4"

        # Map from worker HS -> resource serving its HTTP replication requests.
        self._worker_hs_to_resource = {}

        # When we see a connection attempt to the master replication listener we
        # automatically set up the connection. This is so that tests don't
        # manually have to go and explicitly set it up each time (plus sometimes
        # it is impossible to write the handling explicitly in the tests).
        self.reactor.add_tcp_client_callback(
            "1.2.3.4", 8765, self._handle_http_replication_attempt
        )

    def create_test_json_resource(self):
        """Overrides `HomeserverTestCase.create_test_json_resource`.
        """
        # We override this so that it automatically registers all the HTTP
        # replication servlets, without having to explicitly do that in all
        # subclasses.
        resource = ReplicationRestResource(self.hs)

        for servlet in self.servlets:
            servlet(self.hs, resource)

        return resource

    def make_worker_hs(
        self, worker_app: str, extra_config: dict = {}, **kwargs
    ) -> HomeServer:
        """Make a new worker HS instance, correctly connecting the replication
        stream to the master HS.

        Args:
            worker_app: Type of worker, e.g. `synapse.app.federation_sender`.
            extra_config: Any extra config to use for this instance.
            **kwargs: Options that get passed to `self.setup_test_homeserver`,
                useful to e.g. pass some mocks for things like `http_client`

        Returns:
            The new worker HomeServer instance.
        """
        config = self._get_worker_hs_config()
        config["worker_app"] = worker_app
        config.update(extra_config)

        worker_hs = self.setup_test_homeserver(
            homeserverToUse=GenericWorkerServer,
            config=config,
            reactor=self.reactor,
            **kwargs
        )

        # Share the master's sqlite db pool with the worker.
        store = worker_hs.get_datastore()
        store.db._db_pool = self.database._db_pool

        repl_handler = ReplicationCommandHandler(worker_hs)
        client = ClientReplicationStreamProtocol(
            worker_hs, "client", "test", self.clock, repl_handler,
        )
        server = self.server_factory.buildProtocol(None)

        client_transport = FakeTransport(server, self.reactor)
        client.makeConnection(client_transport)

        server_transport = FakeTransport(client, self.reactor)
        server.makeConnection(server_transport)

        # Set up a resource for the worker
        resource = ReplicationRestResource(self.hs)

        for servlet in self.servlets:
            servlet(worker_hs, resource)

        self._worker_hs_to_resource[worker_hs] = resource

        return worker_hs

    def _get_worker_hs_config(self) -> dict:
        # Point workers at the (fake) master replication listener.
        config = self.default_config()
        config["worker_replication_host"] = "testserv"
        config["worker_replication_http_port"] = "8765"
        return config

    def render_on_worker(self, worker_hs: HomeServer, request: SynapseRequest):
        render(request, self._worker_hs_to_resource[worker_hs], self.reactor)

    def replicate(self):
        """Tell the master side of replication that something has happened, and then
        wait for the replication to occur.
        """
        self.streamer.on_notifier_poke()
        self.pump()

    def _handle_http_replication_attempt(self):
        """Handles a connection attempt to the master replication HTTP
        listener.
        """

        # We should have at least one outbound connection attempt, where the
        # last is one to the HTTP replication IP/port.
        clients = self.reactor.tcpClients
        self.assertGreaterEqual(len(clients), 1)
        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8765)

        # Set up client side protocol
        client_protocol = client_factory.buildProtocol(None)

        request_factory = OneShotRequestFactory()

        # Set up the server side protocol
        channel = _PushHTTPChannel(self.reactor)
        channel.requestFactory = request_factory
        channel.site = self.site

        # Connect client to server and vice versa.
        client_to_server_transport = FakeTransport(
            channel, self.reactor, client_protocol
        )
        client_protocol.makeConnection(client_to_server_transport)

        server_to_client_transport = FakeTransport(
            client_protocol, self.reactor, channel
        )
        channel.makeConnection(server_to_client_transport)
def prepare(self, reactor, clock, hs):
    # Keep a factory around so individual tests can build replication
    # server protocols on demand.
    self.factory = ReplicationStreamProtocolFactory(hs)