@defer.inlineCallbacks
def addCluster(self, protocol, rack_controller):
    """Add a new stub cluster using the given `protocol`.

    The `protocol` should be an instance of `amp.AMP`.

    :return: A `Deferred` that fires with the connected protocol
        instance.
    """
    endpoint = endpoints.UNIXClientEndpoint(reactor, self.sockfile)
    protocol = yield endpoints.connectProtocol(endpoint, protocol)
    # Mock the registration into the database, as the rack controller
    # is already created. We reset this once registration is complete
    # so as not to interfere with other tests.
    registered = rack_controller
    patcher = MonkeyPatcher()
    patcher.add_patch(
        rackcontrollers, "register",
        lambda *args, **kwargs: registered)
    # Register the rack controller with the region.
    patcher.patch()
    try:
        yield protocol.callRemote(
            region.RegisterRackController,
            system_id=rack_controller.system_id,
            hostname=rack_controller.hostname,
            interfaces={},
            url=urlparse(""),
        )
    finally:
        patcher.restore()
    defer.returnValue(protocol)
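For reference, here is a minimal, self-contained sketch of the client-side pattern this fixture relies on: building a `UNIXClientEndpoint` and handing an already-constructed `amp.AMP` instance to `endpoints.connectProtocol`. The `connect_amp` name and the socket path are hypothetical placeholders, and the sketch assumes a server is already listening there.

from twisted.internet import defer, endpoints, reactor
from twisted.protocols import amp

@defer.inlineCallbacks
def connect_amp(sockfile="/tmp/example.sock"):  # hypothetical path
    endpoint = endpoints.UNIXClientEndpoint(reactor, sockfile)
    # connectProtocol wraps the already-built protocol instance in a
    # one-shot factory and fires with the connected protocol.
    client = yield endpoints.connectProtocol(endpoint, amp.AMP())
    defer.returnValue(client)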
@inlineCallbacks
def connect(self, cluster, region):
    """Wire up a connection between cluster and region.

    Uses a UNIX socket to very rapidly connect the two ends.

    :type cluster: `twisted.internet.interfaces.IProtocol`
    :type region: `twisted.internet.interfaces.IProtocol`
    """
    # Wire up the region and cluster protocols via the sockfile.
    sockfile = path.join(self.sockdir.path, next(self.socknames))

    class RegionFactory(Factory):
        def buildProtocol(self, addr):
            return region

    # `doUpdate` has already been called, but with no connections the
    # mocked `_fetch_rpc_info` caused no `maas_url` to be set on the
    # RPC service. Set the `maas_url` to the one set on the fixture.
    self.rpc_service.maas_url = self.maas_url

    endpoint_region = endpoints.UNIXServerEndpoint(reactor, sockfile)
    port = yield endpoint_region.listen(RegionFactory())

    endpoint_cluster = endpoints.UNIXClientEndpoint(reactor, sockfile)
    client = yield endpoints.connectProtocol(endpoint_cluster, cluster)

    # Wait for the client to be fully connected. Because onReady will
    # have been capped-off by now (see ClusterClient.connectionMade)
    # this will not raise any exceptions. In some ways this is
    # convenient because it allows the resulting issues to be
    # encountered within test code.
    yield client.ready.get()

    @inlineCallbacks
    def shutdown():
        # We need to make sure that everything is shut down correctly.
        # TLS seems to make this even more important: it complains
        # loudly if connections are not closed cleanly. An interesting
        # article to read now is Jono Lange's "How to Disconnect in
        # Twisted, Really" <http://mumak.net/stuff/twisted-disconnect.html>.
        yield port.loseConnection()
        yield port.deferred
        if region.transport is not None:
            yield region.transport.loseConnection()
            yield region.onConnectionLost
        if client.transport is not None:
            yield client.transport.loseConnection()
            yield client.onConnectionLost

    # Fixtures don't wait for deferred work in clean-up tasks (or
    # anywhere else), so we can't use `self.addCleanup(shutdown)` here.
    # We need to get the user to add `shutdown` to the clean-up tasks
    # for the *test*, on the assumption they're using a test framework
    # that accommodates deferred work (like testtools with
    # `MAASTwistedRunTest`).
    returnValue(shutdown)
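The listen-then-connect pairing above can be distilled into a standalone sketch. The `wire_pair` name, the protocol classes, and the socket path below are illustrative assumptions rather than the fixture's real types; the returned `shutdown` callable mirrors the clean-up ordering used above (stop listening first, then drop the client transport).

from twisted.internet import defer, endpoints, protocol, reactor

@defer.inlineCallbacks
def wire_pair(sockfile="/tmp/pair.sock"):  # hypothetical socket path
    server_endpoint = endpoints.UNIXServerEndpoint(reactor, sockfile)
    port = yield server_endpoint.listen(
        protocol.Factory.forProtocol(protocol.Protocol))
    client_endpoint = endpoints.UNIXClientEndpoint(reactor, sockfile)
    client = yield endpoints.connectProtocol(
        client_endpoint, protocol.Protocol())

    @defer.inlineCallbacks
    def shutdown():
        # Stop accepting new connections first, then drop the client
        # transport, mirroring the ordering in the fixture above.
        yield port.stopListening()
        if client.transport is not None:
            client.transport.loseConnection()

    defer.returnValue(shutdown)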
def createClientEndpoint(self, reactor, clientFactory, **connectArgs):
    """
    Create an L{UNIXClientEndpoint} and return the values needed to
    verify its behaviour.

    @param reactor: A fake L{IReactorUNIX} that L{UNIXClientEndpoint}
        can call L{IReactorUNIX.connectUNIX} on.
    @param clientFactory: The thing that we expect to be passed to our
        L{IStreamClientEndpoint.connect} implementation.
    @param connectArgs: Optional dictionary of arguments to
        L{IReactorUNIX.connectUNIX}.
    """
    address = UNIXAddress(self.mktemp())
    return (endpoints.UNIXClientEndpoint(reactor, address.name,
                                         **connectArgs),
            (address.name, clientFactory,
             connectArgs.get('timeout', 30),
             connectArgs.get('checkPID', 0)),
            address)
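A hedged sketch of how such a helper's return value might be exercised against a memory reactor. The `verify_connect_args` function and the `case` parameter are assumptions for illustration: `case` stands in for any test-case object providing the helper above (and therefore `mktemp`). `MemoryReactor` records each `connectUNIX` call as a tuple, so the expected arguments can be compared directly; the factory element is a wrapper, so only the other fields are checked.

from twisted.internet.protocol import Factory, Protocol
from twisted.internet.testing import MemoryReactor

def verify_connect_args(case):
    memoryReactor = MemoryReactor()
    clientFactory = Factory.forProtocol(Protocol)
    endpoint, expectedArgs, address = case.createClientEndpoint(
        memoryReactor, clientFactory, timeout=10, checkPID=1)
    endpoint.connect(clientFactory)
    # MemoryReactor logs each connectUNIX call as a tuple of
    # (address, factory, timeout, checkPID).
    loggedAddress, _, loggedTimeout, loggedCheckPID = (
        memoryReactor.unixClients[0])
    assert loggedAddress == expectedArgs[0]
    assert (loggedTimeout, loggedCheckPID) == (10, 1)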
@defer.inlineCallbacks
def connect_client(self):
    """Connect a DRPC client to the UNIX socket at `self.path`."""
    endpoint = endpoints.UNIXClientEndpoint(reactor, self.path)
    client_factory = drpc.ClientFactory(self.creds)
    client = yield endpoint.connect(client_factory)
    defer.returnValue(client)
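The same `endpoint.connect(factory)` pattern works with any ordinary Twisted factory. `drpc` and `self.creds` above are project-specific, so this generic sketch substitutes a hypothetical `EchoClient` protocol and socket path.

from twisted.internet import defer, endpoints, reactor
from twisted.internet.protocol import Factory, Protocol

class EchoClient(Protocol):
    def dataReceived(self, data):
        print("received:", data)

@defer.inlineCallbacks
def connect_echo(path="/tmp/echo.sock"):  # hypothetical socket path
    endpoint = endpoints.UNIXClientEndpoint(reactor, path)
    # endpoint.connect() takes a factory and fires with the connected
    # protocol instance built by that factory.
    client = yield endpoint.connect(Factory.forProtocol(EchoClient))
    defer.returnValue(client)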