def start(self):
    """Restart the RPC service listening on this fixture's fake UNIX socket."""
    # Bring the RPC service down before rewiring its endpoints.
    self.rpc.stopService().wait(10)
    # Make sure a shared secret exists before the service comes back up.
    self.secret = security.get_shared_secret()
    # Sanity-check the service internals we are about to patch: it keeps
    # its endpoints in a list...
    assert isinstance(self.rpc.endpoints, list)
    # Swap a fake UNIX endpoint into the RPC service for the fixture's
    # lifetime (the service expects a list of endpoint groups).
    fake_endpoint = endpoints.UNIXServerEndpoint(reactor, self.sockfile)
    self.monkey.add_patch(self.rpc, "endpoints", [[fake_endpoint]])
    # ...and its connections in a defaultdict(set).
    assert isinstance(self.rpc.connections, defaultdict)
    assert self.rpc.connections.default_factory is set
    # Swap in a fresh connections mapping as well.
    self.monkey.add_patch(self.rpc, "connections", defaultdict(set))
    # Apply the patches, then start the service back up again.
    self.monkey.patch()
    self.rpc.startService().wait(10)
def __init__(self, dp_addr, dr_socket, dst_connected_callback=None):
    """Bridge a DR (UNIX socket server) to a DP (TCP client connection).

    :param dp_addr: ``(host, port)`` pair of the DP to connect to.
    :param dr_socket: filesystem path of the UNIX socket to listen on
        for the DR.
    :param dst_connected_callback: optional nullary callable invoked once
        the DP connection attempt has been initiated and the UNIX socket
        is listening.
    """
    self.log = logging.getLogger('dr2dp.dr')
    self.dr_socket = dr_socket
    self.dst_connected_callback = dst_connected_callback

    # Connect to the dst (DR2DP_DP) over TCP.
    self.dstFactory = Factory()
    self.dstFactory.protocol = DstProtocol
    self.dstFactory.dr2dp_dr = self
    self.dstFactory.src_protocol = None
    endpoint = endpoints.TCP4ClientEndpoint(reactor, dp_addr[0], dp_addr[1])
    d = endpoint.connect(self.dstFactory)
    d.addCallback(self.dst_connected)
    d.addErrback(self.dst_failed)

    # Listen for the src (DR) on a UNIX socket.
    self.srcFactory = Factory()
    self.srcFactory.protocol = SrcProtocol
    self.srcFactory.dr2dp_dr = self
    self.srcFactory.dst_protocol = None

    # Remove a stale socket file left over from a previous run, if any;
    # listening would otherwise fail with "address already in use".
    if os.path.exists(self.dr_socket):
        os.remove(self.dr_socket)

    # Use the configured logger instead of a bare print statement so the
    # message respects the application's logging configuration.
    self.log.info("starting socket")
    endpoint = endpoints.UNIXServerEndpoint(reactor, self.dr_socket)
    endpoint.listen(self.srcFactory)

    if self.dst_connected_callback:
        # Let anyone that wants to know that we've connected to the
        # destination and that the unix socket is up and ready.
        self.dst_connected_callback()
def connect(self, cluster, region):
    """Wire up a connection between cluster and region.

    Uses a UNIX socket to very rapidly connect the two ends.

    :type cluster: `twisted.internet.interfaces.IProtocol`
    :type region: `twisted.internet.interfaces.IProtocol`
    :return: (via ``returnValue``) a nullary ``shutdown`` coroutine the
        *test* must add to its own clean-up tasks.
    """
    # Wire up the region and cluster protocols via the sockfile.
    sockfile = path.join(self.sockdir.path, next(self.socknames))

    # Factory that hands out the already-constructed region protocol
    # instead of building a fresh one per connection.
    class RegionFactory(Factory):
        def buildProtocol(self, addr):
            return region

    # `doUpdate` has already been called, but with no connections the
    # mocked `_fetch_rpc_info` caused no `maas_url` to be set on the
    # RPC service. Set the `maas_url` to the one set on the fixture.
    self.rpc_service.maas_url = self.maas_url

    # Server side: the region listens on the UNIX socket.
    endpoint_region = endpoints.UNIXServerEndpoint(reactor, sockfile)
    port = yield endpoint_region.listen(RegionFactory())

    # Client side: the cluster connects to the same socket.
    endpoint_cluster = endpoints.UNIXClientEndpoint(reactor, sockfile)
    client = yield endpoints.connectProtocol(endpoint_cluster, cluster)

    # Wait for the client to be fully connected. Because onReady will have
    # been capped-off by now (see ClusterClient.connectionMade) this will
    # not raise any exceptions. In some ways this is convenient because it
    # allows the resulting issues to be encountered within test code.
    yield client.ready.get()

    @inlineCallbacks
    def shutdown():
        # We need to make sure that everything is shutdown correctly. TLS
        # seems to make this even more important: it complains loudly if
        # connections are not closed cleanly. An interesting article to
        # read now is Jono Lange's "How to Disconnect in Twisted, Really"
        # <http://mumak.net/stuff/twisted-disconnect.html>.
        # Order matters: stop listening first, then drop each live
        # transport and wait for its connection-lost notification.
        yield port.loseConnection()
        yield port.deferred
        if region.transport is not None:
            yield region.transport.loseConnection()
            yield region.onConnectionLost
        if client.transport is not None:
            yield client.transport.loseConnection()
            yield client.onConnectionLost

    # Fixtures don't wait for deferred work in clean-up tasks (or anywhere
    # else), so we can't use `self.addCleanup(shutdown)` here. We need to
    # get the user to add `shutdown` to the clean-up tasks for the *test*,
    # on the assumption they're using a test framework that accommodates
    # deferred work (like testtools with `MAASTwistedRunTest`).
    returnValue(shutdown)
def startService(self):
    """Start listening on the UNIX socket and throttle accepts via doRead."""
    from twisted.internet import reactor
    server_endpoint = endpoints.UNIXServerEndpoint(
        reactor, self.address, backlog=self.backlog, wantPID=1)
    port = yield server_endpoint.listen(self.protocolFactory)
    self.myPort = port
    # Keep the stock doRead reachable as realDoRead, then swap in the
    # wrapper so numberAccepts can be set on each read.
    port.realDoRead = port.doRead
    port.doRead = maxAcceptDoRead.__get__(port, port.__class__)
def createServerEndpoint(self, reactor, factory, **listenArgs): """ Create an L{UNIXServerEndpoint} and return the tools to verify its behaviour. @param reactor: A fake L{IReactorUNIX} that L{UNIXServerEndpoint} can call L{IReactorUNIX.listenUNIX} on. @param factory: The thing that we expect to be passed to our L{IStreamServerEndpoint.listen} implementation. @param listenArgs: Optional dictionary of arguments to L{IReactorUNIX.listenUNIX}. """ address = UNIXAddress(self.mktemp()) return (endpoints.UNIXServerEndpoint(reactor, address.name, **listenArgs), (address.name, factory, listenArgs.get('backlog', 50), listenArgs.get('mode', 0666), listenArgs.get('wantPID', 0)), address)
def main(reactor):
    """Serve a simple site over a UNIX socket and publish it as an onion."""
    # Attach to a running Tor via its control port.
    tor = yield txtorcon.connect(
        reactor,
        endpoints.TCP4ClientEndpoint(reactor, "localhost", 9051),
    )

    socket_path = abspath('./web_socket')
    unix_endpoint = endpoints.UNIXServerEndpoint(reactor, socket_path)
    # The returned listening-port object isn't needed afterwards.
    yield unix_endpoint.listen(server.Site(Simple()))

    def report_progress(percent, tag, msg):
        print('%03d: %s' % (percent, msg))

    print("Note: descriptor upload can take several minutes")

    # Publish port 80 of the onion service via the UNIX socket above.
    onion = yield tor.create_onion_service(
        ports=[(80, 'unix:{}'.format(socket_path))],
        version=3,  # or try version=2 if you have an older Tor
        progress=report_progress,
    )
    print("Private key:\n{}".format(onion.private_key))
    print("{}".format(onion.hostname))

    # Never fires: keeps the reactor (and the onion service) alive.
    yield defer.Deferred()