def makeBroadcasterService(endpoint, local_ivo, test_interval, whitelist):
    """Create a VOEvent broadcaster service.

    The broadcaster service sends VOEvent messages from the broker to
    subscribers.

    Parameters
    ----------
    endpoint : implements `twisted.internet.interfaces.IStreamServerEndpoint`
        The endpoint on which the service will listen.
    local_ivo : `str`
        IVOA identifier for the subscriber.
    test_interval : `int`
        The interval in seconds between test events to be broadcast. If
        ``0``, no test events will be sent.
    whitelist : `list` of `ipaddress.IPv4Network` or `ipaddress.IPv6Network`
        Only addresses which fall in a network included in the whitelist are
        permitted to subscribe.
    """
    factory = VOEventBroadcasterFactory(local_ivo, test_interval)
    if log.LEVEL >= log.Levels.INFO:
        factory.noisy = False
    whitelisting_factory = WhitelistingFactory(factory, whitelist, "subscription")
    if log.LEVEL >= log.Levels.INFO:
        whitelisting_factory.noisy = False
    service = StreamServerEndpointService(endpoint, whitelisting_factory)

    # Shut down, rather than simply logging an error, if we can't bind.
    service._raiseSynchronously = True

    return service
def setup(self):
    storage.init(self.config['database'])

    cert = OpenPGPCertificate(open(self.config['pgp_cert']).read())
    key = OpenPGPPrivateKey(open(self.config['pgp_key']).read())

    cred = auth.OpenPGPKontalkCredentials(cert, key, str(self.config['pgp_keyring']))
    cred.verify_peer = True

    ring = keyring.Keyring(storage.MySQLNetworkStorage(),
                           self.config['fingerprint'],
                           self.network, self.servername)
    self.service = NetService(self.config, self, ring, cred)
    self.service.logTraffic = self.logTraffic
    self.sfactory = XMPPNetServerFactory(self.service)
    self.sfactory.logTraffic = self.logTraffic

    tls_svc = StreamServerEndpointService(
        tls.TLSServerEndpoint(reactor=reactor,
                              port=int(self.config['bind'][1]),
                              interface=str(self.config['bind'][0]),
                              credentials=cred),
        self.sfactory)
    tls_svc._raiseSynchronously = True

    return tls_svc
def __init__(self, reactor, cluster_state, configuration_service, endpoint,
             context_factory):
    """
    :param reactor: See ``ControlServiceLocator.__init__``.
    :param ClusterStateService cluster_state: Object that records known
        cluster state.
    :param ConfigurationPersistenceService configuration_service:
        Persistence service for desired cluster configuration.
    :param endpoint: Endpoint to listen on.
    :param context_factory: TLS context factory.
    """
    self.connections = set()
    self._current_command = {}
    self.cluster_state = cluster_state
    self.configuration_service = configuration_service
    self.endpoint_service = StreamServerEndpointService(
        endpoint,
        TLSMemoryBIOFactory(
            context_factory,
            False,
            ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))
        )
    )
    # When configuration changes, notify all connected clients:
    self.configuration_service.register(
        lambda: self._send_state_to_connections(self.connections))
def service(description, factory, reactor=None):
    """
    Return the service corresponding to a description.

    @param description: The description of the listening port, in the syntax
        described by L{twisted.internet.endpoints.serverFromString}.
    @type description: C{str}

    @param factory: The protocol factory which will build protocols for
        connections to this service.
    @type factory: L{twisted.internet.interfaces.IProtocolFactory}

    @rtype: C{twisted.application.service.IService}
    @return: the service corresponding to a description of a reliable stream
        server.

    @see: L{twisted.internet.endpoints.serverFromString}
    """
    if reactor is None:
        from twisted.internet import reactor

    svc = StreamServerEndpointService(
        endpoints.serverFromString(reactor, description), factory)
    svc._raiseSynchronously = True
    return svc
def setup(self):
    # initialize storage
    # doing it here because it's needed by the server factory
    storage.init(self.config['database'])

    self.presencedb = storage.MySQLPresenceStorage()

    # TODO from configuration
    stor_class = self.config['storage']['class']
    klass = getattr(storage, stor_class)
    self.storage = klass(*self.config['storage']['params'])

    self.keyring = keyring.Keyring(storage.MySQLNetworkStorage(),
                                   self.config['fingerprint'],
                                   self.network, self.servername,
                                   disable_cache=True)

    token_auth = auth.AuthKontalkChecker(self.config['fingerprint'], self.keyring)

    # upload endpoint
    portal = Portal(FileUploadRealm(self), [token_auth])
    resource = HTTPSAuthSessionWrapper(portal, auth.KontalkCertificate)
    self.putChild('upload', resource)

    # download endpoint
    portal = Portal(FileDownloadRealm(self), [token_auth])
    resource = HTTPSAuthSessionWrapper(portal, auth.KontalkCertificate)
    self.putChild('download', resource)

    # http service
    self.factory = server.Site(self)

    sslFactory = xmlstream2.MyOpenSSLCertificateOptions(self.config['ssl_key'],
                                                        self.config['ssl_cert'],
                                                        self._sslVerify)
    endpoint = SSL4ServerEndpoint(reactor, self.config['bind'][1], sslFactory,
                                  interface=str(self.config['bind'][0]))
    svc = StreamServerEndpointService(endpoint, self.factory)
    svc._raiseSynchronously = True
    return svc
def setup(self):
    # initialize storage
    # doing it here because it's needed by the server factory
    storage.init(self.config['database'])

    # TODO from configuration
    stor_class = self.config['storage']['class']
    klass = getattr(storage, stor_class)
    self.storage = klass(*self.config['storage']['params'])

    self.keyring = keyring.Keyring(storage.MySQLNetworkStorage(),
                                   self.config['fingerprint'],
                                   self.network, self.servername,
                                   disable_cache=True)

    token_auth = auth.AuthKontalkChecker(self.config['fingerprint'], self.keyring)

    # upload endpoint
    portal = Portal(FileUploadRealm(self), [token_auth])
    resource = HTTPSAuthSessionWrapper(portal, auth.KontalkCertificate)
    self.putChild('upload', resource)

    # download endpoint
    portal = Portal(FileDownloadRealm(self), [token_auth])
    resource = HTTPSAuthSessionWrapper(portal, auth.KontalkCertificate)
    self.putChild('download', resource)

    # http service
    self.factory = server.Site(self)

    sslFactory = MyOpenSSLCertificateOptions(self.config['ssl_key'],
                                             self.config['ssl_cert'],
                                             self._sslVerify)
    endpoint = SSL4ServerEndpoint(reactor, self.config['bind'][1], sslFactory,
                                  interface=str(self.config['bind'][0]))
    svc = StreamServerEndpointService(endpoint, self.factory)
    svc._raiseSynchronously = True
    return svc
class WireServer(object):
    """
    An AMP server for the remote end of a L{WireWorker}.

    Construct me with an endpoint description string and either an instance
    or the fully qualified name of a L{WireWorkerUniverse} subclass.

    @ivar service: A C{StreamServerEndpointService} from
        C{twisted.application.internet} that you can include in the
        C{application} of a C{.tac} file, thus accepting connections to run
        tasks.
    """
    triggerID = None

    def __init__(self, description, wwu):
        if isinstance(wwu, str):
            klass = reflect.namedObject(wwu)
            wwu = klass(self)
        WireWorkerUniverse.check(wwu)
        self.factory = Factory()
        self.factory.protocol = lambda: amp.AMP(locator=wwu)
        endpoint = endpoints.serverFromString(reactor, description)
        self.service = StreamServerEndpointService(endpoint, self.factory)

    def start(self):
        self.service.startService()
        self.triggerID = reactor.addSystemEventTrigger(
            'before', 'shutdown', self.stop)

    def stop(self):
        if self.triggerID is None:
            return defer.succeed(None)
        self.triggerID = None
        return self.service.stopService()
def __init__(self, reactor, cluster_state, configuration_service, endpoint,
             context_factory):
    """
    :param reactor: See ``ControlServiceLocator.__init__``.
    :param ClusterStateService cluster_state: Object that records known
        cluster state.
    :param ConfigurationPersistenceService configuration_service:
        Persistence service for desired cluster configuration.
    :param endpoint: Endpoint to listen on.
    :param context_factory: TLS context factory.
    """
    self._connections = set()
    self._reactor = reactor
    self._connections_pending_update = set()
    self._current_pending_update_delayed_call = None
    self._current_command = {}
    self._last_received_generation = defaultdict(
        lambda: _ConfigAndStateGeneration())
    self._configuration_generation_tracker = GenerationTracker(100)
    self._state_generation_tracker = GenerationTracker(100)
    self.cluster_state = cluster_state
    self.configuration_service = configuration_service
    self.endpoint_service = StreamServerEndpointService(
        endpoint,
        TLSMemoryBIOFactory(
            context_factory,
            False,
            ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))))
    # When configuration changes, notify all connected clients:
    self.configuration_service.register(self._schedule_broadcast_update)
def service(
    description: str,
    factory: interfaces.IProtocolFactory,
    reactor: Optional[interfaces.IReactorCore] = None,
) -> StreamServerEndpointService:
    """
    Return the service corresponding to a description.

    @param description: The description of the listening port, in the syntax
        described by L{twisted.internet.endpoints.serverFromString}.
    @type description: C{str}

    @param factory: The protocol factory which will build protocols for
        connections to this service.
    @type factory: L{twisted.internet.interfaces.IProtocolFactory}

    @rtype: C{twisted.application.service.IService}
    @return: the service corresponding to a description of a reliable stream
        server.

    @see: L{twisted.internet.endpoints.serverFromString}
    """
    if reactor is None:
        reactor = _getReactor()

    svc = StreamServerEndpointService(
        endpoints.serverFromString(reactor, description), factory)
    svc._raiseSynchronously = True
    return svc
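A minimal usage sketch of a helper like the one above (it behaves like twisted.application.strports.service): build a service from an endpoint description string and attach it to an Application in a .tac file. The Echo protocol and the "tcp:8000" description are illustrative assumptions, not part of the example.

# Usage sketch only; assumes the service() helper above is in scope.
from twisted.application import service as app_service
from twisted.internet.protocol import Factory, Protocol


class Echo(Protocol):
    def dataReceived(self, data):
        # Write back whatever the client sent.
        self.transport.write(data)


application = app_service.Application("echo")
echo_service = service("tcp:8000", Factory.forProtocol(Echo))
echo_service.setServiceParent(application)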
def _makeHTTPService(self):
    """Create the HTTP service."""
    from provisioningserver.rackdservices.http import HTTPResource
    from twisted.application.internet import StreamServerEndpointService
    from twisted.internet.endpoints import AdoptedStreamServerEndpoint
    from provisioningserver.utils.twisted import SiteNoLog

    port = 5249
    s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    except socket_error as e:
        if e.errno != ENOPROTOOPT:
            raise e
    s.bind(('::', port))
    # Use a backlog of 50, which seems to be fairly common.
    s.listen(50)

    # Adopt this socket into Twisted's reactor.
    site_endpoint = AdoptedStreamServerEndpoint(reactor, s.fileno(), s.family)
    site_endpoint.port = port  # Make it easy to get the port number.
    site_endpoint.socket = s  # Prevent garbage collection.

    http_service = StreamServerEndpointService(
        site_endpoint, SiteNoLog(HTTPResource()))
    http_service.setName("http_service")
    return http_service
def makeService(options):
    """
    Construct a Pantheon SSH service.
    """
    from twisted.internet import reactor

    factory = SSHFactory()
    key = options["host-key"]
    factory.privateKeys = {key.sshType(): key}
    factory.publicKeys = {key.sshType(): key.public()}

    realm = PantheonRealm(
        reactor,
        options['auth-host'], options['auth-port'],
        options['client-key'].path, options['client-cert'].path)
    checker = PantheonHTTPChecker(
        reactor,
        options['auth-host'], options['auth-port'],
        options['client-key'].path, options['client-cert'].path)
    factory.portal = Portal(realm, [checker])

    service = MultiService()
    for endpoint in options["listen"]:
        child = StreamServerEndpointService(endpoint, factory)
        child.setServiceParent(service)
    return service
def setup(self):
    # initialize storage
    # doing it here because it's needed by the c2s server factory
    storage.init(self.config['database'])

    self.presencedb = storage.MySQLPresenceStorage()

    try:
        stanza_expire = self.config['stanza_expire']
    except KeyError:
        stanza_expire = 0
    self.stanzadb = storage.MySQLStanzaStorage(stanza_expire)

    try:
        validation_expire = self.config['registration']['expire']
    except KeyError:
        validation_expire = 0
    self.validationdb = storage.MySQLUserValidationStorage(validation_expire)

    self.keyring = keyring.Keyring(storage.MySQLNetworkStorage(),
                                   self.config['fingerprint'],
                                   self.network, self.servername)
    authrealm = auth.SASLRealm("Kontalk")
    authportal = portal.Portal(authrealm, [auth.AuthKontalkChecker(
        self.config['fingerprint'], self.keyring, self._verify_fingerprint)])

    self.sfactory = XMPPServerFactory(authportal, self, self.network, self.servername)
    self.sfactory.logTraffic = self.config['debug']
    if 'ssl_key' in self.config and 'ssl_cert' in self.config:
        self.sfactory.loadPEM(self.config['ssl_cert'], self.config['ssl_key'])

    services = []

    if 'plain' in self.config['bind']:
        plain_svc = strports.service(
            'tcp:' + str(self.config['bind']['plain'][1]) +
            ':interface=' + str(self.config['bind']['plain'][0]),
            self.sfactory)
        services.append(plain_svc)

    if 'ssl' in self.config['bind']:
        ssl_svc = internet.SSLServer(port=int(self.config['bind']['ssl'][1]),
                                     interface=str(self.config['bind']['ssl'][0]),
                                     factory=self.sfactory,
                                     contextFactory=self.sfactory.getSSLContext())
        services.append(ssl_svc)

    if 'tls' in self.config['bind']:
        cert = OpenPGPCertificate(open(self.config['pgp_cert']).read())
        key = OpenPGPPrivateKey(open(self.config['pgp_key']).read())

        cred = auth.OpenPGPKontalkCredentials(cert, key, str(self.config['pgp_keyring']))
        cred.verify_peer = True
        tls_svc = StreamServerEndpointService(
            tls.TLSServerEndpoint(reactor=reactor,
                                  port=int(self.config['bind']['tls'][1]),
                                  interface=str(self.config['bind']['tls'][0]),
                                  credentials=cred),
            self.sfactory)
        tls_svc._raiseSynchronously = True
        services.append(tls_svc)

    return services
def setUp(self):
    """
    Construct a stub server, a stub factory, and a
    L{StreamServerEndpointService} to test.
    """
    self.fakeServer = FakeServer()
    self.factory = Factory()
    self.svc = StreamServerEndpointService(self.fakeServer, self.factory)
def __init__(self, description, wwu):
    if isinstance(wwu, str):
        klass = reflect.namedObject(wwu)
        wwu = klass(self)
    WireWorkerUniverse.check(wwu)
    self.factory = Factory()
    self.factory.protocol = lambda: amp.AMP(locator=wwu)
    endpoint = endpoints.serverFromString(reactor, description)
    self.service = StreamServerEndpointService(endpoint, self.factory)
def create_prometheus_exporter_service(reactor, port):
    """Return a service exposing prometheus metrics on the specified port."""
    root = Resource()
    root.putChild(b"metrics", PrometheusMetricsResource(PROMETHEUS_METRICS))
    site = Site(root, logFormatter=reducedWebLogFormatter)
    endpoint = TCP6ServerEndpoint(reactor, port)
    service = StreamServerEndpointService(endpoint, site)
    service.setName("prometheus-exporter")
    return service
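A sketch of how such an exporter service could be wired into a larger application; the port number and the parent MultiService are assumptions for illustration, not part of the example above.

# Illustrative wiring only; assumes the factory function above is in scope.
from twisted.application.service import MultiService
from twisted.internet import reactor

parent = MultiService()
exporter = create_prometheus_exporter_service(reactor, 9100)
exporter.setServiceParent(parent)
# parent can then be attached to an Application object or started directly.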
def s2(result, self=self):
    if self._stopDeferred is None:
        # Fail
        return
    self._stopCall = None
    self._stopDeferred = None
    # Inside
    Service.stopService(self)
def __init__(self, device):
    '''
    Initialization of UPnP server
    '''
    self.upnp = UPnP(device)
    self.device = device
    self.upnp.parent = self
    self.site = server.Site(self.upnp)
    edp = endpoints.serverFromString(reactor, "tcp:0")
    StreamServerEndpointService.__init__(self, edp, self.site)
    self._choosenPort = None
def __init__(self, res):
    '''
    Initialization of UPnP server
    '''
    self.resource = res
    # self.resource = static.File(
    #     '/home/babe/Projets/eclipse/onDemand/src/web/')
    edp = endpoints.serverFromString(reactor, b'tcp:0')
    StreamServerEndpointService.__init__(self, edp, server.Site(self.resource))
    self._choosenPort = None
def __init__(self, res):
    '''
    Initialization of UPnP server
    '''
    self.resource = res
    # self.resource = static.File(
    #     '/home/babe/Projets/eclipse/onDemand/src/web/')
    edp = endpoints.serverFromString(reactor, b'tcp:0')
    StreamServerEndpointService.__init__(
        self, edp, server.Site(self.resource))
    self._choosenPort = None
def startService(self):
    MultiService.startService(self)
    staticPath = FilePath(__file__).sibling("static")
    root = NoListDirFile(staticPath.path)
    root.putChild('api', SockJSResource(Factory.forProtocol(DaneDoctorProtocol)))
    webService = StreamServerEndpointService(
        serverFromString(self._reactor, "tcp:8080"), Site(root))
    webService.setServiceParent(self)
def start_site(reactor, site, secure_ports, insecure_ports, redirect_to_port):
    parent = MultiService()
    for secure in secure_ports:
        StreamServerEndpointService(secure, site).setServiceParent(parent)
    if insecure_ports:
        redirector = make_redirector_site(redirect_to_port)
        for insecure in insecure_ports:
            StreamServerEndpointService(insecure, redirector).setServiceParent(parent)
    parent.privilegedStartService()
    parent.startService()
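One plausible invocation of start_site; the endpoint, resource, and port number below are assumptions for illustration (with an empty insecure_ports list, make_redirector_site is never called).

# Illustrative call only; assumes start_site above is in scope.
from twisted.internet import endpoints, reactor
from twisted.web.server import Site
from twisted.web.static import Data

site = Site(Data(b"hello", "text/plain"))
secure_ports = [endpoints.TCP4ServerEndpoint(reactor, 8443)]
start_site(reactor, site, secure_ports, insecure_ports=[], redirect_to_port=8443)
reactor.run()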
def __init__(self, config):
    SimarglClient.__init__(self, config)
    from twisted.internet import reactor
    self.factory = SimarglServerFactory(self)
    StreamServerEndpointService.__init__(
        self,
        TCP4ServerEndpoint(reactor, int(config.get('port', 9666)),
                           interface=config.get('host')),
        self.factory)
def __init__(self, config):
    SimarglClient.__init__(self, config)
    from twisted.internet import reactor
    self.factory = SimarglServerFactory(self)
    StreamServerEndpointService.__init__(
        self,
        TCP4ServerEndpoint(reactor, int(config.get('port', 9666)),
                           interface=config.get('host')),
        self.factory
    )
class GoApiWorker(BaseWorker):

    class CONFIG_CLASS(BaseWorker.CONFIG_CLASS):
        worker_name = ConfigText(
            "Name of this Go API worker.", required=True, static=True)
        twisted_endpoint = ConfigServerEndpoint(
            "Twisted endpoint to listen on.", required=True, static=True)
        web_path = ConfigText(
            "The path to serve this resource on.", required=True, static=True)
        health_path = ConfigText(
            "The path to serve the health resource on.",
            default='/health/', static=True)
        redis_manager = ConfigDict(
            "Redis client configuration.", default={}, static=True)
        riak_manager = ConfigDict(
            "Riak client configuration.", default={}, static=True)

    _web_service = None

    def _rpc_resource_for_user(self, username):
        rpc = GoApiServer(username, self.vumi_api)
        addIntrospection(rpc)
        return rpc

    def get_health_response(self):
        return "OK"

    @inlineCallbacks
    def setup_worker(self):
        config = self.get_static_config()
        self.vumi_api = yield VumiApi.from_config_async({
            'redis_manager': config.redis_manager,
            'riak_manager': config.riak_manager,
        })
        self.realm = GoUserRealm(self._rpc_resource_for_user)
        site = build_web_site({
            config.web_path: GoUserAuthSessionWrapper(
                self.realm, self.vumi_api),
            config.health_path: httprpc.HttpRpcHealthResource(self),
        })
        self._web_service = StreamServerEndpointService(
            config.twisted_endpoint, site)
        self._web_service.startService()

    @inlineCallbacks
    def teardown_worker(self):
        if self._web_service is not None:
            yield self._web_service.stopService()

    def setup_connectors(self):
        pass
def _build_api_service(self):
    """
    Once called, the resource is initialized. Any calls to
    self._add_resource() should be done before calling this fn.

    :return:
    """
    wsgi_app = self.anchore_service.get_api_application()
    wsgi_site = wsgi.WSGIResource(reactor, reactor.getThreadPool(),
                                  application=wsgi_app)
    self._add_resource(
        self.anchore_service.__service_api_version__.encode('utf-8'), wsgi_site)

    self.root_resource = web.resource.Resource()

    # Add nodes
    for name, resource in self.resource_nodes.items():
        self.root_resource.putChild(name, resource)

    # this will rewrite any calls that do not have an explicit version to the
    # base path before being processed by flask
    self._api_version_bytes = self.anchore_service.__service_api_version__.encode('utf-8')  # This is optimization

    # Handle the auth vs non-auth child resources to not consume a path element
    root = rewrite.RewriterResource(self.root_resource, self._default_version_rewrite)

    # Build the main site server
    site = server.Site(root)
    listen = self.anchore_service.configuration['listen']
    if str(self.anchore_service.configuration.get('ssl_enable', '')).lower() == 'true':
        try:
            ssl_data = {
                'ssl_cert': _load_ssl_cert(self.anchore_service.configuration['ssl_cert'])
                            if 'ssl_cert' in self.anchore_service.configuration else None,
                'ssl_chain': _load_ssl_cert(self.anchore_service.configuration['ssl_chain'])
                             if 'ssl_chain' in self.anchore_service.configuration else None,
                'ssl_key': _load_ssl_key(self.anchore_service.configuration['ssl_key'])
                           if 'ssl_key' in self.anchore_service.configuration else None
            }

            if ssl_data['ssl_chain']:
                sfact = ssl.CertificateOptions(privateKey=ssl_data['ssl_key'],
                                               certificate=ssl_data['ssl_cert'],
                                               extraCertChain=[ssl_data['ssl_chain']])
            else:
                sfact = ssl.CertificateOptions(privateKey=ssl_data['ssl_key'],
                                               certificate=ssl_data['ssl_cert'])

            endpoint = SSL4ServerEndpoint(reactor=reactor,
                                          port=int(self.anchore_service.configuration['port']),
                                          sslContextFactory=sfact,
                                          interface=listen)
        except Exception as err:
            raise err
    else:
        endpoint = TCP4ServerEndpoint(reactor=reactor,
                                      port=int(self.anchore_service.configuration['port']),
                                      interface=listen)

    ret_svc = StreamServerEndpointService(endpoint=endpoint, factory=site)
    ret_svc.setName(self.anchore_service.name)

    return ret_svc
def startService(self):
    MultiService.startService(self)
    staticPath = FilePath(__file__).sibling("static")
    root = NoListDirFile(staticPath.path)
    root.putChild('api', SockJSResource(
        Factory.forProtocol(DaneDoctorProtocol))
    )
    webService = StreamServerEndpointService(
        serverFromString(self._reactor, "tcp:8080"),
        Site(root)
    )
    webService.setServiceParent(self)
def setup_worker(self):
    config = self.get_static_config()
    self.vumi_api = yield VumiApi.from_config_async({
        'redis_manager': config.redis_manager,
        'riak_manager': config.riak_manager,
    })
    self.realm = GoUserRealm(self._rpc_resource_for_user)
    site = build_web_site({
        config.web_path: GoUserAuthSessionWrapper(
            self.realm, self.vumi_api),
        config.health_path: httprpc.HttpRpcHealthResource(self),
    })
    self._web_service = StreamServerEndpointService(
        config.twisted_endpoint, site)
    self._web_service.startService()
def makeService(config, channel_db="relay.sqlite", reactor=reactor):
    increase_rlimits()
    parent = MultiService()
    channel_db = create_or_upgrade_channel_db(config["channel-db"])
    usage_db = create_or_upgrade_usage_db(config["usage-db"])
    log_file = (os.fdopen(int(config["log-fd"]), "w")
                if config["log-fd"] is not None
                else None)
    server = make_server(channel_db,
                         allow_list=config["allow-list"],
                         advertise_version=config["advertise-version"],
                         signal_error=config["signal-error"],
                         blur_usage=config["blur-usage"],
                         usage_db=usage_db,
                         log_file=log_file,
                         )
    server.setServiceParent(parent)
    rebooted = time.time()

    def expire():
        now = time.time()
        old = now - CHANNEL_EXPIRATION_TIME
        server.prune_all_apps(now, old)
        server.dump_stats(now, rebooted=rebooted)
    TimerService(EXPIRATION_CHECK_PERIOD, expire).setServiceParent(parent)

    log_requests = config["blur-usage"] is None
    site = make_web_server(server, log_requests,
                           config["websocket-protocol-options"])
    ep = endpoints.serverFromString(reactor, config["port"])  # to listen
    StreamServerEndpointService(ep, site).setServiceParent(parent)
    log.msg("websocket listening on ws://HOSTNAME:PORT/v1")

    return parent
def makeService(options):
    """
    Make a new subscription manager ``IService``.
    """
    # Boo global reactor
    # https://twistedmatrix.com/trac/ticket/9063
    from twisted.internet import reactor

    parent = MultiService()

    eliot_logging_service(
        reactor,
        options.get("destinations", []),
    ).setServiceParent(parent)

    make_dirs(options["state-path"].path)
    site = Site(make_resource(
        options["state-path"],
        options["domain"].decode("ascii"),
        options["bucket-name"].decode("ascii"),
    ))

    StreamServerEndpointService(
        serverFromString(reactor, options["listen-address"]),
        site,
    ).setServiceParent(parent)

    return parent
def __init__(self, reactor, cluster_state, configuration_service, endpoint,
             context_factory):
    """
    :param reactor: See ``ControlServiceLocator.__init__``.
    :param ClusterStateService cluster_state: Object that records known
        cluster state.
    :param ConfigurationPersistenceService configuration_service:
        Persistence service for desired cluster configuration.
    :param endpoint: Endpoint to listen on.
    :param context_factory: TLS context factory.
    """
    self.connections = set()
    self._reactor = reactor
    self._connections_pending_update = set()
    self._current_pending_update_delayed_call = None
    self._current_command = {}
    self.cluster_state = cluster_state
    self.configuration_service = configuration_service
    self.endpoint_service = StreamServerEndpointService(
        endpoint,
        TLSMemoryBIOFactory(
            context_factory,
            False,
            ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))
        )
    )
    # When configuration changes, notify all connected clients:
    self.configuration_service.register(self._schedule_broadcast_update)
def main(self, reactor, options):
    # Many places in both twisted.web and Klein are unhappy with
    # listening on Unix socket, e.g.
    # https://twistedmatrix.com/trac/ticket/5406
    # "fix" that by pretending we have a port number. Yes, I feel guilty.
    UNIXAddress.port = 0

    # We can use /etc/flocker/agent.yml and /etc/flocker/node.crt to load
    # some information we need:
    agent_config = get_configuration(options)
    control_host = agent_config['control-service']['hostname']
    node_id = agent_config['node-credential'].uuid

    certificates_path = options["agent-config"].parent()
    control_port = options["rest-api-port"]
    flocker_client = FlockerClient(reactor, control_host, control_port,
                                   certificates_path.child(b"cluster.crt"),
                                   certificates_path.child(b"plugin.crt"),
                                   certificates_path.child(b"plugin.key"))

    self._create_listening_directory(PLUGIN_PATH.parent())
    endpoint = serverFromString(
        reactor, "unix:{}:mode=600".format(PLUGIN_PATH.path))
    service = StreamServerEndpointService(endpoint, Site(
        VolumePlugin(reactor, flocker_client, node_id).app.resource()))
    return main_for_service(reactor, service)
def create_api_service(persistence_service, cluster_state_service, endpoint,
                       context_factory, clock=reactor):
    """
    Create a Twisted Service that serves the API on the given endpoint.

    :param ConfigurationPersistenceService persistence_service: Service
        for retrieving and setting desired configuration.

    :param ClusterStateService cluster_state_service: Service that
        knows about the current state of the cluster.

    :param endpoint: Twisted endpoint to listen on.

    :param context_factory: TLS context factory.

    :param IReactorTime clock: The clock to use for time. By default
        global reactor.

    :return: Service that will listen on the endpoint using HTTP API server.
    """
    api_root = Resource()
    user = ConfigurationAPIUserV1(persistence_service, cluster_state_service,
                                  clock)
    api_root.putChild('v1', user.app.resource())
    api_root._v1_user = user  # For unit testing purposes, alas

    return StreamServerEndpointService(
        endpoint,
        TLSMemoryBIOFactory(
            context_factory,
            False,
            Site(api_root)
        )
    )
def makeService(options):
    from twisted.internet import reactor

    assert not (options["config"] and options["demo"])

    if options["demo"]:
        app = DownloadEFolder.create_demo(reactor, Logger(log))
    else:
        queue = DeferredQueue()
        app = DownloadEFolder.from_config(
            reactor, Logger(log), queue, options["config"],
        )

    if options.subCommand == "create-database":
        return CreateDatabaseService(reactor, app)

    if not options["demo"]:
        app.start_fetch_document_types()
        app.queue_pending_work()

    service = MultiService()

    endpoint = serverFromString(reactor, "tcp:8080:interface=127.0.0.1")
    StreamServerEndpointService(
        endpoint, Site(app.app.resource(), logPath="/dev/null"),
    ).setServiceParent(service)

    if not options["demo"]:
        for _ in xrange(8):
            DeferredQueueConsumerService(
                queue, lambda item: item()).setServiceParent(service)

    return service
def __init__(self, device):
    '''
    Initialization of UPnP server
    '''
    self.upnp = UPnP(device)
    self.devices = [device]
    device.parent = self
    self.upnp.parent = self
    self.site = server.Site(self.upnp)
    edp = endpoints.serverFromString(reactor, "tcp:0")
    StreamServerEndpointService.__init__(self, edp, self.site)
    self._choosenPort = None
    for service in device.services:
        service.control_resource = TwistedWebResource(service.app)
        service.event_resource = ServiceEventResource(service)
        service.resource = ServiceResource(service)
def magic_folder_web_service(reactor, webport, get_magic_folder, get_auth_token):
    root = Resource()
    root.putChild(b"api", MagicFolderWebApi(get_magic_folder, get_auth_token))
    return StreamServerEndpointService(
        serverFromString(reactor, webport),
        Site(root),
    )
def __init__(self, cluster_state, configuration_service, endpoint):
    """
    :param ClusterStateService cluster_state: Object that records known
        cluster state.

    :param ConfigurationPersistenceService configuration_service:
        Persistence service for desired cluster configuration.

    :param endpoint: Endpoint to listen on.
    """
    self.connections = set()
    self.cluster_state = cluster_state
    self.configuration_service = configuration_service
    self.endpoint_service = StreamServerEndpointService(
        endpoint, ServerFactory.forProtocol(lambda: ControlAMP(self)))
    # When configuration changes, notify all connected clients:
    self.configuration_service.register(
        lambda: self._send_state_to_connections(self.connections))
def get_liveness_service(options, reactor, check_liveness):
    root = Resource()
    root.putChild(b"liveness", LivenessResource(check_liveness))
    return StreamServerEndpointService(
        serverFromString(reactor, options["liveness-port"]),
        Site(root),
    )
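A possible invocation of the helper above; the option key's value, the port description, and the trivial check function are assumptions made for illustration.

# Illustrative usage only; assumes get_liveness_service and LivenessResource
# from the example above are importable in this scope.
from twisted.internet import reactor

def check_liveness():
    # Hypothetical health check; a real one would inspect application state.
    return True

liveness_service = get_liveness_service(
    {"liveness-port": "tcp:8001"}, reactor, check_liveness)
liveness_service.startService()
reactor.run()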
def run_service(node_id):
    endpoint = serverFromString(
        reactor, "unix:{}:mode=600".format(PLUGIN_PATH.path))
    service = StreamServerEndpointService(endpoint, Site(
        VolumePlugin(reactor, flocker_client, node_id).app.resource()))
    return main_for_service(reactor, service)
def start_AMP(self, p_pyhouse_obj):
    l_endpoint = TCP4ServerEndpoint
    l_factory = Factory()
    l_factory.protocol = AMP
    p_pyhouse_obj.Services.IrControlService = StreamServerEndpointService(
        l_endpoint, l_factory)
    p_pyhouse_obj.Services.IrControlService.setName('IrControl')
    p_pyhouse_obj.Services.IrControlService.setServiceParent(
        p_pyhouse_obj.Twisted.Application)
def startService(self):
    if self.running or self.loop != -1:
        # Already started
        return
    msg(self.name, 'start')
    # Cancel stop
    if self._stopCall:
        self._stopCall.cancel()
        self._stopCall = None
        # Call stop
        self._stopDeferred.callback(0)
        self._stopDeferred = None
    self.loop = 0
    Service.startService(self)
def makeReceiverService(endpoint, local_ivo, validators, handlers, whitelist):
    """Create a VOEvent receiver service.

    The receiver service accepts VOEvent messages submitted to the broker by
    authors.

    Parameters
    ----------
    endpoint : implements `twisted.internet.interfaces.IStreamServerEndpoint`
        The endpoint to which the service will listen.
    local_ivo : `str`
        IVOA identifier for the subscriber.
    validators : `list` of implementers of `~comet.icomet.IValidator`.
        Validators which will be applied to incoming events. Events which
        fail validation will be rejected.
    handlers : `list` of implementers of `~comet.icomet.IHandler`.
        Handlers to which events which pass validation will be passed.
    whitelist : `list` of `ipaddress.IPv4Network` or `ipaddress.IPv6Network`
        Submissions are only accepted from addresses which fall in a network
        included in the whitelist.

    Warnings
    --------
    Although a non-TCP endpoint can be specified (a Unix domain socket, for
    example), the whitelist won't be applied to it correctly (indeed, it will
    probably break horribly).
    """
    factory = VOEventReceiverFactory(local_ivo=local_ivo,
                                     validators=validators,
                                     handlers=handlers)
    if log.LEVEL >= log.Levels.INFO:
        factory.noisy = False
    whitelisting_factory = WhitelistingFactory(factory, whitelist, "submission")
    if log.LEVEL >= log.Levels.INFO:
        whitelisting_factory.noisy = False
    service = StreamServerEndpointService(endpoint, whitelisting_factory)

    # Shut down, rather than simply logging an error, if we can't bind.
    service._raiseSynchronously = True

    return service
def service(description, factory, default=_DEFAULT, reactor=None):
    """
    Return the service corresponding to a description.

    @param description: The description of the listening port, in the syntax
        described by L{twisted.internet.endpoints.server}.
    @type description: C{str}

    @param factory: The protocol factory which will build protocols for
        connections to this service.
    @type factory: L{twisted.internet.interfaces.IProtocolFactory}

    @type default: C{str} or C{None}
    @param default: Do not use this parameter. It has been deprecated since
        Twisted 10.2.0.

    @rtype: C{twisted.application.service.IService}
    @return: the service corresponding to a description of a reliable stream
        server.

    @see: L{twisted.internet.endpoints.serverFromString}
    """
    if reactor is None:
        from twisted.internet import reactor

    if default is _DEFAULT:
        default = None
    else:
        message = "The 'default' parameter was deprecated in Twisted 10.2.0."
        if default is not None:
            message += (
                " Use qualified endpoint descriptions; for example, "
                "'tcp:%s'." % (description,))
        warnings.warn(
            message=message, category=DeprecationWarning, stacklevel=2)

    svc = StreamServerEndpointService(
        endpoints._serverFromStringLegacy(reactor, description, default),
        factory)
    svc._raiseSynchronously = True
    return svc
def makeService(self, options):
    greatPath = FilePath(great.__file__).parent()
    staticPath = greatPath.child("static")
    templatesPath = greatPath.child("templates")

    rootResource = Resource()
    rootResource.putChild("", File(staticPath.child("index.html").path))
    rootResource.putChild("static", File(staticPath.path))
    rootResource.putChild("templates", File(templatesPath.path))
    rootResource.putChild("great", MinionResource(create_app()))

    greatService = StreamServerEndpointService(
        endpoint=options["endpoint"],
        factory=server.Site(rootResource),
    )

    redirects = options["redirects"]
    if not redirects:
        return greatService

    service = MultiService()
    greatService.setServiceParent(service)

    for redirect in redirects:
        redirectService = StreamServerEndpointService(
            endpoint=redirect,
            factory=server.Site(Redirect(options["canonical_url"])),
        )
        redirectService.setServiceParent(service)

    return service
def makeService(self, options):
    reactor = self.reactor
    if reactor is None:
        from twisted.internet import reactor
    resolver = self.resolver
    if resolver is None:
        resolver = getResolver()

    with open(options.config) as infile:
        config = yaml.safe_load(infile)

    multiService = MultiService()
    for proxy in config['proxies']:
        client = endpoints.clientFromString(reactor, str(proxy['client']))
        server = endpoints.serverFromString(reactor, str(proxy['server']))
        fac = ProxyFactory(client, resolver, proxy)
        service = StreamServerEndpointService(server, fac)
        service.setServiceParent(multiService)
    return multiService
def __init__(self, listen):
    self.name = 'Receiver'
    self.loop = -1
    # Inside
    self._stopCall = None
    self._stopDeferred = None
    self._workers = 0
    (Service.__init__(
        self,
        endpoint=serverFromString(reactor, listen),
        factory=ReceiverFactory(service=self),
    ))
def __init__(self, debug, basedir, conf):
    '''
    Initialization of web and websocket servers
    '''
    self.playing = False
    self.conf = conf
    self.recording = False
    self.analysing = False
    self.serving = False
    self.opened = False
    self.analyzed = {}
    self.analyzed['cocktail'] = 0
    self.analyzed['result'] = ''
    self.port = str(conf.httpport)
    self.debug = debug
    self.langage = conf.langage
    self.dbsession = conf.dbsession
    self.inports = []
    self.outports = []
    self.sysports = [(0, 0), (0, 0)]
    self.page = Dispatcher(debug, basedir, conf)
    print("installdir= %s" % basedir)
    self.page.parent = self
    self.site = server.Site(self.page)
    self.site.protocol = HTTPChannelHixie76Aware
    if isinstance(conf.httpport, int):
        edp = endpoints.serverFromString(reactor, "tcp:" + str(conf.httpport))
    else:
        edp = endpoints.serverFromString(reactor, conf.httpport)
    StreamServerEndpointService.__init__(self, edp, self.site)
    self.wsfactory = SeqFactory(debug, self.endpoint._port)
    self.wsfactory.protocol = PyanoTCP
    self.wsfactory.setProtocolOptions(allowHixie76=True)
    self.wsfactory.parent = self
    self.wsresource = WebSocketResource(self.wsfactory)
    self.page.putChild("ws", self.wsresource)
def setUp(self):
    super(BenchmarkAPITestsMixin, self).setUp()
    api = BenchmarkAPI_V1(self.backend)
    site = server.Site(api.app.resource())

    def make_client(listening_port):
        addr = listening_port.getHost()
        self.agent = client.ProxyAgent(
            endpoints.TCP4ClientEndpoint(
                self.reactor, addr.host, addr.port,
            ),
            self.reactor,
        )

    listening = Deferred()
    listening.addCallback(make_client)
    endpoint = TestEndpoint(self.reactor, listening)
    self.service = StreamServerEndpointService(endpoint, site)
    self.service.startService()
    self.addCleanup(self.service.stopService)
    return listening
def makeService(self, options):
    pi = Pi(audience=options["canonical_url"])
    piService = StreamServerEndpointService(
        endpoint=options["endpoint"],
        factory=server.Site(pi.app.resource()),
    )

    redirects = options["redirects"]
    if not redirects:
        return piService

    service = MultiService()
    piService.setServiceParent(service)

    for redirect in redirects:
        redirectService = StreamServerEndpointService(
            endpoint=redirect,
            factory=server.Site(Redirect(options["canonical_url"])),
        )
        redirectService.setServiceParent(service)

    return service
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.
    """
    logger = Logger()

    def __init__(self, cluster_state, configuration_service, endpoint):
        """
        :param ClusterStateService cluster_state: Object that records known
            cluster state.

        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.

        :param endpoint: Endpoint to listen on.
        """
        self.connections = set()
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint, ServerFactory.forProtocol(lambda: ControlAMP(self)))
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(
            lambda: self._send_state_to_connections(self.connections))

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()
        with LOG_SEND_CLUSTER_STATE(self.logger,
                                    configuration=configuration,
                                    state=state):
            for connection in connections:
                action = LOG_SEND_TO_AGENT(self.logger, agent=connection)
                with action.context():
                    d = DeferredContext(connection.callRemote(
                        ClusterStatusCommand,
                        configuration=configuration,
                        state=state,
                        eliot_context=action
                    ))
                    d.addActionFinish()
                    d.result.addErrback(lambda _: None)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        self.connections.add(connection)
        self._send_state_to_connections([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def node_changed(self, state_changes):
        """
        We've received a node state update from a connected client.

        :param bytes hostname: The hostname of the node.

        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes(state_changes)
        self._send_state_to_connections(self.connections)
            self.setContextFactory(name, connection)
        else:
            log.msg('SNI not provided, closing SSL connection')
            connection.shutdown()

    def getContext(self):
        return self._context


sslContext = SNIContextFactory(depl, config)
ports = [
    endpoints.TCP4ServerEndpoint(reactor, config.getint('master', 'port')),
    endpoints.SSL4ServerEndpoint(reactor, config.getint('master', 'sslport'),
                                 sslContext),
]

logfile = filepath.FilePath(config.get('master', 'http_logfile'))
if not logfile.parent().exists():
    logfile.parent().makedirs()

depl.root.putChild('_admin', resources.VhostListing(depl))
site = server.Site(depl.root, logPath=logfile.path)

application = service.Application('appserver')
depl.setServiceParent(application)
for port in ports:
    svc = StreamServerEndpointService(port, site)
    svc.setServiceParent(application)
class ControlAMPService(Service):
    """
    Control Service AMP server.

    Convergence agents connect to this server.

    :ivar dict _current_command: A dictionary containing information about
        connections to which state updates are currently in progress. The
        keys are protocol instances. The values are ``_UpdateState``
        instances.
    :ivar IReactorTime _reactor: An ``IReactorTime`` provider to be used to
        schedule delays in sending updates.
    :ivar set _connections_pending_update: A ``set`` of connections that are
        currently pending getting an update of state and configuration. An
        empty set indicates that there is no update pending.
    :ivar IDelayedCall _current_pending_update_delayed_call: The
        ``IDelayedCall`` provider for the currently pending call to update
        state/configuration on connected nodes.
    """
    logger = Logger()

    def __init__(self, reactor, cluster_state, configuration_service,
                 endpoint, context_factory):
        """
        :param reactor: See ``ControlServiceLocator.__init__``.
        :param ClusterStateService cluster_state: Object that records known
            cluster state.
        :param ConfigurationPersistenceService configuration_service:
            Persistence service for desired cluster configuration.
        :param endpoint: Endpoint to listen on.
        :param context_factory: TLS context factory.
        """
        self.connections = set()
        self._reactor = reactor
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._current_command = {}
        self.cluster_state = cluster_state
        self.configuration_service = configuration_service
        self.endpoint_service = StreamServerEndpointService(
            endpoint,
            TLSMemoryBIOFactory(
                context_factory,
                False,
                ServerFactory.forProtocol(lambda: ControlAMP(reactor, self))
            )
        )
        # When configuration changes, notify all connected clients:
        self.configuration_service.register(self._schedule_broadcast_update)

    def startService(self):
        self.endpoint_service.startService()

    def stopService(self):
        if self._current_pending_update_delayed_call:
            self._current_pending_update_delayed_call.cancel()
            self._current_pending_update_delayed_call = None
        self.endpoint_service.stopService()
        for connection in self.connections:
            connection.transport.loseConnection()

    def _send_state_to_connections(self, connections):
        """
        Send desired configuration and cluster state to all given connections.

        :param connections: A collection of ``AMP`` instances.
        """
        configuration = self.configuration_service.get()
        state = self.cluster_state.as_deployment()

        # Connections are separated into three groups to support a scheme
        # which lets us avoid sending certain updates which we know are not
        # necessary. This reduces traffic and associated costs (CPU, memory).
        #
        # Other schemes are possible and might produce even better
        # performance. See https://clusterhq.atlassian.net/browse/FLOC-3140
        # for some brainstorming.

        # Collect connections for which there is currently no unacknowledged
        # update. These can receive a new update right away.
        can_update = []

        # Collect connections for which there is an unacknowledged update.
        # Since something has changed, these should receive another update
        # once that acknowledgement is received.
        delayed_update = []

        # Collect connections which were already set to receive a delayed
        # update and still haven't sent an acknowledgement. These will still
        # receive a delayed update but we'll also note that we're going to
        # skip sending one intermediate update to them.
        elided_update = []

        for connection in connections:
            try:
                update = self._current_command[connection]
            except KeyError:
                # There's nothing in the tracking state for this connection.
                # That means there's no unacknowledged update. That means we
                # can send another update right away.
                can_update.append(connection)
            else:
                # These connections do currently have an unacknowledged
                # update outstanding.
                if update.next_scheduled:
                    # And these connections are also already scheduled to
                    # receive another update after the one they're currently
                    # processing. That update will include the most
                    # up-to-date information so we're effectively skipping an
                    # update that's no longer useful.
                    elided_update.append(connection)
                else:
                    # These don't have another update scheduled yet so we'll
                    # schedule one.
                    delayed_update.append(connection)

        # Make sure to run the logging action inside the caching block.
        # This lets encoding for logging share the cache with encoding for
        # network traffic.
        with LOG_SEND_CLUSTER_STATE() as action:
            if can_update:
                # If there are any protocols that can be updated right now,
                # we also want to see what updates they receive. Since
                # logging shares the caching context, it shouldn't be any
                # more expensive to serialize this information into the log
                # now. We specifically avoid logging this information if no
                # protocols are being updated because the serializing is
                # more expensive in that case and at the same time that
                # information isn't actually useful.
                action.add_success_fields(
                    configuration=configuration, state=state
                )
            else:
                # Eliot wants those fields though.
                action.add_success_fields(configuration=None, state=None)

            for connection in can_update:
                self._update_connection(connection, configuration, state)

            for connection in elided_update:
                AGENT_UPDATE_ELIDED(agent=connection).write()

            for connection in delayed_update:
                self._delayed_update_connection(connection)

    def _update_connection(self, connection, configuration, state):
        """
        Send a ``ClusterStatusCommand`` to an agent.

        :param ControlAMP connection: The connection to use to send the
            command.
        :param Deployment configuration: The cluster configuration to send.
        :param DeploymentState state: The current cluster state to send.
        """
        action = LOG_SEND_TO_AGENT(agent=connection)
        with action.context():
            # Use ``maybeDeferred`` so if an exception happens,
            # it will be wrapped in a ``Failure`` - see FLOC-3221
            d = DeferredContext(maybeDeferred(
                connection.callRemote,
                ClusterStatusCommand,
                configuration=configuration,
                state=state,
                eliot_context=action
            ))
            d.addActionFinish()
            d.result.addErrback(lambda _: None)

        update = self._current_command[connection] = _UpdateState(
            response=d.result,
            next_scheduled=False,
        )

        def finished_update(ignored):
            del self._current_command[connection]
        update.response.addCallback(finished_update)

    def _delayed_update_connection(self, connection):
        """
        Send a ``ClusterStatusCommand`` to an agent after it has acknowledged
        the last one.

        :param ControlAMP connection: The connection to use to send the
            command. This connection is expected to have previously been sent
            such a command and to not yet have acknowledged it. Internal
            state related to this will be used and then updated.
        """
        AGENT_UPDATE_DELAYED(agent=connection).write()
        update = self._current_command[connection]
        update.response.addCallback(
            lambda ignored: self._schedule_update([connection]),
        )
        self._current_command[connection] = update.set(next_scheduled=True)

    def connected(self, connection):
        """
        A new connection has been made to the server.

        :param ControlAMP connection: The new connection.
        """
        with AGENT_CONNECTED(agent=connection):
            self.connections.add(connection)
            self._schedule_update([connection])

    def disconnected(self, connection):
        """
        An existing connection has been disconnected.

        :param ControlAMP connection: The lost connection.
        """
        self.connections.remove(connection)

    def _execute_update_connections(self):
        """
        Actually executes an update to all pending connections.
        """
        connections_to_update = self._connections_pending_update
        self._connections_pending_update = set()
        self._current_pending_update_delayed_call = None
        self._send_state_to_connections(connections_to_update)

    def _schedule_update(self, connections):
        """
        Schedule a call to send_state_to_connections.

        This function adds a delay in the hopes that additional updates will
        be scheduled and they can all be called at once in a batch.

        :param connections: An iterable of connections that will be passed to
            ``_send_state_to_connections``.
        """
        self._connections_pending_update.update(set(connections))

        # If there is no current pending update and there are connections
        # pending an update, we must schedule the delayed call to update
        # connections.
        if (self._current_pending_update_delayed_call is None
                and self._connections_pending_update):
            self._current_pending_update_delayed_call = (
                self._reactor.callLater(
                    CONTROL_SERVICE_BATCHING_DELAY,
                    self._execute_update_connections
                )
            )

    def _schedule_broadcast_update(self):
        """
        Ensure that there is a pending broadcast update call.

        This is called when the state or configuration is updated, to trigger
        a broadcast of the current state and configuration to all nodes.

        In general, it only schedules an update to be broadcast 1 second
        later so that if we receive multiple updates within that second they
        are coalesced down to a single update.
        """
        self._schedule_update(self.connections)

    def node_changed(self, source, state_changes):
        """
        We've received a node state update from a connected client.

        :param IClusterStateSource source: Representation of where these
            changes were received from.

        :param list state_changes: One or more ``IClusterStateChange``
            providers representing the state change which has taken place.
        """
        self.cluster_state.apply_changes_from_source(source, state_changes)
        self._schedule_broadcast_update()
class TestEndpointService(TestCase):
    """
    Tests for L{twisted.application.internet}.
    """

    def setUp(self):
        """
        Construct a stub server, a stub factory, and a
        L{StreamServerEndpointService} to test.
        """
        self.fakeServer = FakeServer()
        self.factory = Factory()
        self.svc = StreamServerEndpointService(self.fakeServer, self.factory)

    def test_privilegedStartService(self):
        """
        L{StreamServerEndpointService.privilegedStartService} calls its
        endpoint's C{listen} method with its factory.
        """
        self.svc.privilegedStartService()
        self.assertIdentical(self.factory, self.fakeServer.factory)

    def test_synchronousRaiseRaisesSynchronously(self, thunk=None):
        """
        L{StreamServerEndpointService.startService} should raise
        synchronously if the L{Deferred} returned by its wrapped
        L{IStreamServerEndpoint.listen} has already fired with an errback and
        the L{StreamServerEndpointService}'s C{_raiseSynchronously} flag has
        been set.  This feature is necessary to preserve compatibility with
        old behavior of L{twisted.internet.strports.service}, which is to
        return a service which synchronously raises an exception from
        C{startService} (so that, among other things, twistd will not start
        running).  However, since L{IStreamServerEndpoint.listen} may fail
        asynchronously, it is a bad idea to rely on this behavior.
        """
        self.fakeServer.failImmediately = ZeroDivisionError()
        self.svc._raiseSynchronously = True
        self.assertRaises(ZeroDivisionError, thunk or self.svc.startService)

    def test_synchronousRaisePrivileged(self):
        """
        L{StreamServerEndpointService.privilegedStartService} should behave
        the same as C{startService} with respect to
        L{TestEndpointService.test_synchronousRaiseRaisesSynchronously}.
        """
        self.test_synchronousRaiseRaisesSynchronously(
            self.svc.privilegedStartService)

    def test_failReportsError(self):
        """
        L{StreamServerEndpointService.startService} and
        L{StreamServerEndpointService.privilegedStartService} should both log
        an exception when the L{Deferred} returned from their wrapped
        L{IStreamServerEndpoint.listen} fails.
        """
        self.svc.startService()
        self.fakeServer.result.errback(ZeroDivisionError())
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEquals(len(logged), 1)

    def test_synchronousFailReportsError(self):
        """
        Without the C{_raiseSynchronously} compatibility flag, failing
        immediately has the same behavior as failing later; it logs the
        error.
        """
        self.fakeServer.failImmediately = ZeroDivisionError()
        self.svc.startService()
        logged = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEquals(len(logged), 1)

    def test_startServiceUnstarted(self):
        """
        L{StreamServerEndpointService.startService} sets the C{running} flag,
        and calls its endpoint's C{listen} method with its factory, if it has
        not yet been started.
        """
        self.svc.startService()
        self.assertIdentical(self.factory, self.fakeServer.factory)
        self.assertEquals(self.svc.running, True)

    def test_startServiceStarted(self):
        """
        L{StreamServerEndpointService.startService} sets the C{running} flag,
        but nothing else, if the service has already been started.
        """
        self.test_privilegedStartService()
        self.svc.startService()
        self.assertEquals(self.fakeServer.listenAttempts, 1)
        self.assertEquals(self.svc.running, True)

    def test_stopService(self):
        """
        L{StreamServerEndpointService.stopService} calls C{stopListening} on
        the L{IListeningPort} returned from its endpoint, returns the
        C{Deferred} from stopService, and sets C{running} to C{False}.
        """
        self.svc.privilegedStartService()
        self.fakeServer.startedListening()
        # Ensure running gets set to true
        self.svc.startService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEquals(len(l), 0)
        self.fakeServer.stoppedListening()
        self.assertEquals(len(l), 1)
        self.assertFalse(self.svc.running)

    def test_stopServiceBeforeStartFinished(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not yet fired.  No error will be
        logged about the cancellation of the listen attempt.
        """
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addBoth(l.append)
        self.assertEquals(l, [None])
        self.assertEquals(self.flushLoggedErrors(CancelledError), [])

    def test_stopServiceCancelStartError(self):
        """
        L{StreamServerEndpointService.stopService} cancels the L{Deferred}
        returned by C{listen} if it has not fired yet.  An error will be
        logged if the resulting exception is not L{CancelledError}.
        """
        self.fakeServer.cancelException = ZeroDivisionError()
        self.svc.privilegedStartService()
        result = self.svc.stopService()
        l = []
        result.addCallback(l.append)
        self.assertEquals(l, [None])
        stoppingErrors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEquals(len(stoppingErrors), 1)