def setUp(self):
    """
    Create a router factory with a single 'realm1' realm, attach a
    permissive 'test_role', and build the router session factory used
    by the tests.
    """
    # router factory under test
    self.router_factory = RouterFactory('node1', 'router1', None)

    # realm the test sessions will join
    self.realm = RouterRealm(None, None, {'name': 'realm1'})
    self.router_factory.start_realm(self.realm)

    # permissive static-auth role: allow every action under the com.example. prefix
    allow_all = {
        'uri': 'com.example.',
        'match': 'prefix',
        'allow': {
            'call': True,
            'register': True,
            'publish': True,
            'subscribe': True,
        }
    }
    self.router = self.router_factory.get('realm1')
    self.router.add_role(
        RouterRoleStaticAuth(self.router, 'test_role', default_permissions=allow_all))

    # factory producing router-side sessions
    self.session_factory = RouterSessionFactory(self.router_factory)
def __init__(self, config=None, reactor=None):
    """
    Initialize this router worker session: create the router and router
    session factories and the bookkeeping maps for realms, components
    and transports.

    :param config: Worker configuration (passed through to the base class).
    :param reactor: Twisted reactor this worker runs on.
    """
    NativeWorkerSession.__init__(self, config, reactor)

    # produces one router per realm
    self._router_factory = RouterFactory()

    # produces sessions attached to routers from the factory above
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    # realm ID -> RouterRealm
    self.realms = {}

    # realm URI -> realm ID
    self.realm_to_id = {}

    # component ID -> RouterComponent
    self.components = {}

    # transport ID -> RouterTransport
    self.transports = {}

    # "global" object shared between all components hosted in this worker
    self.components_shared = {
        u'reactor': reactor
    }
def setUp(self):
    """
    Set up a router factory with realm 'realm1', an allow-all
    'test_role' on the com.myapp. prefix, and a session factory.
    """
    # router factory under test
    self.router_factory = RouterFactory(None, None)

    # start the test realm
    self.realm = RouterRealm(None, {u'name': u'realm1'})
    self.router_factory.start_realm(self.realm)

    # role granting every action below com.myapp.
    grant_all = {
        u'uri': u'com.myapp.',
        u'match': u'prefix',
        u'allow': {
            u'call': True,
            u'register': True,
            u'publish': True,
            u'subscribe': True,
        }
    }
    self.router = self.router_factory.get(u'realm1')
    self.router.add_role(
        RouterRoleStaticAuth(self.router, u'test_role', default_permissions=grant_all))

    # factory producing router-side sessions
    self.session_factory = RouterSessionFactory(self.router_factory)
def setUp(self):
    """
    Set up router and router session factories: one realm ('realm1')
    with allow-all permissions granted both to 'test_role' and to the
    default (None) role.
    """
    # router factory under test
    self.router_factory = RouterFactory(u'mynode')

    # start the test realm
    self.router_factory.start_realm(RouterRealm(None, {u'name': u'realm1'}))

    # allow every action on every URI (empty prefix matches all)
    default_permissions = {
        u'uri': u'',
        u'match': u'prefix',
        u'allow': {
            u'call': True,
            u'register': True,
            u'publish': True,
            u'subscribe': True
        }
    }
    self.router = self.router_factory.get(u'realm1')

    # grant the same permissions to the named test role and the default role
    for role_name in (u'test_role', None):
        self.router.add_role(
            RouterRoleStaticAuth(self.router, role_name, default_permissions=default_permissions))

    # factory producing router-side sessions
    self.session_factory = RouterSessionFactory(self.router_factory)
def onJoin(self, details):
    """
    Called when the worker process has joined the node's management realm.

    Creates the router and session factories, initializes the
    realm/component/transport bookkeeping maps, registers the worker's
    management API procedures and finally signals readiness.
    """
    yield NativeWorkerSession.onJoin(self, details, publish_ready=False)

    # produces one router per realm
    self._router_factory = RouterFactory(self._node_id)

    # produces sessions attached to routers from the factory above
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    # bookkeeping maps:
    # realm ID -> RouterRealm
    self.realms = {}
    # realm URI -> realm ID
    self.realm_to_id = {}
    # component ID -> RouterComponent
    self.components = {}
    # transport ID -> RouterTransport
    self.transports = {}

    # management API procedures this worker exposes
    procs = [
        'get_router_realms',
        'start_router_realm',
        'stop_router_realm',
        'get_router_realm_roles',
        'start_router_realm_role',
        'stop_router_realm_role',
        'get_router_realm_uplinks',
        'start_router_realm_uplink',
        'stop_router_realm_uplink',
        'get_router_components',
        'start_router_component',
        'stop_router_component',
        'get_router_transports',
        'start_router_transport',
        'stop_router_transport',
    ]

    pending = []
    for proc in procs:
        uri = '{}.{}'.format(self._uri_prefix, proc)
        self.log.debug("Registering management API procedure {proc}", proc=uri)
        pending.append(
            self.register(getattr(self, proc), uri,
                          options=RegisterOptions(details_arg='details')))

    regs = yield DeferredList(pending)
    self.log.debug("Registered {cnt} management API procedures", cnt=len(regs))

    # NativeWorkerSession.publish_ready()
    yield self.publish_ready()
def setUp(self):
    """
    Build the test fixture: a router factory with realm 'realm1', a
    'test_role' permitting everything under com.myapp., and a router
    session factory.
    """
    # router factory with a single test realm
    self.router_factory = RouterFactory(None, None)
    self.realm = RouterRealm(None, {u'name': u'realm1'})
    self.router_factory.start_realm(self.realm)

    # attach a role permitting call/register/publish/subscribe on com.myapp.*
    self.router = self.router_factory.get(u'realm1')
    permissions = {
        u'uri': u'com.myapp.',
        u'match': u'prefix',
        u'allow': {
            u'call': True,
            u'register': True,
            u'publish': True,
            u'subscribe': True,
        }
    }
    self.router.add_role(
        RouterRoleStaticAuth(self.router, u'test_role', default_permissions=permissions))

    # factory producing router-side sessions
    self.session_factory = RouterSessionFactory(self.router_factory)
def setUp(self):
    """
    Create the router factory and realm used by the tests, granting an
    allow-all 'test_role' on the com.myapp. prefix.
    """
    # router factory under test
    self.router_factory = RouterFactory()

    # start the test realm
    self.realm = RouterRealm(None, {u"name": u"realm1"})
    self.router_factory.start_realm(self.realm)

    # allow-all permissions for the test role
    permissive = {
        u"uri": u"com.myapp.",
        u"match": u"prefix",
        u"allow": {
            u"call": True,
            u"register": True,
            u"publish": True,
            u"subscribe": True,
        },
    }
    self.router = self.router_factory.get(u"realm1")
    self.router.add_role(
        RouterRoleStaticAuth(self.router, u"test_role", default_permissions=permissive))

    # factory producing router-side sessions
    self.session_factory = RouterSessionFactory(self.router_factory)
def setUp(self):
    """
    Set up router and router session factories, with a default (None)
    role carrying blanket RouterPermissions.
    """
    # router factory under test
    self.router_factory = RouterFactory()
    self.router_factory.start_realm(RouterRealm(None, {u'name': u'realm1'}))

    # blanket permissions on the empty-URI prefix (matches everything)
    router = self.router_factory.get(u'realm1')
    allow_all = RouterPermissions('', True, True, True, True, True)
    router.add_role(RouterRoleStaticAuth(router, None, default_permissions=allow_all))

    # factory producing router-side sessions
    self.session_factory = RouterSessionFactory(self.router_factory)
def start_from_config(self, config):
    """
    Start this node from the given node configuration.

    Sets the controller process title, determines the node ID and
    management realm, creates the node controller session and the local
    management router, detects installed WAMPlets, and finally runs the
    node configuration.

    :param config: The parsed node configuration.
    :type config: dict
    """
    controller_config = config.get('controller', {})
    controller_options = controller_config.get('options', {})
    controller_title = controller_options.get('title', 'crossbar-controller')

    # set the controller process title (best-effort: setproctitle is optional)
    try:
        import setproctitle
    except ImportError:
        log.msg("Warning, could not set process title (setproctitle not installed)")
    else:
        setproctitle.setproctitle(controller_title)

    # the node's name (must be unique within the management realm);
    # fall back to the hostname when not configured explicitly
    if 'id' in controller_config:
        self._node_id = controller_config['id']
    else:
        self._node_id = socket.gethostname()

    # the node's management realm
    self._realm = controller_config.get('realm', 'crossbar')

    # the node controller singleton WAMP application session
    # session_config = ComponentConfig(realm = options.realm, extra = options)
    self._controller = NodeControllerSession(self)

    # router and factory that creates router sessions
    self._router_factory = RouterFactory(
        options=RouterOptions(uri_check=RouterOptions.URI_CHECK_LOOSE),
        debug=False)
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    # add the node controller singleton session to the router
    self._router_session_factory.add(self._controller)

    # detect WAMPlets available in the environment
    wamplets = self._controller._get_wamplets()
    if len(wamplets) > 0:
        log.msg("Detected {} WAMPlets in environment:".format(len(wamplets)))
        for wpl in wamplets:
            log.msg("WAMPlet {}.{}".format(wpl['dist'], wpl['name']))
    else:
        # FIX: corrected typo 'enviroment' -> 'environment' in log message
        log.msg("No WAMPlets detected in environment.")

    self.run_node_config(config)
def make_router(realm_name=u'default'):
    """
    Create a router for ``realm_name`` with an allow-all 'anonymous'
    role and a service session attached.

    :param realm_name: URI of the realm to start.
    :return: tuple of the router and a RawSocket server factory.
    """
    # the factory producing per-realm routers
    router_factory = RouterFactory()

    # start the target realm and attach a service session to its router
    realm = RouterRealm(None, {u'name': realm_name})
    realm_router = router_factory.start_realm(realm)
    extra = {}
    session_config = ComponentConfig(realm_name, extra)
    realm.session = RouterServiceSession(session_config, realm_router)

    # allow-all permissions for the anonymous role (empty prefix matches all)
    default_permissions = {
        u'uri': u'',
        u'match': u'prefix',
        u'allow': {
            u'call': True,
            u'register': True,
            u'publish': True,
            u'subscribe': True
        }
    }
    router = router_factory.get(realm_name)
    router.add_role(
        RouterRoleStaticAuth(router, 'anonymous', default_permissions=default_permissions))

    # session factory, with the realm service session attached as 'trusted'
    session_factory = RouterSessionFactory(router_factory)
    session_factory.add(realm.session, authrole=u'trusted')

    # RawSocket transport factory bound to the session factory
    server_factory = WampRawSocketServerFactory(session_factory, {})

    return router, server_factory
def make_router():
    """
    Build a router factory together with a UniSocket server factory
    that speaks RawSocket.

    :return: tuple (router factory, server factory, session factory).
    """
    # per-realm router production
    router_factory = RouterFactory(None)

    # sessions attached to routers from the factory above
    session_factory = RouterSessionFactory(router_factory)

    # RawSocket endpoint wrapped into a UniSocket multiplexing factory
    rawsocket = WampRawSocketServerFactory(session_factory, {})
    server_factory = UniSocketServerFactory(rawsocket_factory=rawsocket)

    return router_factory, server_factory, session_factory
def setUp(self):
    """
    Set up router and router session factories on node u'mynode', with
    a default (None) role carrying blanket RouterPermissions.
    """
    # router factory under test
    self.router_factory = RouterFactory(u'mynode')
    self.router_factory.start_realm(RouterRealm(None, {u'name': u'realm1'}))

    # blanket permissions on the empty-URI prefix (matches everything)
    router = self.router_factory.get(u'realm1')
    allow_all = RouterPermissions(u'', True, True, True, True, True)
    router.add_role(RouterRoleStaticAuth(router, None, default_permissions=allow_all))

    # factory producing router-side sessions
    self.session_factory = RouterSessionFactory(self.router_factory)
def make_router():
    """
    Build a router factory and a UniSocket server factory that serves
    both WebSocket (mapped on '/') and RawSocket connections.

    :return: tuple (router factory, server factory, session factory).
    """
    # per-realm router production
    router_factory = RouterFactory(None, None)

    # sessions attached to routers from the factory above
    session_factory = RouterSessionFactory(router_factory)

    # the two transports multiplexed by UniSocket
    websocket = WampWebSocketServerFactory(session_factory, '.', {}, None)
    rawsocket = WampRawSocketServerFactory(session_factory, {})
    server_factory = UniSocketServerFactory(
        websocket_factory_map={'/': websocket},
        rawsocket_factory=rawsocket)

    return router_factory, server_factory, session_factory
class Node(object): """ Crossbar.io Standalone node personality. """ NODE_CONTROLLER = NodeController ROUTER_SERVICE = RouterServiceAgent CONFIG_SOURCE_DEFAULT = 1 CONFIG_SOURCE_EMPTY = 2 CONFIG_SOURCE_LOCALFILE = 3 CONFIG_SOURCE_XBRNETWORK = 4 CONFIG_SOURCE_TO_STR = { 1: 'default', 2: 'empty', 3: 'localfile', 4: 'xbrnetwork', } # A Crossbar.io node is the running a controller process and one or multiple # worker processes. # A single Crossbar.io node runs exactly one instance of this class, hence # this class can be considered a system singleton. log = make_logger() def __init__(self, personality, cbdir=None, reactor=None, native_workers=None, options=None): """ :param cbdir: The node directory to run from. :type cbdir: unicode :param reactor: Reactor to run on. :type reactor: :class:`twisted.internet.reactor` or None """ self.personality = personality self.options = options or NodeOptions() self._native_workers = personality.native_workers # node directory self._cbdir = cbdir or '.' # reactor we should run on if reactor is None: from twisted.internet import reactor self._reactor = reactor # allow overriding to add (or remove) native-worker types if native_workers is not None: self._native_workers = native_workers # local node management router self._router_factory = None # session factory for node management router self._router_session_factory = None # the node controller realm self._realm = 'crossbar' # config of this node. self._config = None # node private key :class:`autobahn.wamp.cryptosign.SigningKey` self._node_key = None # when running in managed mode, this will hold the session to CFC self._manager = None # the node's management realm when running in managed mode (this comes from CFC!) self._management_realm = None # the node's ID when running in managed mode (this comes from CFC!) self._node_id = None # node extra when running in managed mode (this comes from CFC!) 
self._node_extra = None # node controller session (a singleton ApplicationSession embedded # in the local node router) self._controller = None self._service_sessions = {} # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES self._node_shutdown_triggers = [NODE_SHUTDOWN_ON_WORKER_EXIT] # will be filled with a Deferred in start(). the Deferred will fire when # the node has shut down, and the result signals if shutdown was clean self._shutdown_complete = None # wil ve filled with a Deferred in start(). the Deferred will fire when # the node has booted completely self._boot_complete = None # for node elements started under specific IDs, and where # the node configuration does not specify an ID, use a generic # name numbered sequentially using the counters here self._worker_no = 1 self._realm_no = 1 self._role_no = 1 self._connection_no = 1 self._transport_no = 1 self._webservice_no = 1 self._component_no = 1 @property def realm(self): return self._realm @property def key(self): """ Returns the node (private signing) key pair. :return: The node key. :rtype: :class:`autobahn.wamp.cryptosign.SigningKey` """ return self._node_key def load_keys(self, cbdir): """ Load node public-private key pair from key files, possibly generating a new key pair if none exists. This is the _first_ function being called after the Node has been instantiated. IMPORTANT: this function is run _before_ start of Twisted reactor! """ was_new, self._node_key = _maybe_generate_key(cbdir) return was_new def load_config(self, configfile=None, default=None): """ Check and load the node configuration from: * from ``.crossbar/config.json`` or * from built-in (empty) default configuration This is the _second_ function being called after the Node has been instantiated. IMPORTANT: this function is run _before_ start of Twisted reactor! 
""" self.log.debug('{klass}.load_config(configfile={configfile}, default={default}) ..', klass=self.__class__.__name__, configfile=configfile, default=default) if configfile: config_path = os.path.abspath(os.path.join(self._cbdir, configfile)) # the following will read the config, check the config and replace # environment variable references in configuration values ("${MYVAR}") and # finally return the parsed configuration object self._config = self.personality.check_config_file(self.personality, config_path) config_source = Node.CONFIG_SOURCE_LOCALFILE else: config_path = None if default: self._config = default config_source = Node.CONFIG_SOURCE_DEFAULT else: self._config = {'version': 2, 'controller': {}, 'workers': []} config_source = Node.CONFIG_SOURCE_EMPTY self.personality.check_config(self.personality, self._config) return config_source, config_path def _add_global_roles(self): controller_role_config = { # there is exactly 1 WAMP component authenticated under authrole "controller": the node controller "name": "controller", "permissions": [{ # the node controller can (locally) do "anything" "uri": "crossbar.", "match": "prefix", "allow": { "call": True, "register": True, "publish": True, "subscribe": True }, "disclose": { "caller": True, "publisher": True }, "cache": True }] } self._router_factory.add_role(self._realm, controller_role_config) self.log.info('{func} node-wide role "{authrole}" added on node management router realm "{realm}"', func=hltype(self._add_global_roles), authrole=hlid(controller_role_config['name']), realm=hlid(self._realm)) def _add_worker_role(self, worker_auth_role, options): worker_role_config = { # each (native) worker is authenticated under a worker-specific authrole "name": worker_auth_role, "permissions": [ # the worker requires these permissions to work: { # management API provided by the worker. note that the worker management API is provided under # the URI prefix "crossbar.worker.<worker_id>". 
note that the worker is also authenticated # under authrole <worker_auth_role> on realm "crossbar" "uri": worker_auth_role, "match": "prefix", "allow": { "call": True, "register": True, "publish": True, "subscribe": True }, "disclose": { "caller": True, "publisher": True }, "cache": True }, { # controller procedure called by the worker (to check for controller status) "uri": "crossbar.get_status", "match": "exact", "allow": { "call": True, "register": False, "publish": False, "subscribe": False }, "disclose": { "caller": True, "publisher": True }, "cache": True } ] } # if configured to expose the controller connection within the worker (to make it available # in user code such as dynamic authenticators and router/container components), also add # permissions to actually use the (local) node management API if options.get('expose_controller', True): vendor_permissions = { "uri": "crossbar.", "match": "prefix", "allow": { "call": True, "register": False, "publish": False, "subscribe": True }, "disclose": { "caller": True, "publisher": True }, "cache": True } worker_role_config["permissions"].append(vendor_permissions) self._router_factory.add_role(self._realm, worker_role_config) self.log.info('worker-specific role "{authrole}" added on node management router realm "{realm}" {func}', func=hltype(self._add_worker_role), authrole=hlid(worker_role_config['name']), realm=hlid(self._realm)) def _drop_worker_role(self, worker_auth_role): self._router_factory.drop_role(self._realm, worker_auth_role) def _extend_worker_args(self, args, options): pass def _add_extra_controller_components(self, controller_config): pass def _set_shutdown_triggers(self, controller_options): # allow to override node shutdown triggers # if 'shutdown' in controller_options: self._node_shutdown_triggers = controller_options['shutdown'] self.log.info("Using node shutdown triggers {triggers} from configuration", triggers=self._node_shutdown_triggers) else: self._node_shutdown_triggers = 
[NODE_SHUTDOWN_ON_WORKER_EXIT] self.log.info("Using default node shutdown triggers {triggers}", triggers=self._node_shutdown_triggers) def set_service_session(self, session, realm, authrole=None): self.log.info('{func}(session={session}, realm="{realm}", authrole="{authrole}")', func=hltype(self.set_service_session), session=session, realm=hlid(realm), authrole=hlid(authrole)) if realm not in self._service_sessions: self._service_sessions[realm] = {} self._service_sessions[realm][authrole] = session def get_service_session(self, realm, authrole=None): if realm in self._service_sessions: if authrole in self._service_sessions[realm]: session = self._service_sessions[realm][authrole] self.log.info('{func}(session={session}, realm="{realm}", authrole="{authrole}")', func=hltype(self.get_service_session), session=session, realm=hlid(realm), authrole=hlid(authrole)) return succeed(session) return succeed(None) def stop(self, restart=False): self._controller._shutdown_was_clean = True return self._controller.shutdown(restart=restart) @inlineCallbacks def start(self, node_id=None): """ Starts this node. This will start a node controller and then spawn new worker processes as needed. The node keys (``load_keys``) and configuration (``load_config``) has to be loaded before starting the node. This is the _third_ function being called after the Node has been instantiated. 
""" self.log.info('{note} [{method}]', note=hl('Starting node ..', color='green', bold=True), method=hltype(Node.start)) # a configuration must have been loaded before if not self._config: self.log.warn('no node configuration set - will use empty node configuration!') self._config = {} # a node can only be started once for now assert self._shutdown_complete is None assert self._node_id is None # get controller config/options controller_config = self._config.get('controller', {}) controller_options = controller_config.get('options', {}) # the node ID: CLI takes precedence over config over hostname if node_id: self._node_id = node_id _node_id_source = 'explicit run-time argument' elif 'id' in controller_config: self._node_id = controller_config['id'] _node_id_source = 'explicit configuration' else: self._node_id = '{}-{}'.format(socket.gethostname(), os.getpid()).lower() _node_id_source = 'hostname/pid' self.log.info('Node ID {node_id} set from {node_id_source}', node_id=hlid(self._node_id), node_id_source=_node_id_source) # set controller process title try: import setproctitle except ImportError: self.log.warn("Warning, could not set process title (setproctitle not installed)") else: setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller')) # add the node controller singleton component self._controller = self.NODE_CONTROLLER(self) # local node management router self._router_factory = RouterFactory(self._node_id, None, None) self._router_session_factory = RouterSessionFactory(self._router_factory) # start node-wide realm on node management router rlm_config = {'name': self._realm} rlm = RouterRealm(self._controller, None, rlm_config) router = self._router_factory.start_realm(rlm) # setup global static roles self._add_global_roles() # always add a realm service session cfg = ComponentConfig(self._realm, controller=self._controller) rlm.session = (self.ROUTER_SERVICE)(cfg, router) self._router_session_factory.add(rlm.session, router, 
authid='serviceagent', authrole='trusted') self.log.info('{func} router service agent session attached [{router_service}]', func=hltype(self.start), router_service=hltype(self.ROUTER_SERVICE)) self._router_session_factory.add(self._controller, router, authid='nodecontroller', authrole='controller') self._service_sessions[self._realm] = self._controller self.log.info('{func} node controller session attached [{node_controller}]', func=hltype(self.start), node_controller=hltype(self.NODE_CONTROLLER)) # add extra node controller components self._add_extra_controller_components(controller_config) # setup Node shutdown triggers self._set_shutdown_triggers(controller_options) # setup node shutdown Deferred self._shutdown_complete = Deferred() # setup node booted complete Deferred self._boot_complete = Deferred() # startup the node personality .. self.log.info('{func}::NODE_BOOT_BEGIN', func=hltype(self.personality.Node.boot)) res = yield self.personality.Node.boot(self) self.log.info('{func}::NODE_BOOT_COMPLETE', func=hltype(self.personality.Node.boot)) # notify observers of boot completition self._boot_complete.callback(res) # notify systemd that we are fully up and running try: import sdnotify except ImportError: # do nothing on non-systemd platforms pass else: sdnotify.SystemdNotifier().notify("READY=1") # return a shutdown deferred which we will fire to notify the code that # called start() - which is the main crossbar boot code res = {'shutdown_complete': self._shutdown_complete} returnValue(res) def boot(self): self.log.info('Booting node {method}', method=hltype(Node.boot)) return self.boot_from_config(self._config) @inlineCallbacks def boot_from_config(self, config): """ Startup elements in the node as specified in the provided node configuration. 
""" # get controller configuration subpart controller = config.get('controller', {}) parallel_worker_start = controller.get('options', {}).get('enable_parallel_worker_start', False) self.log.info( '{bootmsg} {method}', bootmsg=hl( 'Booting node from local configuration [parallel_worker_start={}] ..'.format(parallel_worker_start), color='green', bold=True), method=hltype(Node.boot_from_config)) # start Manhole in node controller if 'manhole' in controller: yield self._controller.call('crossbar.start_manhole', controller['manhole'], options=CallOptions()) self.log.debug("controller: manhole started") # startup all workers workers = config.get('workers', []) if len(workers): self.log.info( hl('Will start {} worker{} ..'.format(len(workers), 's' if len(workers) > 1 else ''), color='green', bold=True)) else: self.log.info(hl('No workers configured, nothing to do', color='green', bold=True)) dl = [] for worker in workers: # worker ID if 'id' in worker: worker_id = worker['id'] else: worker_id = 'worker{:03d}'.format(self._worker_no) worker['id'] = worker_id self._worker_no += 1 # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest') worker_type = worker['type'] # native worker processes setup if worker_type in self._native_workers: # set logname depending on native worker type worker_logname = '{} {}'.format(self._native_workers[worker_type]['logname'], hlid(worker_id)) # any worker specific options worker_options = worker.get('options', {}) worker_disabled = worker_options.get('disabled', False) if worker_disabled: self.log.warn( 'SKIP STARTING OF WORKER ! 
("{worker_logname}" disabled from config)', worker_logname=worker_logname, ) else: # start the (native) worker self.log.info( 'Order node to start "{worker_logname}" ..', worker_logname=hlid(worker_logname), ) d = self._controller.call('crossbar.start_worker', worker_id, worker_type, worker_options, options=CallOptions()) @inlineCallbacks def configure_worker(res, worker_logname, worker_type, worker_id, worker): self.log.info( "Ok, node has started {worker_logname}", worker_logname=worker_logname, ) # now configure the worker self.log.info( "Configuring {worker_logname} ..", worker_logname=worker_logname, ) method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_')) try: config_fn = getattr(self, method_name) except AttributeError: raise ValueError("A native worker of type '{}' is configured but " "there is no method '{}' on {}".format( worker_type, method_name, type(self))) try: yield config_fn(worker_logname, worker_id, worker) except ApplicationError as e: if e.error != 'wamp.error.canceled': raise self.log.info( 'Ok, worker "{worker_logname}" configured and ready!', worker_logname=hlid(worker_logname), ) d.addCallback(configure_worker, worker_logname, worker_type, worker_id, worker) # guest worker processes setup elif worker_type == 'guest': # now actually start the (guest) worker .. 
# FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers # only take the options (which is part of the whole config item for the worker) d = self._controller.call('crossbar.start_worker', worker_id, worker_type, worker, options=CallOptions()) else: raise Exception('logic error: unexpected worker_type="{}"'.format(worker_type)) if parallel_worker_start: dl.append(d) else: yield d yield gatherResults(dl) self.log.info(hl('Ok, local node configuration ran successfully.', color='green', bold=True)) @inlineCallbacks def _configure_native_worker_common(self, worker_logname, worker_id, worker): # expanding PYTHONPATH of the newly started worker is now done # directly in NodeController._start_native_worker worker_options = worker.get('options', {}) if False: if 'pythonpath' in worker_options: added_paths = yield self._controller.call('crossbar.worker.{}.add_pythonpath'.format(worker_id), worker_options['pythonpath'], options=CallOptions()) self.log.warn("{worker}: PYTHONPATH extended for {paths}", worker=worker_logname, paths=added_paths) # FIXME: as the CPU affinity is in the worker options, this _also_ (see above fix) # should be done directly in NodeController._start_native_worker if True: if 'cpu_affinity' in worker_options: new_affinity = yield self._controller.call('crossbar.worker.{}.set_cpu_affinity'.format(worker_id), worker_options['cpu_affinity'], options=CallOptions()) self.log.debug("{worker}: CPU affinity set to {affinity}", worker=worker_logname, affinity=new_affinity) # this is fine to start after the worker has been started, as manhole is # CB developer/support feature anyways (like a vendor diagnostics port) if 'manhole' in worker: yield self._controller.call('crossbar.worker.{}.start_manhole'.format(worker_id), worker['manhole'], options=CallOptions()) self.log.debug("{worker}: manhole started", worker=worker_logname) @inlineCallbacks def _configure_native_worker_router(self, worker_logname, worker_id, worker): 
yield self._configure_native_worker_common(worker_logname, worker_id, worker) # start realms on router for realm in worker.get('realms', []): # start realm if 'id' in realm: realm_id = realm['id'] else: realm_id = 'realm{:03d}'.format(self._realm_no) realm['id'] = realm_id self._realm_no += 1 self.log.info( "Order {worker_logname} to start Realm {realm_id}", worker_logname=worker_logname, realm_id=hlid(realm_id), ) yield self._controller.call('crossbar.worker.{}.start_router_realm'.format(worker_id), realm_id, realm, options=CallOptions()) self.log.info( "Ok, {worker_logname} has started Realm {realm_id}", worker_logname=worker_logname, realm_id=hlid(realm_id), ) # add roles to realm for role in realm.get('roles', []): if 'id' in role: role_id = role['id'] else: role_id = 'role{:03d}'.format(self._role_no) role['id'] = role_id self._role_no += 1 self.log.info( "Order Realm {realm_id} to start Role {role_id}", realm_id=hlid(realm_id), role_id=hlid(role_id), ) yield self._controller.call('crossbar.worker.{}.start_router_realm_role'.format(worker_id), realm_id, role_id, role, options=CallOptions()) self.log.info( "Ok, Realm {realm_id} has started Role {role_id}", realm_id=hlid(realm_id), role_id=hlid(role_id), ) # start components to run embedded in the router for component in worker.get('components', []): if 'id' in component: component_id = component['id'] else: component_id = 'component{:03d}'.format(self._component_no) component['id'] = component_id self._component_no += 1 yield self._controller.call('crossbar.worker.{}.start_router_component'.format(worker_id), component_id, component, options=CallOptions()) self.log.info( "{logname}: component '{component}' started", logname=worker_logname, component=component_id, ) # start transports on router for transport in worker.get('transports', []): if 'id' in transport: transport_id = transport['id'] else: transport_id = 'transport{:03d}'.format(self._transport_no) transport['id'] = transport_id self._transport_no += 1 
add_paths_on_transport_create = False self.log.info( "Order {worker_logname} to start Transport {transport_id}", worker_logname=worker_logname, transport_id=hlid(transport_id), ) yield self._controller.call('crossbar.worker.{}.start_router_transport'.format(worker_id), transport_id, transport, create_paths=add_paths_on_transport_create, options=CallOptions()) self.log.info( "Ok, {worker_logname} has started Transport {transport_id}", worker_logname=worker_logname, transport_id=hlid(transport_id), ) if not add_paths_on_transport_create: if transport['type'] == 'web': paths = transport.get('paths', {}) elif transport['type'] in ('universal'): paths = transport.get('web', {}).get('paths', {}) else: paths = None # Web service paths if paths: for path in sorted(paths): if path != '/': webservice = paths[path] if 'id' in webservice: webservice_id = webservice['id'] else: webservice_id = 'webservice{:03d}'.format(self._webservice_no) webservice['id'] = webservice_id self._webservice_no += 1 self.log.info( "Order Transport {transport_id} to start Web Service {webservice_id}", transport_id=hlid(transport_id), webservice_id=hlid(webservice_id), path=hluserid(path), ) yield self._controller.call( 'crossbar.worker.{}.start_web_transport_service'.format(worker_id), transport_id, path, webservice, options=CallOptions()) self.log.info( "Ok, Transport {transport_id} has started Web Service {webservice_id}", transport_id=hlid(transport_id), webservice_id=hlid(webservice_id), path=hluserid(path), ) # start rlinks for realms dl = [] for realm in worker.get('realms', []): realm_id = realm['id'] for i, rlink in enumerate(realm.get('rlinks', [])): if 'id' in rlink: rlink_id = rlink['id'] else: rlink_id = 'rlink{:03d}'.format(i) rlink['id'] = rlink_id self.log.info( 'Starting router-to-router "{rlink_id}" on realm "{realm_id}" ..', realm_id=hlid(realm_id), rlink_id=hlid(rlink_id), ) d = self._controller.call('crossbar.worker.{}.start_router_realm_link'.format(worker_id), realm_id, 
rlink_id, rlink, options=CallOptions()) def done(_): self.log.info( 'Ok, router-to-router {rlink_id} started on realm "{realm_id}".', realm_id=hlid(realm_id), rlink_id=hlid(rlink_id), ) d.addCallback(done) dl.append(d) # FIXME: rlinks must be started without waiting for them to be established. otherwise the start of other stuff # is waiting for all rlinks to be up! d = gatherResults(dl) @inlineCallbacks def _configure_native_worker_container(self, worker_logname, worker_id, worker): yield self._configure_native_worker_common(worker_logname, worker_id, worker) # start components to run embedded in the container # for component in worker.get('components', []): if 'id' in component: component_id = component['id'] else: component_id = 'component{:03d}'.format(self._component_no) component['id'] = component_id self._component_no += 1 yield self._controller.call('crossbar.worker.{}.start_component'.format(worker_id), component_id, component, options=CallOptions()) self.log.info("{worker}: component '{component_id}' started", worker=worker_logname, component_id=component_id) @inlineCallbacks def _configure_native_worker_websocket_testee(self, worker_logname, worker_id, worker): yield self._configure_native_worker_common(worker_logname, worker_id, worker) # start transport on websocket-testee transport = worker['transport'] transport_id = 'transport{:03d}'.format(self._transport_no) transport['id'] = transport_id self._transport_no = 1 yield self._controller.call('crossbar.worker.{}.start_websocket_testee_transport'.format(worker_id), transport_id, transport, options=CallOptions()) self.log.info( "{logname}: transport '{tid}' started", logname=worker_logname, tid=transport_id, ) @inlineCallbacks def _configure_native_worker_proxy(self, worker_logname, worker_id, worker): yield self._configure_native_worker_common(worker_logname, worker_id, worker) # start transports on proxy for i, transport in enumerate(worker.get('transports', [])): if 'id' in transport: transport_id = 
transport['id'] else: transport_id = 'transport{:03d}'.format(i) transport['id'] = transport_id self.log.info( "Order {worker_logname} to start Transport {transport_id}", worker_logname=worker_logname, transport_id=hlid(transport_id), ) # XXX we're doing startup, and begining proxy workers -- # want to share the web-transport etc etc stuff between # these and otehr kinds of routers / transports yield self._controller.call('crossbar.worker.{}.start_proxy_transport'.format(worker_id), transport_id, transport, options=CallOptions()) if transport['type'] == 'web': paths = transport.get('paths', {}) elif transport['type'] in ('universal'): paths = transport.get('web', {}).get('paths', {}) else: paths = None # Web service paths if paths: for path in sorted(paths): if path != '/': webservice = paths[path] if 'id' in webservice: webservice_id = webservice['id'] else: webservice_id = 'webservice{:03d}'.format(self._webservice_no) webservice['id'] = webservice_id self._webservice_no += 1 self.log.info( "Order Transport {transport_id} to start Web Service {webservice_id}", transport_id=hlid(transport_id), webservice_id=hlid(webservice_id), path=hluserid(path), ) yield self._controller.call('crossbar.worker.{}.start_web_transport_service'.format(worker_id), transport_id, path, webservice, options=CallOptions()) self.log.info( "Ok, Transport {transport_id} has started Web Service {webservice_id}", transport_id=hlid(transport_id), webservice_id=hlid(webservice_id), path=hluserid(path), ) self.log.info( "Ok, {worker_logname} has started Transport {transport_id}", worker_logname=worker_logname, transport_id=hlid(transport_id), ) # set up backend connections on the proxy for i, connection_name in enumerate(worker.get('connections', {})): self.log.debug( "Starting connection {index}: {name}", index=i, name=connection_name, ) yield self._controller.call( 'crossbar.worker.{}.start_proxy_connection'.format(worker_id), connection_name, worker['connections'].get(connection_name, {}), ) # 
set up realms and roles on the proxy for i, realm_name in enumerate(worker.get('routes', {})): roles = worker['routes'][realm_name] for role_id, connections in roles.items(): if not isinstance(connections, list): connections = [connections] # used to be a single string, now a list of strings for connection_id in connections: self.log.debug( "Starting proxy realm route {realm}, {role} to {connection}", realm=realm_name, role=role_id, connection=connection_id, ) yield self._controller.call( 'crossbar.worker.{}.start_proxy_realm_route'.format(worker_id), realm_name, {role_id: connection_id}, )
class TestDealer(unittest.TestCase):
    """
    Unit tests for the WAMP dealer (call/registration routing): call
    cancellation, caller/callee detach handling, forced re-registration
    and concurrency limits.

    NOTE(review): a second class with this exact name is defined later in
    this module and shadows this one at import time, so these tests will
    not be collected by test discovery -- confirm and rename one of them.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """

        # create a router factory
        self.router_factory = RouterFactory(None, None)

        # start a realm
        self.realm = RouterRealm(u'realm-001', {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(
                self.router,
                u'test_role',
                default_permissions={
                    u'uri': u'com.example.',
                    u'match': u'prefix',
                    u'allow': {
                        u'call': True,
                        u'register': True,
                        u'publish': True,
                        u'subscribe': True,
                    }
                }
            )
        )

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    @defer.inlineCallbacks
    def test_outstanding_invoke(self):
        """
        When a call is pending and the callee goes away, it cancels the
        in-flight call
        """
        session = mock.Mock()
        session._realm = u'realm1'
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )
        rap = RouterApplicationSession(session, self.router_factory)

        rap.send(message.Hello(u"realm1", {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Register(1, u'foo'))

        # we can retrieve the Registration via
        # session.mock_calls[-1][1][0] if req'd

        # re-set the authorize, as the Deferred from above is already
        # used-up and it gets called again to authorize the Call
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )
        rap.send(message.Call(42, u'foo'))

        orig = rap.send
        d = defer.Deferred()

        rap.send(message.Goodbye())

        # capture the next message the session sends, then delegate to the
        # original send
        def wrapper(*args, **kw):
            d.callback(args[0])
            return orig(*args, **kw)
        rap.send = wrapper

        # we can do this *after* the call to send() the Goodbye
        # (above) because it takes a reactor-turn to actually
        # process the cancel/errors etc -- hence the Deferred and
        # yield in this test...
        msg = yield d

        self.assertEqual(42, msg.request)
        self.assertEqual(u'wamp.error.canceled', msg.error)

    def test_outstanding_invoke_but_caller_gone(self):
        session = mock.Mock()
        outstanding = mock.Mock()
        outstanding.call.request = 1

        dealer = self.router._dealer
        dealer.attach(session)

        dealer._callee_to_invocations[session] = [outstanding]
        # pretend we've disconnected already
        outstanding.caller._transport = None

        dealer.detach(session)

        # no Interrupt may have been sent -- the caller's transport is gone
        self.assertEqual([], outstanding.mock_calls)

    def test_call_cancel(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        session._session_roles = {'callee': role.RoleCalleeFeatures(call_canceling=True)}

        dealer = self.router._dealer
        dealer.attach(session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})
        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(session, message.Register(
            1,
            u'com.example.my.proc',
            u'exact',
            message.Register.INVOKE_SINGLE,
            1
        ))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(session, message.Call(
            2,
            u'com.example.my.proc',
            []
        ))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.processCancel(session, message.Cancel(
            2
        ))

        # should receive an INTERRUPT from the dealer now
        interrupt_msg = last_message['1']
        self.assertIsInstance(interrupt_msg, message.Interrupt)
        self.assertEqual(interrupt_msg.request, invocation_msg.request)

        dealer.processInvocationError(session, message.Error(
            message.Invocation.MESSAGE_TYPE,
            invocation_msg.request,
            u'wamp.error.canceled'
        ))

        call_error_msg = last_message['1']
        self.assertIsInstance(call_error_msg, message.Error)
        self.assertEqual(message.Call.MESSAGE_TYPE, call_error_msg.request_type)
        self.assertEqual(u'wamp.error.canceled', call_error_msg.error)

    def test_call_cancel_two_sessions(self):
        """
        this has 2 different session using the same ID (42) for their Call
        requests to confirm we deal with the fact that these IDs are
        only unique per-session properly
        """
        messages = []

        def session_send(msg):
            messages.append(msg)

        session0 = mock.Mock()
        session0._transport.send = session_send
        session0._session_roles = {'callee': role.RoleCalleeFeatures(call_canceling=True)}

        session1 = mock.Mock()
        session1._transport.send = session_send
        session1._session_roles = {'callee': role.RoleCalleeFeatures(call_canceling=True)}

        dealer = self.router._dealer
        dealer.attach(session0)
        dealer.attach(session1)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})
        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(session0, message.Register(
            1,
            u'com.example.my.proc',
            u'exact',
            message.Register.INVOKE_SINGLE,
            2
        ))

        registered_msg = messages[-1]
        self.assertIsInstance(registered_msg, message.Registered)

        # two calls outstanding to the endpoint, both happen to use
        # the same ID (42) which is legal
        dealer.processCall(session0, message.Call(
            42,
            u'com.example.my.proc',
            []
        ))
        invocation_msg0 = messages[-1]
        self.assertIsInstance(invocation_msg0, message.Invocation)
        dealer.processCall(session1, message.Call(
            42,
            u'com.example.my.proc',
            []
        ))
        invocation_msg1 = messages[-1]
        self.assertIsInstance(invocation_msg1, message.Invocation)

        # now, cancel the first session's call
        dealer.processCancel(session0, message.Cancel(
            42
        ))

        # should receive an INTERRUPT from the dealer now (for the
        # correct session only)
        interrupt_msg0 = messages[-1]
        self.assertIsInstance(interrupt_msg0, message.Interrupt)
        self.assertEqual(interrupt_msg0.request, invocation_msg0.request)

        dealer.processInvocationError(session0, message.Error(
            message.Invocation.MESSAGE_TYPE,
            invocation_msg0.request,
            u'wamp.error.canceled'
        ))

    def test_call_cancel_without_callee_support(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        # callee does NOT announce call_canceling support
        session._session_roles = {'callee': role.RoleCalleeFeatures()}

        dealer = self.router._dealer
        dealer.attach(session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})
        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(session, message.Register(
            1,
            u'com.example.my.proc',
            u'exact',
            message.Register.INVOKE_SINGLE,
            1
        ))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(session, message.Call(
            2,
            u'com.example.my.proc',
            []
        ))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.processCancel(session, message.Cancel(
            2
        ))

        # set message to None to make sure that we get nothing back
        last_message['1'] = None

        # should NOT receive an INTERRUPT from the dealer now
        interrupt_msg = last_message['1']
        self.assertIsNone(interrupt_msg)

    def test_force_reregister_kick(self):
        """
        Kick an existing registration with force_reregister=True
        """
        session = mock.Mock()
        session._realm = u'realm1'
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )
        rap = RouterApplicationSession(session, self.router_factory)

        rap.send(message.Hello(u"realm1", {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Register(1, u'foo'))
        reg_id = session.mock_calls[-1][1][0].registration

        # re-set the authorize, as the Deferred from above is already
        # used-up and it gets called again to authorize the Call
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )

        # re-register the same procedure
        rap.send(message.Register(2, u'foo', force_reregister=True))

        # the first procedure with 'reg_id' as the Registration ID
        # should have gotten kicked out
        unregs = [
            call[1][0] for call in session.mock_calls
            if call[0] == 'onMessage' and isinstance(call[1][0], message.Unregistered)
        ]
        self.assertEqual(1, len(unregs))
        unreg = unregs[0]
        self.assertEqual(0, unreg.request)
        self.assertEqual(reg_id, unreg.registration)

    def test_yield_on_unowned_invocation(self):
        sessionMessages = {'1': None}

        def session1send(msg):
            sessionMessages['1'] = msg

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})
        self.router.authorize = mock.Mock(side_effect=authorize)

        session1 = mock.Mock()
        session1._transport.send = session1send
        session2 = mock.Mock()

        dealer = self.router._dealer
        dealer.attach(session1)
        dealer.attach(session2)

        register = message.Register(1, u'com.example.some.call', u'exact', message.Register.INVOKE_SINGLE, 1)
        dealer.processRegister(session1, register)
        registered = sessionMessages['1']
        self.assertIsInstance(registered, message.Registered)

        call = message.Call(2, u'com.example.some.call', [], {})
        dealer.processCall(session1, call)
        invocation = sessionMessages['1']
        self.assertIsInstance(invocation, message.Invocation)

        yieldMsg = message.Yield(invocation.request, [u'hello'], {})

        # this yield is happening on a different session than the one that
        # just received the invocation
        def yield_from_wrong_session():
            dealer.processYield(session2, yieldMsg)

        # NOTE(review): failUnlessRaises is the deprecated alias of assertRaises
        self.failUnlessRaises(ProtocolError, yield_from_wrong_session)

    def test_caller_detach_interrupt_cancel_supported(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        session._session_roles = {'callee': role.RoleCalleeFeatures(call_canceling=True)}

        caller_session = mock.Mock()

        dealer = self.router._dealer
        dealer.attach(session)
        dealer.attach(caller_session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})
        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(session, message.Register(
            1,
            u'com.example.my.proc',
            u'exact',
            message.Register.INVOKE_SINGLE,
            1
        ))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(caller_session, message.Call(
            2,
            u'com.example.my.proc',
            []
        ))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.detach(caller_session)

        # should receive an INTERRUPT from the dealer now
        interrupt_msg = last_message['1']
        self.assertIsInstance(interrupt_msg, message.Interrupt)
        self.assertEqual(interrupt_msg.request, invocation_msg.request)

    def test_caller_detach_interrupt_cancel_not_supported(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        # callee does NOT announce call_canceling support
        session._session_roles = {'callee': role.RoleCalleeFeatures()}

        caller_session = mock.Mock()

        dealer = self.router._dealer
        dealer.attach(session)
        dealer.attach(caller_session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})
        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(session, message.Register(
            1,
            u'com.example.my.proc',
            u'exact',
            message.Register.INVOKE_SINGLE,
            1
        ))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(caller_session, message.Call(
            2,
            u'com.example.my.proc',
            []
        ))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.detach(caller_session)

        # reset recorded message to make sure we don't receive anything
        last_message['1'] = None

        # should NOT receive an INTERRUPT from the dealer now because we don't support cancellation
        self.assertIsNone(last_message['1'])

    def test_concurrency_with_error(self):
        """
        register a concurrency=1 method, called with errors
        """
        callee_messages = []
        caller_messages = []

        def callee_send(msg):
            callee_messages.append(msg)

        session = mock.Mock()
        session._transport.send = callee_send
        session._session_roles = {'callee': role.RoleCalleeFeatures()}

        def caller_send(msg):
            caller_messages.append(msg)

        caller_session = mock.Mock()
        caller_session._transport.send = caller_send

        dealer = self.router._dealer
        dealer.attach(session)
        dealer.attach(caller_session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})
        self.router.authorize = mock.Mock(side_effect=authorize)

        # we register out procedure, with concurrency=1
        dealer.processRegister(session, message.Register(
            request=1,
            procedure=u'com.example.my.proc',
            match=u'exact',
            invoke=message.Register.INVOKE_SINGLE,
            concurrency=1
        ))

        registered_msg = callee_messages[-1]
        self.assertIsInstance(registered_msg, message.Registered)

        # we have registered our procedure that has concurrency=1
        # and now we call it
        dealer.processCall(caller_session, message.Call(
            2,
            u'com.example.my.proc',
            []
        ))

        # we pretend that the call caused an error of some sort
        invocation_msg = callee_messages[-1]
        self.assertIsInstance(invocation_msg, message.Invocation)
        dealer.processInvocationError(
            session,
            message.Error(
                message.Call.MESSAGE_TYPE,
                invocation_msg.request,
                u"wamp.error.foo",
            )
        )

        self.assertEqual(1, len(caller_messages))
        self.assertEqual(
            u"wamp.error.foo",
            caller_messages[-1].error,
        )

        # now we call it again, which should work because the
        # previously-outstanding call was resolved with an error
        # (before bug 1105 being fixed this wouldn't work properly)
        dealer.processCall(caller_session, message.Call(
            3,
            u'com.example.my.proc',
            ['foo']
        ))
        invocation_msg = callee_messages[-1]
        self.assertIsInstance(invocation_msg, message.Invocation)
        self.assertEqual(1, len(caller_messages), "got an extra unexpected message")

        dealer.processYield(
            session,
            message.Yield(
                invocation_msg.request,
                args=['a result'],
            )
        )
        result_msg = caller_messages[-1]
        self.assertIsInstance(result_msg, message.Result)
        self.assertEqual(result_msg.args, ['a result'])
class TestDealer(unittest.TestCase):
    """
    Unit tests for the WAMP dealer (call/registration routing): call
    cancellation, caller/callee detach handling, forced re-registration
    and concurrency limits.

    NOTE(review): this class duplicates an earlier class of the same name
    in this module (this later definition shadows the earlier one at
    import time) -- confirm and remove or rename one of them.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """

        # create a router factory
        self.router_factory = RouterFactory(None, None)

        # start a realm
        self.realm = RouterRealm(u'realm-001', {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(self.router,
                                 u'test_role',
                                 default_permissions={
                                     u'uri': u'com.example.',
                                     u'match': u'prefix',
                                     u'allow': {
                                         u'call': True,
                                         u'register': True,
                                         u'publish': True,
                                         u'subscribe': True,
                                     }
                                 }))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    @defer.inlineCallbacks
    def test_outstanding_invoke(self):
        """
        When a call is pending and the callee goes away, it cancels the
        in-flight call
        """
        session = mock.Mock()
        session._realm = u'realm1'
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({
                u'allow': True,
                u'disclose': True
            }))
        rap = RouterApplicationSession(session, self.router_factory)

        rap.send(
            message.Hello(u"realm1", {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Register(1, u'foo'))

        # we can retrieve the Registration via
        # session.mock_calls[-1][1][0] if req'd

        # re-set the authorize, as the Deferred from above is already
        # used-up and it gets called again to authorize the Call
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({
                u'allow': True,
                u'disclose': True
            }))
        rap.send(message.Call(42, u'foo'))

        orig = rap.send
        d = defer.Deferred()

        rap.send(message.Goodbye())

        # capture the next message the session sends, then delegate to the
        # original send
        def wrapper(*args, **kw):
            d.callback(args[0])
            return orig(*args, **kw)

        rap.send = wrapper

        # we can do this *after* the call to send() the Goodbye
        # (above) because it takes a reactor-turn to actually
        # process the cancel/errors etc -- hence the Deferred and
        # yield in this test...
        msg = yield d

        self.assertEqual(42, msg.request)
        self.assertEqual(u'wamp.error.canceled', msg.error)

    def test_outstanding_invoke_but_caller_gone(self):
        session = mock.Mock()
        outstanding = mock.Mock()
        outstanding.call.request = 1

        dealer = self.router._dealer
        dealer.attach(session)

        dealer._callee_to_invocations[session] = [outstanding]
        # pretend we've disconnected already
        outstanding.caller._transport = None

        dealer.detach(session)

        # no Interrupt may have been sent -- the caller's transport is gone
        self.assertEqual([], outstanding.mock_calls)

    def test_call_cancel(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        session._session_roles = {
            'callee': role.RoleCalleeFeatures(call_canceling=True)
        }

        dealer = self.router._dealer
        dealer.attach(session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})

        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(
            session,
            message.Register(1, u'com.example.my.proc', u'exact',
                             message.Register.INVOKE_SINGLE, 1))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(session,
                           message.Call(2, u'com.example.my.proc', []))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.processCancel(session, message.Cancel(2))

        # should receive an INTERRUPT from the dealer now
        interrupt_msg = last_message['1']
        self.assertIsInstance(interrupt_msg, message.Interrupt)
        self.assertEqual(interrupt_msg.request, invocation_msg.request)

        dealer.processInvocationError(
            session,
            message.Error(message.Invocation.MESSAGE_TYPE,
                          invocation_msg.request, u'wamp.error.canceled'))

        call_error_msg = last_message['1']
        self.assertIsInstance(call_error_msg, message.Error)
        self.assertEqual(message.Call.MESSAGE_TYPE,
                         call_error_msg.request_type)
        self.assertEqual(u'wamp.error.canceled', call_error_msg.error)

    def test_call_cancel_two_sessions(self):
        """
        this has 2 different session using the same ID (42) for their Call
        requests to confirm we deal with the fact that these IDs are
        only unique per-session properly
        """
        messages = []

        def session_send(msg):
            messages.append(msg)

        session0 = mock.Mock()
        session0._transport.send = session_send
        session0._session_roles = {
            'callee': role.RoleCalleeFeatures(call_canceling=True)
        }

        session1 = mock.Mock()
        session1._transport.send = session_send
        session1._session_roles = {
            'callee': role.RoleCalleeFeatures(call_canceling=True)
        }

        dealer = self.router._dealer
        dealer.attach(session0)
        dealer.attach(session1)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})

        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(
            session0,
            message.Register(1, u'com.example.my.proc', u'exact',
                             message.Register.INVOKE_SINGLE, 2))

        registered_msg = messages[-1]
        self.assertIsInstance(registered_msg, message.Registered)

        # two calls outstanding to the endpoint, both happen to use
        # the same ID (42) which is legal
        dealer.processCall(session0,
                           message.Call(42, u'com.example.my.proc', []))
        invocation_msg0 = messages[-1]
        self.assertIsInstance(invocation_msg0, message.Invocation)
        dealer.processCall(session1,
                           message.Call(42, u'com.example.my.proc', []))
        invocation_msg1 = messages[-1]
        self.assertIsInstance(invocation_msg1, message.Invocation)

        # now, cancel the first session's call
        dealer.processCancel(session0, message.Cancel(42))

        # should receive an INTERRUPT from the dealer now (for the
        # correct session only)
        interrupt_msg0 = messages[-1]
        self.assertIsInstance(interrupt_msg0, message.Interrupt)
        self.assertEqual(interrupt_msg0.request, invocation_msg0.request)

        dealer.processInvocationError(
            session0,
            message.Error(message.Invocation.MESSAGE_TYPE,
                          invocation_msg0.request, u'wamp.error.canceled'))

    def test_call_cancel_without_callee_support(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        # callee does NOT announce call_canceling support
        session._session_roles = {'callee': role.RoleCalleeFeatures()}

        dealer = self.router._dealer
        dealer.attach(session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})

        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(
            session,
            message.Register(1, u'com.example.my.proc', u'exact',
                             message.Register.INVOKE_SINGLE, 1))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(session,
                           message.Call(2, u'com.example.my.proc', []))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.processCancel(session, message.Cancel(2))

        # set message to None to make sure that we get nothing back
        last_message['1'] = None

        # should NOT receive an INTERRUPT from the dealer now
        interrupt_msg = last_message['1']
        self.assertIsNone(interrupt_msg)

    def test_force_reregister_kick(self):
        """
        Kick an existing registration with force_reregister=True
        """
        session = mock.Mock()
        session._realm = u'realm1'
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({
                u'allow': True,
                u'disclose': True
            }))
        rap = RouterApplicationSession(session, self.router_factory)

        rap.send(
            message.Hello(u"realm1", {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Register(1, u'foo'))
        reg_id = session.mock_calls[-1][1][0].registration

        # re-set the authorize, as the Deferred from above is already
        # used-up and it gets called again to authorize the Call
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({
                u'allow': True,
                u'disclose': True
            }))

        # re-register the same procedure
        rap.send(message.Register(2, u'foo', force_reregister=True))

        # the first procedure with 'reg_id' as the Registration ID
        # should have gotten kicked out
        unregs = [
            call[1][0] for call in session.mock_calls
            if call[0] == 'onMessage'
            and isinstance(call[1][0], message.Unregistered)
        ]
        self.assertEqual(1, len(unregs))
        unreg = unregs[0]
        self.assertEqual(0, unreg.request)
        self.assertEqual(reg_id, unreg.registration)

    def test_yield_on_unowned_invocation(self):
        sessionMessages = {'1': None}

        def session1send(msg):
            sessionMessages['1'] = msg

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})

        self.router.authorize = mock.Mock(side_effect=authorize)

        session1 = mock.Mock()
        session1._transport.send = session1send
        session2 = mock.Mock()

        dealer = self.router._dealer
        dealer.attach(session1)
        dealer.attach(session2)

        register = message.Register(1, u'com.example.some.call', u'exact',
                                    message.Register.INVOKE_SINGLE, 1)
        dealer.processRegister(session1, register)
        registered = sessionMessages['1']
        self.assertIsInstance(registered, message.Registered)

        call = message.Call(2, u'com.example.some.call', [], {})
        dealer.processCall(session1, call)
        invocation = sessionMessages['1']
        self.assertIsInstance(invocation, message.Invocation)

        yieldMsg = message.Yield(invocation.request, [u'hello'], {})

        # this yield is happening on a different session than the one that
        # just received the invocation
        def yield_from_wrong_session():
            dealer.processYield(session2, yieldMsg)

        # NOTE(review): failUnlessRaises is the deprecated alias of assertRaises
        self.failUnlessRaises(ProtocolError, yield_from_wrong_session)

    def test_caller_detach_interrupt_cancel_supported(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        session._session_roles = {
            'callee': role.RoleCalleeFeatures(call_canceling=True)
        }

        caller_session = mock.Mock()

        dealer = self.router._dealer
        dealer.attach(session)
        dealer.attach(caller_session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})

        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(
            session,
            message.Register(1, u'com.example.my.proc', u'exact',
                             message.Register.INVOKE_SINGLE, 1))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(caller_session,
                           message.Call(2, u'com.example.my.proc', []))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.detach(caller_session)

        # should receive an INTERRUPT from the dealer now
        interrupt_msg = last_message['1']
        self.assertIsInstance(interrupt_msg, message.Interrupt)
        self.assertEqual(interrupt_msg.request, invocation_msg.request)

    def test_caller_detach_interrupt_cancel_not_supported(self):
        last_message = {'1': []}

        def session_send(msg):
            last_message['1'] = msg

        session = mock.Mock()
        session._transport.send = session_send
        # callee does NOT announce call_canceling support
        session._session_roles = {'callee': role.RoleCalleeFeatures()}

        caller_session = mock.Mock()

        dealer = self.router._dealer
        dealer.attach(session)
        dealer.attach(caller_session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})

        self.router.authorize = mock.Mock(side_effect=authorize)

        dealer.processRegister(
            session,
            message.Register(1, u'com.example.my.proc', u'exact',
                             message.Register.INVOKE_SINGLE, 1))

        registered_msg = last_message['1']
        self.assertIsInstance(registered_msg, message.Registered)

        dealer.processCall(caller_session,
                           message.Call(2, u'com.example.my.proc', []))

        invocation_msg = last_message['1']
        self.assertIsInstance(invocation_msg, message.Invocation)

        dealer.detach(caller_session)

        # reset recorded message to make sure we don't receive anything
        last_message['1'] = None

        # should NOT receive an INTERRUPT from the dealer now because we don't support cancellation
        self.assertIsNone(last_message['1'])

    def test_concurrency_with_error(self):
        """
        register a concurrency=1 method, called with errors
        """
        callee_messages = []
        caller_messages = []

        def callee_send(msg):
            callee_messages.append(msg)

        session = mock.Mock()
        session._transport.send = callee_send
        session._session_roles = {'callee': role.RoleCalleeFeatures()}

        def caller_send(msg):
            caller_messages.append(msg)

        caller_session = mock.Mock()
        caller_session._transport.send = caller_send

        dealer = self.router._dealer
        dealer.attach(session)
        dealer.attach(caller_session)

        def authorize(*args, **kwargs):
            return defer.succeed({u'allow': True, u'disclose': False})

        self.router.authorize = mock.Mock(side_effect=authorize)

        # we register out procedure, with concurrency=1
        dealer.processRegister(
            session,
            message.Register(request=1,
                             procedure=u'com.example.my.proc',
                             match=u'exact',
                             invoke=message.Register.INVOKE_SINGLE,
                             concurrency=1))

        registered_msg = callee_messages[-1]
        self.assertIsInstance(registered_msg, message.Registered)

        # we have registered our procedure that has concurrency=1
        # and now we call it
        dealer.processCall(caller_session,
                           message.Call(2, u'com.example.my.proc', []))

        # we pretend that the call caused an error of some sort
        invocation_msg = callee_messages[-1]
        self.assertIsInstance(invocation_msg, message.Invocation)
        dealer.processInvocationError(
            session,
            message.Error(
                message.Call.MESSAGE_TYPE,
                invocation_msg.request,
                u"wamp.error.foo",
            ))

        self.assertEqual(1, len(caller_messages))
        self.assertEqual(
            u"wamp.error.foo",
            caller_messages[-1].error,
        )

        # now we call it again, which should work because the
        # previously-outstanding call was resolved with an error
        # (before bug 1105 being fixed this wouldn't work properly)
        dealer.processCall(caller_session,
                           message.Call(3, u'com.example.my.proc', ['foo']))
        invocation_msg = callee_messages[-1]
        self.assertIsInstance(invocation_msg, message.Invocation)
        self.assertEqual(1, len(caller_messages),
                         "got an extra unexpected message")

        dealer.processYield(
            session,
            message.Yield(
                invocation_msg.request,
                args=['a result'],
            ))
        result_msg = caller_messages[-1]
        self.assertIsInstance(result_msg, message.Result)
        self.assertEqual(result_msg.args, ['a result'])
class RouterWorkerSession(NativeWorkerSession):
    """
    A native Crossbar.io worker that runs a WAMP router which can manage
    multiple realms, run multiple transports and links, as well as host
    multiple (embedded) application components.
    """
    # worker type identifier used by the node controller
    WORKER_TYPE = 'router'

    @inlineCallbacks
    def onJoin(self, details):
        """
        Called when worker process has joined the node's management realm.

        Initializes the router/session factories and all bookkeeping maps,
        then registers the management API procedures before signalling
        readiness to the node controller.
        """
        yield NativeWorkerSession.onJoin(self, details, publish_ready=False)

        # factory for producing (per-realm) routers
        self._router_factory = RouterFactory()

        # factory for producing router sessions
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        # map: realm ID -> RouterRealm
        self.realms = {}

        # map: realm URI -> realm ID
        self.realm_to_id = {}

        # map: transport ID -> RouterTransport
        self.transports = {}

        # map: link ID -> RouterLink
        self.links = {}

        # map: component ID -> RouterComponent
        self.components = {}

        # the procedures registered
        procs = [
            'get_router_realms',
            'start_router_realm',
            'stop_router_realm',
            'get_router_realm_roles',
            'start_router_realm_role',
            'stop_router_realm_role',
            'get_router_components',
            'start_router_component',
            'stop_router_component',
            'get_router_transports',
            'start_router_transport',
            'stop_router_transport',
            'get_router_links',
            'start_router_link',
            'stop_router_link'
        ]

        dl = []
        for proc in procs:
            uri = '{}.{}'.format(self._uri_prefix, proc)
            self.log.debug("Registering management API procedure {proc}", proc=uri)
            dl.append(self.register(getattr(self, proc), uri, options=RegisterOptions(details_arg='details')))

        regs = yield DeferredList(dl)

        self.log.debug("Registered {cnt} management API procedures", cnt=len(regs))

        # NativeWorkerSession.publish_ready()
        yield self.publish_ready()

    def get_router_realms(self, details=None):
        """
        List realms currently managed by this router.
        """
        self.log.debug("{}.get_router_realms".format(self.__class__.__name__))

        # NOTE(review): registered in the management API but not implemented
        raise Exception("not implemented")

    def start_router_realm(self, id, config, schemas=None, details=None):
        """
        Starts a realm managed by this router.

        :param id: The ID of the realm to start.
        :type id: str
        :param config: The realm configuration.
        :type config: dict
        :param schemas: An (optional) initial schema dictionary to load.
        :type schemas: dict
        """
        self.log.debug("{}.start_router_realm".format(self.__class__.__name__), id=id, config=config, schemas=schemas)

        # URI of the realm to start
        realm = config['name']

        # track realm
        rlm = RouterRealm(id, config)
        self.realms[id] = rlm
        self.realm_to_id[realm] = id

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        cfg = ComponentConfig(realm)

        rlm.session = RouterServiceSession(cfg, router, schemas)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')

    def stop_router_realm(self, id, close_sessions=False, details=None):
        """
        Stop a router realm.

        When a realm has stopped, no new session will be allowed to attach to the realm.
        Optionally, close all sessions currently attached to the realm.

        :param id: ID of the realm to stop.
        :type id: str
        :param close_sessions: If `True`, close all session currently attached.
        :type close_sessions: bool
        """
        self.log.debug("{}.stop_router_realm".format(self.__class__.__name__), id=id, close_sessions=close_sessions)

        # FIXME
        raise NotImplementedError()

    def get_router_realm_roles(self, id, details=None):
        """
        List roles of the given realm.

        :param id: The ID of the router realm to list roles for.
        :type id: str

        :returns: list -- A list of roles.
        """
        self.log.debug("{}.get_router_realm_roles".format(self.__class__.__name__), id=id)

        if id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        return self.realms[id].roles.values()

    def start_router_realm_role(self, id, role_id, config, details=None):
        """
        Adds a role to a realm.

        :param id: The ID of the realm the role should be added to.
        :type id: str
        :param role_id: The ID of the role to add.
        :type role_id: str
        :param config: The role configuration.
        :type config: dict
        """
        self.log.debug("{}.add_router_realm_role".format(self.__class__.__name__), id=id, role_id=role_id, config=config)

        if id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        if role_id in self.realms[id].roles:
            raise ApplicationError(u"crossbar.error.already_exists", "A role with ID '{}' already exists in realm with ID '{}'".format(role_id, id))

        self.realms[id].roles[role_id] = RouterRealmRole(role_id, config)

        # also activate the role on the (already running) router for the realm
        realm = self.realms[id].config['name']
        self._router_factory.add_role(realm, config)

    def stop_router_realm_role(self, id, role_id, details=None):
        """
        Drop a role from a realm.

        :param id: The ID of the realm to drop a role from.
        :type id: str
        :param role_id: The ID of the role within the realm to drop.
        :type role_id: str
        """
        self.log.debug("{}.drop_router_realm_role".format(self.__class__.__name__), id=id, role_id=role_id)

        if id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        if role_id not in self.realms[id].roles:
            raise ApplicationError(u"crossbar.error.no_such_object", "No role with ID '{}' in realm with ID '{}'".format(role_id, id))

        # NOTE(review): this only drops our bookkeeping entry -- the role is
        # not removed from the running router (no _router_factory.drop_role
        # call here); confirm whether that is intended.
        del self.realms[id].roles[role_id]

    def get_router_components(self, details=None):
        """
        List application components currently running (embedded) in this router.
        """
        self.log.debug("{}.get_router_components".format(self.__class__.__name__))

        # stable listing: sort components by creation time
        res = []
        for component in sorted(self.components.values(), key=lambda c: c.created):
            res.append({
                'id': component.id,
                'created': utcstr(component.created),
                'config': component.config,
            })
        return res

    def start_router_component(self, id, config, details=None):
        """
        Dynamically start an application component to run next to the router in "embedded mode".

        :param id: The ID of the component to start.
        :type id: str
        :param config: The component configuration.
        :type config: obj
        """
        self.log.debug("{}.start_router_component".format(self.__class__.__name__), id=id, config=config)

        # prohibit starting a component twice
        #
        if id in self.components:
            # NOTE(review): stray extra apostrophe after '{}' in this message
            emsg = "Could not start component: a component with ID '{}'' is already running (or starting)".format(id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.already_running', emsg)

        # check configuration
        #
        try:
            checkconfig.check_router_component(config)
        except Exception as e:
            emsg = "Invalid router component configuration: {}".format(e)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
        else:
            self.log.debug("Starting {type}-component on router.", type=config['type'])

        # resolve references to other entities
        #
        references = {}
        for ref in config.get('references', []):
            ref_type, ref_id = ref.split(':')
            if ref_type == u'connection':
                if ref_id in self._connections:
                    references[ref] = self._connections[ref_id]
                else:
                    emsg = "cannot resolve reference '{}' - no '{}' with ID '{}'".format(ref, ref_type, ref_id)
                    self.log.error(emsg)
                    raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
            else:
                emsg = "cannot resolve reference '{}' - invalid reference type '{}'".format(ref, ref_type)
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

        # create component config
        #
        realm = config['realm']
        extra = config.get('extra', None)
        component_config = ComponentConfig(realm=realm, extra=extra)
        create_component = _appsession_loader(config)

        # .. and create and add an WAMP application session to
        # run the component next to the router
        #
        try:
            session = create_component(component_config)

            # any exception spilling out from user code in onXXX handlers is fatal!
def panic(fail, msg): self.log.error("Fatal error in component: {} - {}".format(msg, fail.value)) session.disconnect() session._swallow_error = panic except Exception as e: msg = "{}".format(e).strip() self.log.error("Component instantiation failed:\n\n{err}", err=msg) raise self.components[id] = RouterComponent(id, config, session) self._router_session_factory.add(session, authrole=config.get('role', u'anonymous')) self.log.debug("Added component {id}", id=id) def stop_router_component(self, id, details=None): """ Stop an application component running on this router. **Usage:** This procedure is registered under * ``crossbar.node.<node_id>.worker.<worker_id>.stop_router_component`` **Errors:** The procedure may raise the following errors: * ``crossbar.error.no_such_object`` - no component with given ID is currently running in this router * ``crossbar.error.cannot_stop`` - failed to stop the component running in this router :param id: The ID of the component to stop. :type id: unicode """ self.log.debug("{}.stop_router_component".format(self.__class__.__name__), id=id) if id in self.components: self.log.debug("Worker {}: stopping component {}".format(self.config.extra.worker, id)) try: # self._components[id].disconnect() self._session_factory.remove(self.components[id]) del self.components[id] except Exception as e: raise ApplicationError(u"crossbar.error.cannot_stop", "Failed to stop component {}: {}".format(id, e)) else: raise ApplicationError(u"crossbar.error.no_such_object", "No component {}".format(id)) def get_router_transports(self, details=None): """ List currently running transports. **Usage:** This procedure is registered under * ``crossbar.node.<node_id>.worker.<worker_id>.get_router_transports`` :returns: List of transports currently running. 
        :rtype: list of dict
        """
        self.log.debug("{}.get_router_transports".format(self.__class__.__name__))

        # stable listing: sort transports by creation time
        res = []
        for transport in sorted(self.transports.values(), key=lambda c: c.created):
            res.append({
                'id': transport.id,
                'created': utcstr(transport.created),
                'config': transport.config,
            })
        return res

    def start_router_transport(self, id, config, details=None):
        """
        Start a transport on this router and return when the transport has started.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.start_router_transport``

        The procedure takes a WAMP transport configuration with a listening endpoint, e.g.

        .. code-block:: javascript

            {
                "type": "websocket",
                "endpoint": {
                    "type": "tcp",
                    "port": 8080
                }
            }

        **Errors:**

        The procedure may raise the following errors:

        * ``crossbar.error.invalid_configuration`` - the provided transport configuration is invalid
        * ``crossbar.error.already_running`` - a transport with the given ID is already running (or starting)
        * ``crossbar.error.cannot_listen`` - could not listen on the configured listening endpoint of the transport
        * ``crossbar.error.class_import_failed`` - a side-by-side component could not be instantiated

        **Events:**

        The procedure will publish an event when the transport **is starting** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_starting``

        and publish an event when the transport **has started** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_started``

        :param id: The ID of the transport to start.
        :type id: unicode
        :param config: The transport configuration.
        :type config: dict
        """
        self.log.debug("{}.start_router_transport".format(self.__class__.__name__), id=id, config=config)

        # prohibit starting a transport twice
        #
        if id in self.transports:
            emsg = "Could not start transport: a transport with ID '{}' is already running (or starting)".format(id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.already_running', emsg)

        # check configuration
        #
        try:
            checkconfig.check_router_transport(config)
        except Exception as e:
            emsg = "Invalid router transport configuration: {}".format(e)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
        else:
            self.log.debug("Starting {}-transport on router.".format(config['type']))

        # standalone WAMP-RawSocket transport
        #
        if config['type'] == 'rawsocket':
            transport_factory = WampRawSocketServerFactory(self._router_session_factory, config)
            transport_factory.noisy = False

        # standalone WAMP-WebSocket transport
        #
        elif config['type'] == 'websocket':
            transport_factory = WampWebSocketServerFactory(self._router_session_factory, self.config.extra.cbdir, config, self._templates)
            transport_factory.noisy = False

        # Flash-policy file server pseudo transport
        #
        elif config['type'] == 'flashpolicy':
            transport_factory = FlashPolicyFactory(config.get('allowed_domain', None), config.get('allowed_ports', None))

        # WebSocket testee pseudo transport
        #
        elif config['type'] == 'websocket.testee':
            transport_factory = WebSocketTesteeServerFactory(config, self._templates)

        # Stream testee pseudo transport
        #
        elif config['type'] == 'stream.testee':
            transport_factory = StreamTesteeServerFactory()

        # Twisted Web based transport
        #
        elif config['type'] == 'web':
            options = config.get('options', {})

            # create Twisted Web root resource
            #
            root_config = config['paths']['/']
            root_type = root_config['type']
            root_options = root_config.get('options', {})

            # Static file hierarchy root resource
            #
            if root_type == 'static':
                if 'directory' in root_config:
                    root_dir = os.path.abspath(os.path.join(self.config.extra.cbdir, root_config['directory']))
                elif 'package' in root_config:
                    if 'resource' not in root_config:
                        raise ApplicationError(u"crossbar.error.invalid_configuration", "missing resource")
                    try:
                        mod = importlib.import_module(root_config['package'])
                    except ImportError as e:
                        emsg = "Could not import resource {} from package {}: {}".format(root_config['resource'], root_config['package'], e)
                        self.log.error(emsg)
                        raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
                    else:
                        try:
                            root_dir = os.path.abspath(pkg_resources.resource_filename(root_config['package'], root_config['resource']))
                        except Exception as e:
                            emsg = "Could not import resource {} from package {}: {}".format(root_config['resource'], root_config['package'], e)
                            self.log.error(emsg)
                            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
                        else:
                            mod_version = getattr(mod, '__version__', '?.?.?')
                            self.log.info("Loaded static Web resource '{}' from package '{} {}' (filesystem path {})".format(root_config['resource'], root_config['package'], mod_version, root_dir))
                else:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "missing web spec")

                root_dir = root_dir.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
                self.log.debug("Starting Web service at root directory {}".format(root_dir))

                # create resource for file system hierarchy
                #
                if root_options.get('enable_directory_listing', False):
                    static_resource_class = StaticResource
                else:
                    static_resource_class = StaticResourceNoListing

                cache_timeout = root_options.get('cache_timeout', DEFAULT_CACHE_TIMEOUT)

                root = static_resource_class(root_dir, cache_timeout=cache_timeout)

                # set extra MIME types
                #
                root.contentTypes.update(EXTRA_MIME_TYPES)
                if 'mime_types' in root_options:
                    root.contentTypes.update(root_options['mime_types'])
                patchFileContentTypes(root)

                # render 404 page on any concrete path not found
                #
                root.childNotFound = Resource404(self._templates, root_dir)

            # WSGI root resource
            #
            elif root_type == 'wsgi':
                if not _HAS_WSGI:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI unsupported")

                # wsgi_options = root_config.get('options', {})

                if 'module' not in root_config:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app module")

                if 'object' not in root_config:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app object")

                # import WSGI app module and object
                mod_name = root_config['module']
                try:
                    mod = importlib.import_module(mod_name)
                except ImportError as e:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI app module '{}' import failed: {} - Python search path was {}".format(mod_name, e, sys.path))
                else:
                    obj_name = root_config['object']
                    if obj_name not in mod.__dict__:
                        raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI app object '{}' not in module '{}'".format(obj_name, mod_name))
                    else:
                        app = getattr(mod, obj_name)

                # create a Twisted Web WSGI resource from the user's WSGI application object
                try:
                    wsgi_resource = WSGIResource(self._reactor, self._reactor.getThreadPool(), app)
                except Exception as e:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "could not instantiate WSGI resource: {}".format(e))
                else:
                    # create a root resource serving everything via WSGI
                    root = WSGIRootResource(wsgi_resource, {})

            # Redirecting root resource
            #
            elif root_type == 'redirect':
                redirect_url = root_config['url'].encode('ascii', 'ignore')
                root = RedirectResource(redirect_url)

            # Publisher resource (part of REST-bridge)
            #
            elif root_type == 'publisher':
                # create a vanilla session: the publisher will use this to inject events
                #
                publisher_session_config = ComponentConfig(realm=root_config['realm'], extra=None)
                publisher_session = ApplicationSession(publisher_session_config)

                # add the publishing session to the router
                #
                self._router_session_factory.add(publisher_session, authrole=root_config.get('role', 'anonymous'))

                # now create the publisher Twisted Web resource and add it to resource tree
                #
                root = PublisherResource(root_config.get('options', {}), publisher_session)

            # Webhook resource (part of REST-bridge)
            #
            elif root_type == 'webhook':
                # create a vanilla session: the webhook will use this to inject events
                #
                webhook_session_config = ComponentConfig(realm=root_config['realm'], extra=None)
                webhook_session = ApplicationSession(webhook_session_config)

                # add the publishing session to the router
                #
                self._router_session_factory.add(webhook_session, authrole=root_config.get('role', 'anonymous'))

                # now create the webhook Twisted Web resource and add it to resource tree
                #
                root = WebhookResource(root_config.get('options', {}), webhook_session)

            # Caller resource (part of REST-bridge)
            #
            elif root_type == 'caller':
                # create a vanilla session: the caller will use this to inject calls
                #
                caller_session_config = ComponentConfig(realm=root_config['realm'], extra=None)
                caller_session = ApplicationSession(caller_session_config)

                # add the calling session to the router
                #
                self._router_session_factory.add(caller_session, authrole=root_config.get('role', 'anonymous'))

                # now create the caller Twisted Web resource and add it to resource tree
                #
                root = CallerResource(root_config.get('options', {}), caller_session)

            # Generic Twisted Web resource
            #
            elif root_type == 'resource':
                try:
                    klassname = root_config['classname']
                    self.log.debug("Starting class '{}'".format(klassname))
                    c = klassname.split('.')
                    module_name, klass_name = '.'.join(c[:-1]), c[-1]
                    module = importlib.import_module(module_name)
                    make = getattr(module, klass_name)
                    root = make(root_config.get('extra', {}))
                except Exception as e:
                    emsg = "Failed to import class '{}' - {}".format(klassname, e)
                    self.log.error(emsg)
                    self.log.error("PYTHONPATH: {pythonpath}", pythonpath=sys.path)
                    raise ApplicationError(u"crossbar.error.class_import_failed", emsg)

            # Invalid root resource
            #
            else:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "invalid Web root path type '{}'".format(root_type))

            # create Twisted Web resources on all non-root paths configured
            #
            self.add_paths(root, config.get('paths', {}))

            # create the actual transport factory
            #
            transport_factory = Site(root)
            transport_factory.noisy = False

            # Web access logging
            #
            if not options.get('access_log', False):
                transport_factory.log = lambda _: None

            # Traceback rendering
            #
            transport_factory.displayTracebacks = options.get('display_tracebacks', False)

            # HSTS
            #
            if options.get('hsts', False):
                if 'tls' in config['endpoint']:
                    hsts_max_age = int(options.get('hsts_max_age', 31536000))
                    transport_factory.requestFactory = createHSTSRequestFactory(transport_factory.requestFactory, hsts_max_age)
                else:
                    self.log.warn("Warning: HSTS requested, but running on non-TLS - skipping HSTS")

        # Unknown transport type
        #
        else:
            # should not arrive here, since we did check_transport() in the beginning
            raise Exception("logic error")

        # create transport endpoint / listening port from transport factory
        #
        d = create_listening_port_from_config(config['endpoint'], transport_factory, self.config.extra.cbdir, self._reactor)

        def ok(port):
            # track the listening transport once the port is actually open
            self.transports[id] = RouterTransport(id, config, transport_factory, port)
            # NOTE(review): stray extra apostrophe after '{}' in this log message
            self.log.debug("Router transport '{}'' started and listening".format(id))
            return

        def fail(err):
            emsg = "Cannot listen on transport endpoint: {}".format(err.value)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.cannot_listen", emsg)

        d.addCallbacks(ok, fail)
        return d

    def add_paths(self, resource, paths):
        """
        Add all configured non-root paths under a resource.

        :param resource: The parent resource under which to add paths.
        :type resource: Resource
        :param paths: The path configurations.
        :type paths: dict
        """
        for path in sorted(paths):

            # Twisted putChild() expects bytes for the path segment
            if isinstance(path, six.text_type):
                webPath = path.encode('utf8')
            else:
                webPath = path

            # the root path ("/") was already handled by the caller
            if path != b"/":
                resource.putChild(webPath, self.create_resource(paths[path]))

    def create_resource(self, path_config):
        """
        Creates child resource to be added to the parent.
        :param path_config: Configuration for the new child resource.
        :type path_config: dict

        :returns: Resource -- the new child resource
        """
        # WAMP-WebSocket resource
        #
        if path_config['type'] == 'websocket':
            ws_factory = WampWebSocketServerFactory(self._router_session_factory, self.config.extra.cbdir, path_config, self._templates)

            # FIXME: Site.start/stopFactory should start/stop factories wrapped as Resources
            ws_factory.startFactory()

            return WebSocketResource(ws_factory)

        # Static file hierarchy resource
        #
        elif path_config['type'] == 'static':

            static_options = path_config.get('options', {})

            if 'directory' in path_config:
                static_dir = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))

            elif 'package' in path_config:

                if 'resource' not in path_config:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "missing resource")

                try:
                    mod = importlib.import_module(path_config['package'])
                except ImportError as e:
                    emsg = "Could not import resource {} from package {}: {}".format(path_config['resource'], path_config['package'], e)
                    self.log.error(emsg)
                    raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
                else:
                    try:
                        static_dir = os.path.abspath(pkg_resources.resource_filename(path_config['package'], path_config['resource']))
                    except Exception as e:
                        emsg = "Could not import resource {} from package {}: {}".format(path_config['resource'], path_config['package'], e)
                        self.log.error(emsg)
                        raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            else:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing web spec")

            static_dir = static_dir.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770

            # create resource for file system hierarchy
            #
            if static_options.get('enable_directory_listing', False):
                static_resource_class = StaticResource
            else:
                static_resource_class = StaticResourceNoListing

            cache_timeout = static_options.get('cache_timeout', DEFAULT_CACHE_TIMEOUT)

            static_resource = static_resource_class(static_dir, cache_timeout=cache_timeout)

            # set extra MIME types
            #
            static_resource.contentTypes.update(EXTRA_MIME_TYPES)
            if 'mime_types' in static_options:
                static_resource.contentTypes.update(static_options['mime_types'])
            patchFileContentTypes(static_resource)

            # render 404 page on any concrete path not found
            #
            static_resource.childNotFound = Resource404(self._templates, static_dir)

            return static_resource

        # WSGI resource
        #
        elif path_config['type'] == 'wsgi':
            if not _HAS_WSGI:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI unsupported")

            # wsgi_options = path_config.get('options', {})

            if 'module' not in path_config:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app module")

            if 'object' not in path_config:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app object")

            # import WSGI app module and object
            mod_name = path_config['module']
            try:
                mod = importlib.import_module(mod_name)
            except ImportError as e:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI app module '{}' import failed: {} - Python search path was {}".format(mod_name, e, sys.path))
            else:
                obj_name = path_config['object']
                if obj_name not in mod.__dict__:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI app object '{}' not in module '{}'".format(obj_name, mod_name))
                else:
                    app = getattr(mod, obj_name)

            # create a Twisted Web WSGI resource from the user's WSGI application object
            try:
                wsgi_resource = WSGIResource(self._reactor, self._reactor.getThreadPool(), app)
            except Exception as e:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "could not instantiate WSGI resource: {}".format(e))
            else:
                return wsgi_resource

        # Redirecting resource
        #
        elif path_config['type'] == 'redirect':
            redirect_url = path_config['url'].encode('ascii', 'ignore')
            return RedirectResource(redirect_url)

        # JSON value resource
        #
        elif path_config['type'] == 'json':
            value = path_config['value']

            return JsonResource(value)

        # CGI script resource
        #
        elif path_config['type'] == 'cgi':

            cgi_processor = path_config['processor']
            cgi_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))
            cgi_directory = cgi_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770

            return CgiDirectory(cgi_directory, cgi_processor, Resource404(self._templates, cgi_directory))

        # WAMP-Longpoll transport resource
        #
        elif path_config['type'] == 'longpoll':

            path_options = path_config.get('options', {})

            lp_resource = WampLongPollResource(self._router_session_factory,
                                               timeout=path_options.get('request_timeout', 10),
                                               killAfter=path_options.get('session_timeout', 30),
                                               queueLimitBytes=path_options.get('queue_limit_bytes', 128 * 1024),
                                               queueLimitMessages=path_options.get('queue_limit_messages', 100),
                                               debug=path_options.get('debug', False),
                                               debug_transport_id=path_options.get('debug_transport_id', None)
                                               )
            lp_resource._templates = self._templates

            return lp_resource

        # Publisher resource (part of REST-bridge)
        #
        elif path_config['type'] == 'publisher':

            # create a vanilla session: the publisher will use this to inject events
            #
            publisher_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            publisher_session = ApplicationSession(publisher_session_config)

            # add the publisher session to the router
            #
            self._router_session_factory.add(publisher_session, authrole=path_config.get('role', 'anonymous'))

            # now create the publisher Twisted Web resource
            #
            return PublisherResource(path_config.get('options', {}), publisher_session)

        # Webhook resource (part of REST-bridge)
        #
        elif path_config['type'] == 'webhook':

            # create a vanilla session: the webhook will use this to inject events
            #
            webhook_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            webhook_session = ApplicationSession(webhook_session_config)

            # add the webhook session to the router
            #
            self._router_session_factory.add(webhook_session, authrole=path_config.get('role', 'anonymous'))

            # now create the webhook Twisted Web resource
            #
            return WebhookResource(path_config.get('options', {}), webhook_session)

        # Caller resource (part of REST-bridge)
        #
        elif path_config['type'] == 'caller':

            # create a vanilla session: the caller will use this to inject calls
            #
            caller_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            caller_session = ApplicationSession(caller_session_config)

            # add the calling session to the router
            #
            self._router_session_factory.add(caller_session, authrole=path_config.get('role', 'anonymous'))

            # now create the caller Twisted Web resource
            #
            return CallerResource(path_config.get('options', {}), caller_session)

        # File Upload resource
        #
        elif path_config['type'] == 'upload':

            upload_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))
            upload_directory = upload_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
            if not os.path.isdir(upload_directory):
                emsg = "configured upload directory '{}' in file upload resource isn't a directory".format(upload_directory)
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            if 'temp_directory' in path_config:
                temp_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['temp_directory']))
                temp_directory = temp_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
            else:
                # fall back to a crossbar-specific folder under the system temp dir,
                # creating it on demand
                temp_directory = os.path.abspath(tempfile.gettempdir())
                temp_directory = os.path.join(temp_directory, 'crossbar-uploads')
                if not os.path.exists(temp_directory):
                    os.makedirs(temp_directory)

            if not os.path.isdir(temp_directory):
                emsg = "configured temp directory '{}' in file upload resource isn't a directory".format(temp_directory)
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            # file upload progress and finish events are published via this session
            #
            upload_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            upload_session = ApplicationSession(upload_session_config)

            self._router_session_factory.add(upload_session, authrole=path_config.get('role', 'anonymous'))

            self.log.info("File upload resource started. Uploads to {upl} using temp folder {tmp}.", upl=upload_directory, tmp=temp_directory)

            return FileUploadResource(upload_directory, temp_directory, path_config['form_fields'], upload_session, path_config.get('options', {}))

        # Generic Twisted Web resource
        #
        elif path_config['type'] == 'resource':

            try:
                klassname = path_config['classname']

                self.log.debug("Starting class '{}'".format(klassname))

                c = klassname.split('.')
                module_name, klass_name = '.'.join(c[:-1]), c[-1]
                module = importlib.import_module(module_name)
                make = getattr(module, klass_name)

                return make(path_config.get('extra', {}))

            except Exception as e:
                emsg = "Failed to import class '{}' - {}".format(klassname, e)
                self.log.error(emsg)
                self.log.error("PYTHONPATH: {pythonpath}", pythonpath=sys.path)
                raise ApplicationError(u"crossbar.error.class_import_failed", emsg)

        # Schema Docs resource
        #
        elif path_config['type'] == 'schemadoc':

            realm = path_config['realm']

            if realm not in self.realm_to_id:
                raise ApplicationError(u"crossbar.error.no_such_object", "No realm with URI '{}' configured".format(realm))

            realm_id = self.realm_to_id[realm]

            realm_schemas = self.realms[realm_id].session._schemas

            return SchemaDocResource(self._templates, realm, realm_schemas)

        # Nested subpath resource
        #
        elif path_config['type'] == 'path':

            nested_paths = path_config.get('paths', {})

            if '/' in nested_paths:
                nested_resource = self.create_resource(nested_paths['/'])
            else:
                nested_resource = Resource()

            # nest subpaths under the current entry
            #
            self.add_paths(nested_resource, nested_paths)

            return nested_resource

        else:
            raise ApplicationError(u"crossbar.error.invalid_configuration", "invalid Web path type '{}'".format(path_config['type']))

    def stop_router_transport(self, id, details=None):
        """
        Stop a transport currently running in this router and return when
        the transport has stopped.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.stop_router_transport``

        **Errors:**

        The procedure may raise the following errors:

        * ``crossbar.error.not_running`` - no transport with given ID is currently running on this router (or the transport is already stopping)
        * ``crossbar.error.cannot_stop`` - could not stop listening on the transport listening endpoint

        **Events:**

        The procedure will publish an event when the transport **is stopping** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_stopping``

        and publish an event when the transport **has stopped** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_stopped``

        :param id: The ID of the transport to stop.
        :type id: unicode
        """
        self.log.debug("{}.stop_router_transport".format(self.__class__.__name__), id=id)

        # FIXME
        if id not in self.transports:
            # if not id in self.transports or self.transports[id].status != 'started':
            emsg = "Cannot stop transport: no transport with ID '{}' or transport is already stopping".format(id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.not_running', emsg)

        self.log.debug("Stopping transport with ID '{}'".format(id))

        # close the listening port; completes asynchronously
        d = self.transports[id].port.stopListening()

        def ok(_):
            # only drop our bookkeeping entry once the port is actually closed
            del self.transports[id]

        def fail(err):
            raise ApplicationError(u"crossbar.error.cannot_stop", "Failed to stop transport: {}".format(str(err.value)))

        d.addCallbacks(ok, fail)
        return d

    def get_router_links(self, details=None):
        """
        List currently running router links.
        """
        self.log.debug("{}.get_router_links".format(self.__class__.__name__))

        # router-to-router links are not implemented yet
        raise NotImplementedError()

    def start_router_link(self, id, config, details=None):
        """
        Start a link on this router.

        :param id: The ID of the link to start.
        :type id: str
        :param config: The link configuration.
        :type config: dict
        """
        self.log.debug("{}.start_router_link".format(self.__class__.__name__), id=id, config=config)

        # router-to-router links are not implemented yet
        raise NotImplementedError()

    def stop_router_link(self, id, details=None):
        """
        Stop a link on this router.

        :param id: The ID of the link to stop.
        :type id: str
        """
        self.log.debug("{}.stop_router_link".format(self.__class__.__name__), id=id)

        # router-to-router links are not implemented yet
        raise NotImplementedError()
class TestEmbeddedSessions(unittest.TestCase):
    """
    Test cases for application session running embedded in router.

    Each test builds a local RouterFactory/RouterSessionFactory pair in
    setUp() and then attaches ApplicationSession instances directly
    (no network transport), exercising router-internal error paths.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """
        # create a router factory
        self.router_factory = RouterFactory(u'mynode')

        # start a realm
        self.router_factory.start_realm(RouterRealm(None, {u'name': u'realm1'}))

        # allow everything: empty-URI prefix match covers every URI
        default_permissions = {
            u'uri': u'',
            u'match': u'prefix',
            u'allow': {
                u'call': True,
                u'register': True,
                u'publish': True,
                u'subscribe': True
            }
        }
        self.router = self.router_factory.get(u'realm1')
        # register permissions both for the named test role and for the
        # default (None) role
        self.router.add_role(RouterRoleStaticAuth(self.router, u'test_role', default_permissions=default_permissions))
        self.router.add_role(RouterRoleStaticAuth(self.router, None, default_permissions=default_permissions))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        # nothing to clean up: factories are per-test and hold no external resources
        pass

    def test_authorize_exception_call(self):
        """
        When a dynamic authorizor throws an exception (during processCall)
        we log it.
        """
        the_exception = RuntimeError("authorizer bug")

        def boom(*args, **kw):
            raise the_exception
        # monkey-patch the role's authorize() so every authorization attempt raises
        self.router._roles[u'test_role'].authorize = boom

        class TestSession(ApplicationSession):
            def __init__(self, *args, **kw):
                super(TestSession, self).__init__(*args, **kw)
                self._authrole = u'test_role'
                self._transport = mock.MagicMock()
        session0 = TestSession()
        # register a procedure directly in the dealer's registration map
        self.router._dealer._registration_map.add_observer(session0, u'test.proc')

        # okay, we have an authorizer that will always explode and a
        # single procedure registered; when we call it, then
        # on_authorize_error (in dealer.py) should get called and our
        # error logged.

        call = message.Call(
            request=1234,
            procedure=u'test.proc',
            args=tuple(),
            kwargs=dict(),
        )
        # this should produce an error -- however processCall doesn't
        # itself return the Deferred, so we look for the side-effect
        # -- the router should have tried to send a message.Error (and
        # we should also have logged the error).
        self.router._dealer.processCall(session0, call)

        self.assertEqual(1, len(session0._transport.mock_calls))
        call = session0._transport.mock_calls[0]
        # mock call tuple: (method-name, args, kwargs)
        self.assertEqual('send', call[0])
        # ensure we logged our error (flushLoggedErrors also causes
        # trial to *not* fail the unit-test despite an error logged)
        errors = self.flushLoggedErrors()
        self.assertTrue(the_exception in [fail.value for fail in errors])

    def test_authorize_exception_register(self):
        """
        When a dynamic authorizor throws an exception (during processRegister)
        we log it.
        """
        the_exception = RuntimeError("authorizer bug")

        def boom(*args, **kw):
            raise the_exception
        self.router._roles[u'test_role'].authorize = boom

        class TestSession(ApplicationSession):
            def __init__(self, *args, **kw):
                super(TestSession, self).__init__(*args, **kw)
                self._authrole = u'test_role'
                self._transport = mock.MagicMock()
        session0 = TestSession()

        call = message.Register(
            request=1234,
            procedure=u'test.proc_reg',
        )
        # this should produce an error -- however processRegister doesn't
        # itself return the Deferred, so we look for the side-effect
        # -- the router should have tried to send a message.Error (and
        # we should also have logged the error).
        self.router._dealer.processRegister(session0, call)

        self.assertEqual(1, len(session0._transport.mock_calls))
        call = session0._transport.mock_calls[0]
        self.assertEqual('send', call[0])
        # ensure we logged our error (flushLoggedErrors also causes
        # trial to *not* fail the unit-test despite an error logged)
        errors = self.flushLoggedErrors()
        self.assertTrue(the_exception in [fail.value for fail in errors])

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # resolving the future proves onJoin was reached
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        # trial waits on the returned future/Deferred
        return d

    def test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        _RouterApplicationSession's send() method (from the Hello msg)
        """
        # setup
        the_exception = RuntimeError("sadness")
        errors = []

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

            def onUserError(self, *args, **kw):
                errors.append((args, kw))

        session = TestSession(types.ComponentConfig(u'realm1'))

        # in this test, we are just looking for onUserError to get
        # called so we don't need to patch the logger. this should
        # call onJoin, triggering our error
        self.session_factory.add(session)

        # check we got the right log.failure() call
        self.assertTrue(len(errors) > 0, "expected onUserError call")
        # errors[0] is (args, kwargs); args[0] is the Failure instance
        fail = errors[0][0][0]
        self.assertTrue(fail.value == the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception
        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello(u'realm1', dict(caller=role.RoleCallerFeatures()))

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('failure' in call[2])
        self.assertEqual(call[2]['failure'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception
        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate(u'bogus signature')

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onAuthenticate which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('failure' in call[2])
        self.assertEqual(call[2]['failure'].value, the_exception)

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some
        topic and add it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # noinspection PyUnusedLocal
                def on_event(*arg, **kwargs):
                    pass

                d2 = self.subscribe(on_event, u'com.example.topic1')

                def ok(_):
                    # subscription succeeded -> test passes
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d
def start(self):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    Runs as a Twisted inlineCallbacks-style generator (note the ``yield``
    statements); callers get the resulting Deferred.

    :raises Exception: if no node configuration has been set on this node.
    """
    if not self._config:
        raise Exception("No node configuration set")

    # get controller config/options
    #
    controller_config = self._config.get('controller', {})
    controller_options = controller_config.get('options', {})

    # set controller process title (best effort - setproctitle is optional)
    #
    try:
        import setproctitle
    except ImportError:
        self.log.warn("Warning, could not set process title (setproctitle not installed)")
    else:
        setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

    # local node management router
    #
    self._router_factory = RouterFactory(self._node_id, None)
    self._router_session_factory = RouterSessionFactory(self._router_factory)
    rlm_config = {
        'name': self._realm
    }
    rlm = RouterRealm(None, rlm_config)
    router = self._router_factory.start_realm(rlm)

    # setup global static roles
    #
    self._add_global_roles()

    # always add a realm service session
    #
    cfg = ComponentConfig(self._realm)
    rlm.session = self.ROUTER_SERVICE(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')
    self.log.debug('Router service session attached [{router_service}]',
                   router_service=qual(self.ROUTER_SERVICE))

    # add the node controller singleton component
    #
    self._controller = self.NODE_CONTROLLER(self)
    self._router_session_factory.add(self._controller, authrole=u'trusted')
    self.log.debug('Node controller attached [{node_controller}]',
                   node_controller=qual(self.NODE_CONTROLLER))

    # add extra node controller components
    #
    self._add_extra_controller_components(controller_options)

    # setup Node shutdown triggers
    #
    self._set_shutdown_triggers(controller_options)

    panic = False
    try:
        # startup the node personality ..
        yield self._startup()

        # .. and notify systemd that we are fully up and running
        try:
            import sdnotify
            sdnotify.SystemdNotifier().notify("READY=1")
        # FIX: was a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit; systemd notification is
        # best-effort, so only ignore ordinary exceptions
        except Exception:
            # do nothing on non-systemd platforms
            pass
    except ApplicationError as e:
        panic = True
        self.log.error("{msg}", msg=e.error_message())
    except Exception:
        panic = True
        self.log.failure()
        self.log.error('fatal: could not startup node')

    if panic:
        # stop the reactor; it may already be shutting down
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
def start(self):
    """
    Starts this node. This will start a node controller
    and then spawn new worker processes as needed.

    Runs as a Twisted inlineCallbacks-style generator (note the ``yield``
    statements); callers get the resulting Deferred.
    """
    # NOTE(review): calling check_config() only when there is *no* config
    # looks inverted - presumably the config should always be validated
    # (or an error raised when missing). TODO confirm intended behavior.
    if not self._config:
        self.check_config()

    controller_config = self._config.get('controller', {})

    controller_options = controller_config.get('options', {})

    controller_title = controller_options.get('title', 'crossbar-controller')

    # set controller process title (best effort - setproctitle is optional)
    try:
        import setproctitle
    except ImportError:
        self.log.warn(
            "Warning, could not set process title (setproctitle not installed)"
        )
    else:
        setproctitle.setproctitle(controller_title)

    # the node's name (must be unique within the management realm)
    if 'id' in controller_config:
        self._node_id = controller_config['id']
    else:
        self._node_id = socket.gethostname()

    if 'manager' in controller_config:
        extra = {
            'onready': Deferred(),

            # authentication information for connecting to uplink CDC router
            # using WAMP-CRA authentication
            #
            'authid': self._node_id,
            'authkey': controller_config['manager']['key']
        }
        realm = controller_config['manager']['realm']
        transport = controller_config['manager']['transport']
        runner = ApplicationRunner(url=transport['url'], realm=realm, extra=extra,
                                   debug_wamp=False)
        runner.run(NodeManagementSession, start_reactor=False)

        # wait until we have attached to the uplink CDC
        self._management_session = yield extra['onready']

        self.log.info(
            "Node is connected to Crossbar.io DevOps Center (CDC)")
    else:
        self._management_session = None

    # the node's management realm
    self._realm = controller_config.get('realm', 'crossbar')

    # router and factory that creates router sessions
    #
    self._router_factory = RouterFactory()
    self._router_session_factory = RouterSessionFactory(
        self._router_factory)

    rlm = RouterRealm(None, {'name': self._realm})

    # create a new router for the realm
    router = self._router_factory.start_realm(rlm)

    # add a router/realm service session
    cfg = ComponentConfig(self._realm)

    rlm.session = RouterServiceSession(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')

    # bridge the local management router to the uplink CDC (if any)
    if self._management_session:
        self._bridge_session = NodeManagementBridgeSession(
            cfg, self._management_session)
        self._router_session_factory.add(self._bridge_session,
                                         authrole=u'trusted')
    else:
        self._bridge_session = None

    # the node controller singleton WAMP application session
    #
    self._controller = NodeControllerSession(self)

    # add the node controller singleton session to the router
    #
    self._router_session_factory.add(self._controller, authrole=u'trusted')

    # Detect WAMPlets
    #
    wamplets = self._controller._get_wamplets()
    if len(wamplets) > 0:
        self.log.info("Detected {wamplets} WAMPlets in environment:",
                      wamplets=len(wamplets))
        for wpl in wamplets:
            self.log.info("WAMPlet {dist}.{name}",
                          dist=wpl['dist'],
                          name=wpl['name'])
    else:
        # FIX: corrected typo in user-facing log message ("enviroment")
        self.log.info("No WAMPlets detected in environment.")

    panic = False

    try:
        yield self._startup(self._config)
    except ApplicationError as e:
        panic = True
        # log the (possibly multi-line) error message line by line
        for line in e.args[0].strip().splitlines():
            self.log.error(line)
    except Exception:
        panic = True
        traceback.print_exc()

    if panic:
        # stop the reactor; it may already be shutting down
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
class RouterWorkerSession(NativeWorkerSession): """ A native Crossbar.io worker that runs a WAMP router which can manage multiple realms, run multiple transports and links, as well as host multiple (embedded) application components. """ WORKER_TYPE = 'router' @inlineCallbacks def onJoin(self, details): """ Called when worker process has joined the node's management realm. """ yield NativeWorkerSession.onJoin(self, details, publish_ready=False) # factory for producing (per-realm) routers self._router_factory = RouterFactory(self._node_id) # factory for producing router sessions self._router_session_factory = RouterSessionFactory(self._router_factory) # map: realm ID -> RouterRealm self.realms = {} # map: realm URI -> realm ID self.realm_to_id = {} # map: component ID -> RouterComponent self.components = {} # map: transport ID -> RouterTransport self.transports = {} # the procedures registered procs = [ 'get_router_realms', 'start_router_realm', 'stop_router_realm', 'get_router_realm_roles', 'start_router_realm_role', 'stop_router_realm_role', 'get_router_realm_uplinks', 'start_router_realm_uplink', 'stop_router_realm_uplink', 'get_router_components', 'start_router_component', 'stop_router_component', 'get_router_transports', 'start_router_transport', 'stop_router_transport', ] dl = [] for proc in procs: uri = '{}.{}'.format(self._uri_prefix, proc) self.log.debug("Registering management API procedure {proc}", proc=uri) dl.append(self.register(getattr(self, proc), uri, options=RegisterOptions(details_arg='details'))) regs = yield DeferredList(dl) self.log.debug("Registered {cnt} management API procedures", cnt=len(regs)) # NativeWorkerSession.publish_ready() yield self.publish_ready() def get_router_realms(self, details=None): """ Get realms currently running on this router worker. :returns: List of realms currently running. 
:rtype: list of dict """ self.log.debug("{}.get_router_realms".format(self.__class__.__name__)) raise Exception("not implemented") @inlineCallbacks def start_router_realm(self, id, config, schemas=None, enable_trace=False, details=None): """ Starts a realm on this router worker. :param id: The ID of the realm to start. :type id: str :param config: The realm configuration. :type config: dict :param schemas: An (optional) initial schema dictionary to load. :type schemas: dict """ self.log.debug("{}.start_router_realm".format(self.__class__.__name__), id=id, config=config, schemas=schemas) # prohibit starting a realm twice # if id in self.realms: emsg = "Could not start realm: a realm with ID '{}' is already running (or starting)".format(id) self.log.error(emsg) raise ApplicationError(u'crossbar.error.already_running', emsg) # check configuration # try: checkconfig.check_router_realm(config) except Exception as e: emsg = "Invalid router realm configuration: {}".format(e) self.log.error(emsg) raise ApplicationError(u"crossbar.error.invalid_configuration", emsg) # URI of the realm to start realm = config['name'] # track realm rlm = RouterRealm(id, config) self.realms[id] = rlm self.realm_to_id[realm] = id # create a new router for the realm router = self._router_factory.start_realm(rlm) if enable_trace: router._trace_traffic = True router._trace_traffic_roles_include = None router._trace_traffic_roles_exclude = [u'trusted'] self.log.info(">>> Traffic tracing enabled! <<<") # add a router/realm service session extra = { 'onready': Deferred() } cfg = ComponentConfig(realm, extra) rlm.session = RouterServiceSession(cfg, router, schemas=schemas) self._router_session_factory.add(rlm.session, authrole=u'trusted') yield extra['onready'] self.log.info("Realm '{realm}' started", realm=realm) def stop_router_realm(self, id, close_sessions=False, details=None): """ Stop a realm currently running on this router worker. 
When a realm has stopped, no new session will be allowed to attach to the realm. Optionally, close all sessions currently attached to the realm. :param id: ID of the realm to stop. :type id: str :param close_sessions: If `True`, close all session currently attached. :type close_sessions: bool """ self.log.debug("{}.stop_router_realm".format(self.__class__.__name__), id=id, close_sessions=close_sessions) # FIXME raise NotImplementedError() def get_router_realm_roles(self, id, details=None): """ Get roles currently running on a realm running on this router worker. :param id: The ID of the realm to list roles for. :type id: str :returns: A list of roles. :rtype: list of dicts """ self.log.debug("{}.get_router_realm_roles".format(self.__class__.__name__), id=id) if id not in self.realms: raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id)) return self.realms[id].roles.values() def start_router_realm_role(self, id, role_id, config, details=None): """ Start a role on a realm running on this router worker. :param id: The ID of the realm the role should be started on. :type id: str :param role_id: The ID of the role to start under. :type role_id: str :param config: The role configuration. :type config: dict """ self.log.debug("{}.start_router_realm_role".format(self.__class__.__name__), id=id, role_id=role_id, config=config) if id not in self.realms: raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id)) if role_id in self.realms[id].roles: raise ApplicationError(u"crossbar.error.already_exists", "A role with ID '{}' already exists in realm with ID '{}'".format(role_id, id)) self.realms[id].roles[role_id] = RouterRealmRole(role_id, config) realm = self.realms[id].config['name'] self._router_factory.add_role(realm, config) def stop_router_realm_role(self, id, role_id, details=None): """ Stop a role currently running on a realm running on this router worker. 
:param id: The ID of the realm of the role to be stopped. :type id: str :param role_id: The ID of the role to be stopped. :type role_id: str """ self.log.debug("{}.stop_router_realm_role".format(self.__class__.__name__), id=id, role_id=role_id) if id not in self.realms: raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id)) if role_id not in self.realms[id].roles: raise ApplicationError(u"crossbar.error.no_such_object", "No role with ID '{}' in realm with ID '{}'".format(role_id, id)) del self.realms[id].roles[role_id] def get_router_realm_uplinks(self, id, details=None): """ Get uplinks currently running on a realm running on this router worker. :param id: The ID of the router realm to list uplinks for. :type id: str :returns: A list of uplinks. :rtype: list of dicts """ self.log.debug("{}.get_router_realm_uplinks".format(self.__class__.__name__)) if id not in self.realms: raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id)) return self.realms[id].uplinks.values() @inlineCallbacks def start_router_realm_uplink(self, realm_id, uplink_id, uplink_config, details=None): """ Start an uplink on a realm running on this router worker. :param realm_id: The ID of the realm the uplink should be started on. :type realm_id: unicode :param uplink_id: The ID of the uplink to start. :type uplink_id: unicode :param uplink_config: The uplink configuration. 
:type uplink_config: dict """ self.log.debug("{}.start_router_realm_uplink".format(self.__class__.__name__), realm_id=realm_id, uplink_id=uplink_id, uplink_config=uplink_config) # check arguments if realm_id not in self.realms: raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(realm_id)) if uplink_id in self.realms[realm_id].uplinks: raise ApplicationError(u"crossbar.error.already_exists", "An uplink with ID '{}' already exists in realm with ID '{}'".format(uplink_id, realm_id)) # create a representation of the uplink self.realms[realm_id].uplinks[uplink_id] = RouterRealmUplink(uplink_id, uplink_config) # create the local session of the bridge realm = self.realms[realm_id].config['name'] extra = { 'onready': Deferred(), 'uplink': uplink_config } uplink_session = uplink.LocalSession(ComponentConfig(realm, extra)) self._router_session_factory.add(uplink_session, authrole=u'trusted') # wait until the uplink is ready try: uplink_session = yield extra['onready'] except Exception as e: self.log.error(e) raise e self.realms[realm_id].uplinks[uplink_id].session = uplink_session self.log.info("Realm is connected to Crossbar.io uplink router") def stop_router_realm_uplink(self, id, uplink_id, details=None): """ Stop an uplink currently running on a realm running on this router worker. :param id: The ID of the realm to stop an uplink on. :type id: str :param uplink_id: The ID of the uplink within the realm to stop. :type uplink_id: str """ self.log.debug("{}.stop_router_realm_uplink".format(self.__class__.__name__), id=id, uplink_id=uplink_id) raise NotImplementedError() def get_router_components(self, details=None): """ Get app components currently running in this router worker. :returns: List of app components currently running. 
:rtype: list of dict """ self.log.debug("{}.get_router_components".format(self.__class__.__name__)) res = [] for component in sorted(self.components.values(), key=lambda c: c.created): res.append({ 'id': component.id, 'created': utcstr(component.created), 'config': component.config, }) return res def start_router_component(self, id, config, details=None): """ Start an app component in this router worker. :param id: The ID of the component to start. :type id: str :param config: The component configuration. :type config: obj """ self.log.debug("{}.start_router_component".format(self.__class__.__name__), id=id, config=config) # prohibit starting a component twice # if id in self.components: emsg = "Could not start component: a component with ID '{}'' is already running (or starting)".format(id) self.log.error(emsg) raise ApplicationError(u'crossbar.error.already_running', emsg) # check configuration # try: checkconfig.check_router_component(config) except Exception as e: emsg = "Invalid router component configuration: {}".format(e) self.log.error(emsg) raise ApplicationError(u"crossbar.error.invalid_configuration", emsg) else: self.log.debug("Starting {type}-component on router.", type=config['type']) # resolve references to other entities # references = {} for ref in config.get('references', []): ref_type, ref_id = ref.split(':') if ref_type == u'connection': if ref_id in self._connections: references[ref] = self._connections[ref_id] else: emsg = "cannot resolve reference '{}' - no '{}' with ID '{}'".format(ref, ref_type, ref_id) self.log.error(emsg) raise ApplicationError(u"crossbar.error.invalid_configuration", emsg) else: emsg = "cannot resolve reference '{}' - invalid reference type '{}'".format(ref, ref_type) self.log.error(emsg) raise ApplicationError(u"crossbar.error.invalid_configuration", emsg) # create component config # realm = config['realm'] extra = config.get('extra', None) component_config = ComponentConfig(realm=realm, extra=extra) create_component = 
_appsession_loader(config) # .. and create and add an WAMP application session to # run the component next to the router # try: session = create_component(component_config) # any exception spilling out from user code in onXXX handlers is fatal! def panic(fail, msg): self.log.error( "Fatal error in component: {msg} - {log_failure.value}", msg=msg, log_failure=fail ) session.disconnect() session._swallow_error = panic except Exception: self.log.error( "Component instantiation failed", log_failure=Failure(), ) raise self.components[id] = RouterComponent(id, config, session) self._router_session_factory.add(session, authrole=config.get('role', u'anonymous')) self.log.debug("Added component {id}", id=id) def stop_router_component(self, id, details=None): """ Stop an app component currently running in this router worker. :param id: The ID of the component to stop. :type id: str """ self.log.debug("{}.stop_router_component".format(self.__class__.__name__), id=id) if id in self.components: self.log.debug("Worker {}: stopping component {}".format(self.config.extra.worker, id)) try: # self._components[id].disconnect() self._session_factory.remove(self.components[id]) del self.components[id] except Exception as e: raise ApplicationError(u"crossbar.error.cannot_stop", "Failed to stop component {}: {}".format(id, e)) else: raise ApplicationError(u"crossbar.error.no_such_object", "No component {}".format(id)) def get_router_transports(self, details=None): """ Get transports currently running in this router worker. :returns: List of transports currently running. :rtype: list of dict """ self.log.debug("{}.get_router_transports".format(self.__class__.__name__)) res = [] for transport in sorted(self.transports.values(), key=lambda c: c.created): res.append({ 'id': transport.id, 'created': utcstr(transport.created), 'config': transport.config, }) return res def start_router_transport(self, id, config, details=None): """ Start a transport on this router worker. 
:param id: The ID of the transport to start. :type id: str :param config: The transport configuration. :type config: dict """ self.log.debug("{}.start_router_transport".format(self.__class__.__name__), id=id, config=config) # prohibit starting a transport twice # if id in self.transports: emsg = "Could not start transport: a transport with ID '{}' is already running (or starting)".format(id) self.log.error(emsg) raise ApplicationError(u'crossbar.error.already_running', emsg) # check configuration # try: checkconfig.check_router_transport(config) except Exception as e: emsg = "Invalid router transport configuration: {}".format(e) self.log.error(emsg) raise ApplicationError(u"crossbar.error.invalid_configuration", emsg) else: self.log.debug("Starting {}-transport on router.".format(config['type'])) # standalone WAMP-RawSocket transport # if config['type'] == 'rawsocket': transport_factory = WampRawSocketServerFactory(self._router_session_factory, config) transport_factory.noisy = False # standalone WAMP-WebSocket transport # elif config['type'] == 'websocket': transport_factory = WampWebSocketServerFactory(self._router_session_factory, self.config.extra.cbdir, config, self._templates) transport_factory.noisy = False # Flash-policy file server pseudo transport # elif config['type'] == 'flashpolicy': transport_factory = FlashPolicyFactory(config.get('allowed_domain', None), config.get('allowed_ports', None)) # WebSocket testee pseudo transport # elif config['type'] == 'websocket.testee': transport_factory = WebSocketTesteeServerFactory(config, self._templates) # Stream testee pseudo transport # elif config['type'] == 'stream.testee': transport_factory = StreamTesteeServerFactory() # Twisted Web based transport # elif config['type'] == 'web': options = config.get('options', {}) # create Twisted Web root resource # if '/' in config['paths']: root_config = config['paths']['/'] root = self._create_resource(root_config, nested=False) else: root = 
Resource404(self._templates, b'') # create Twisted Web resources on all non-root paths configured # self._add_paths(root, config.get('paths', {})) # create the actual transport factory # transport_factory = Site(root) transport_factory.noisy = False # Web access logging # if not options.get('access_log', False): transport_factory.log = lambda _: None # Traceback rendering # transport_factory.displayTracebacks = options.get('display_tracebacks', False) # HSTS # if options.get('hsts', False): if 'tls' in config['endpoint']: hsts_max_age = int(options.get('hsts_max_age', 31536000)) transport_factory.requestFactory = createHSTSRequestFactory(transport_factory.requestFactory, hsts_max_age) else: self.log.warn("Warning: HSTS requested, but running on non-TLS - skipping HSTS") # Unknown transport type # else: # should not arrive here, since we did check_transport() in the beginning raise Exception("logic error") # create transport endpoint / listening port from transport factory # d = create_listening_port_from_config(config['endpoint'], self.config.extra.cbdir, transport_factory, self._reactor, self.log) def ok(port): self.transports[id] = RouterTransport(id, config, transport_factory, port) self.log.debug("Router transport '{}'' started and listening".format(id)) return def fail(err): emsg = "Cannot listen on transport endpoint: {log_failure}" self.log.error(emsg, log_failure=err) raise ApplicationError(u"crossbar.error.cannot_listen", emsg) d.addCallbacks(ok, fail) return d def _add_paths(self, resource, paths): """ Add all configured non-root paths under a resource. :param resource: The parent resource under which to add paths. :type resource: Resource :param paths: The path configurations. 
        :type paths: dict
        """
        # NOTE(review): iteration is sorted so sibling paths are attached in a
        # deterministic order; text paths are encoded because Twisted's
        # putChild() expects bytes path segments.
        for path in sorted(paths):
            if isinstance(path, six.text_type):
                webPath = path.encode('utf8')
            else:
                webPath = path
            # the root path ("/") is handled by the caller, not nested here
            if path != b"/":
                resource.putChild(webPath, self._create_resource(paths[path]))

    def _create_resource(self, path_config, nested=True):
        """
        Creates child resource to be added to the parent.

        Dispatches on ``path_config['type']`` to build one of the supported
        Twisted Web resources (websocket, static, wsgi, redirect, json, cgi,
        longpoll, publisher, webhook, caller, upload, resource, schemadoc,
        path). Raises ``ApplicationError`` for invalid configuration.

        :param path_config: Configuration for the new child resource.
        :type path_config: dict
        :param nested: Whether the resource is created nested below another
            resource (affects WSGI root-resource wrapping and the error
            message for unknown types).
        :type nested: bool

        :returns: Resource -- the new child resource
        """
        # WAMP-WebSocket resource
        #
        if path_config['type'] == 'websocket':

            ws_factory = WampWebSocketServerFactory(self._router_session_factory, self.config.extra.cbdir, path_config, self._templates)

            # FIXME: Site.start/stopFactory should start/stop factories wrapped as Resources
            ws_factory.startFactory()

            return WebSocketResource(ws_factory)

        # Static file hierarchy resource
        #
        elif path_config['type'] == 'static':

            static_options = path_config.get('options', {})

            if 'directory' in path_config:

                static_dir = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))

            elif 'package' in path_config:

                if 'resource' not in path_config:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "missing resource")

                try:
                    # import only probes that the package is importable; the
                    # bound name is not used further (pkg_resources does the
                    # actual file lookup below)
                    mod = importlib.import_module(path_config['package'])
                except ImportError as e:
                    emsg = "Could not import resource {} from package {}: {}".format(path_config['resource'], path_config['package'], e)
                    self.log.error(emsg)
                    raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
                else:
                    try:
                        static_dir = os.path.abspath(pkg_resources.resource_filename(path_config['package'], path_config['resource']))
                    except Exception as e:
                        emsg = "Could not import resource {} from package {}: {}".format(path_config['resource'], path_config['package'], e)
                        self.log.error(emsg)
                        raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            else:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing web spec")

            static_dir = static_dir.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770

            # create resource for file system hierarchy
            #
            if static_options.get('enable_directory_listing', False):
                static_resource_class = StaticResource
            else:
                static_resource_class = StaticResourceNoListing

            cache_timeout = static_options.get('cache_timeout', DEFAULT_CACHE_TIMEOUT)

            static_resource = static_resource_class(static_dir, cache_timeout=cache_timeout)

            # set extra MIME types
            #
            static_resource.contentTypes.update(EXTRA_MIME_TYPES)
            if 'mime_types' in static_options:
                static_resource.contentTypes.update(static_options['mime_types'])
            patchFileContentTypes(static_resource)

            # render 404 page on any concrete path not found
            #
            static_resource.childNotFound = Resource404(self._templates, static_dir)

            return static_resource

        # WSGI resource
        #
        elif path_config['type'] == 'wsgi':

            if not _HAS_WSGI:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI unsupported")

            if 'module' not in path_config:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app module")

            if 'object' not in path_config:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app object")

            # import WSGI app module and object
            mod_name = path_config['module']
            try:
                mod = importlib.import_module(mod_name)
            except ImportError as e:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI app module '{}' import failed: {} - Python search path was {}".format(mod_name, e, sys.path))
            else:
                obj_name = path_config['object']
                if obj_name not in mod.__dict__:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI app object '{}' not in module '{}'".format(obj_name, mod_name))
                else:
                    app = getattr(mod, obj_name)

            # Create a threadpool for running the WSGI requests in
            pool = ThreadPool(maxthreads=path_config.get("maxthreads", 20),
                              minthreads=path_config.get("minthreads", 0),
                              name="crossbar_wsgi_threadpool")
            # make sure the pool is stopped on reactor shutdown so worker
            # threads do not linger
            self._reactor.addSystemEventTrigger('before', 'shutdown', pool.stop)
            pool.start()

            # Create a Twisted Web WSGI resource from the user's WSGI application object
            try:
                wsgi_resource = WSGIResource(self._reactor, pool, app)

                if not nested:
                    wsgi_resource = WSGIRootResource(wsgi_resource, {})
            except Exception as e:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "could not instantiate WSGI resource: {}".format(e))
            else:
                return wsgi_resource

        # Redirecting resource
        #
        elif path_config['type'] == 'redirect':
            redirect_url = path_config['url'].encode('ascii', 'ignore')
            return RedirectResource(redirect_url)

        # JSON value resource
        #
        elif path_config['type'] == 'json':
            value = path_config['value']

            return JsonResource(value)

        # CGI script resource
        #
        elif path_config['type'] == 'cgi':

            cgi_processor = path_config['processor']
            cgi_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))
            cgi_directory = cgi_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770

            return CgiDirectory(cgi_directory, cgi_processor, Resource404(self._templates, cgi_directory))

        # WAMP-Longpoll transport resource
        #
        elif path_config['type'] == 'longpoll':

            path_options = path_config.get('options', {})

            lp_resource = WampLongPollResource(self._router_session_factory,
                                               timeout=path_options.get('request_timeout', 10),
                                               killAfter=path_options.get('session_timeout', 30),
                                               queueLimitBytes=path_options.get('queue_limit_bytes', 128 * 1024),
                                               queueLimitMessages=path_options.get('queue_limit_messages', 100),
                                               debug=path_options.get('debug', False),
                                               debug_transport_id=path_options.get('debug_transport_id', None)
                                               )
            lp_resource._templates = self._templates

            return lp_resource

        # Publisher resource (part of REST-bridge)
        #
        elif path_config['type'] == 'publisher':

            # create a vanilla session: the publisher will use this to inject events
            #
            publisher_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            publisher_session = ApplicationSession(publisher_session_config)

            # add the publisher session to the router
            #
            self._router_session_factory.add(publisher_session, authrole=path_config.get('role', 'anonymous'))

            # now create the publisher Twisted Web resource
            #
            return PublisherResource(path_config.get('options', {}), publisher_session)

        # Webhook resource (part of REST-bridge)
        #
        elif path_config['type'] == 'webhook':

            # create a vanilla session: the webhook will use this to inject events
            #
            webhook_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            webhook_session = ApplicationSession(webhook_session_config)

            # add the webhook session to the router
            #
            self._router_session_factory.add(webhook_session, authrole=path_config.get('role', 'anonymous'))

            # now create the webhook Twisted Web resource
            #
            return WebhookResource(path_config.get('options', {}), webhook_session)

        # Caller resource (part of REST-bridge)
        #
        elif path_config['type'] == 'caller':

            # create a vanilla session: the caller will use this to inject calls
            #
            caller_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            caller_session = ApplicationSession(caller_session_config)

            # add the calling session to the router
            #
            self._router_session_factory.add(caller_session, authrole=path_config.get('role', 'anonymous'))

            # now create the caller Twisted Web resource
            #
            return CallerResource(path_config.get('options', {}), caller_session)

        # File Upload resource
        #
        elif path_config['type'] == 'upload':

            # upload directory must already exist
            upload_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))
            upload_directory = upload_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
            if not os.path.isdir(upload_directory):
                emsg = "configured upload directory '{}' in file upload resource isn't a directory".format(upload_directory)
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            # temp directory: taken from config, or a 'crossbar-uploads'
            # folder below the system temp dir (created on demand)
            if 'temp_directory' in path_config:
                temp_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['temp_directory']))
                temp_directory = temp_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
            else:
                temp_directory = os.path.abspath(tempfile.gettempdir())
                temp_directory = os.path.join(temp_directory, 'crossbar-uploads')
                if not os.path.exists(temp_directory):
                    os.makedirs(temp_directory)

            if not os.path.isdir(temp_directory):
                emsg = "configured temp directory '{}' in file upload resource isn't a directory".format(temp_directory)
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            # file upload progress and finish events are published via this session
            #
            upload_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            upload_session = ApplicationSession(upload_session_config)

            self._router_session_factory.add(upload_session, authrole=path_config.get('role', 'anonymous'))

            self.log.info("File upload resource started. Uploads to {upl} using temp folder {tmp}.", upl=upload_directory, tmp=temp_directory)

            return FileUploadResource(upload_directory, temp_directory, path_config['form_fields'], upload_session, path_config.get('options', {}))

        # Generic Twisted Web resource
        #
        elif path_config['type'] == 'resource':

            try:
                klassname = path_config['classname']

                self.log.debug("Starting class '{}'".format(klassname))

                # split "pkg.module.Class" into module path and class name,
                # import the module and instantiate the class with 'extra'
                c = klassname.split('.')
                module_name, klass_name = '.'.join(c[:-1]), c[-1]
                module = importlib.import_module(module_name)
                make = getattr(module, klass_name)

                return make(path_config.get('extra', {}))
            except Exception as e:
                # NOTE(review): if 'classname' itself is missing, klassname is
                # unbound here and this message raises NameError — confirm
                # config validation guarantees the key upstream
                emsg = "Failed to import class '{}' - {}".format(klassname, e)
                self.log.error(emsg)
                self.log.error("PYTHONPATH: {pythonpath}", pythonpath=sys.path)
                raise ApplicationError(u"crossbar.error.class_import_failed", emsg)

        # Schema Docs resource
        #
        elif path_config['type'] == 'schemadoc':

            realm = path_config['realm']

            if realm not in self.realm_to_id:
                raise ApplicationError(u"crossbar.error.no_such_object", "No realm with URI '{}' configured".format(realm))

            realm_id = self.realm_to_id[realm]

            realm_schemas = self.realms[realm_id].session._schemas

            return SchemaDocResource(self._templates, realm, realm_schemas)

        # Nested subpath resource
        #
        elif path_config['type'] == 'path':

            nested_paths = path_config.get('paths', {})

            if '/' in nested_paths:
                nested_resource = self._create_resource(nested_paths['/'])
            else:
                nested_resource = Resource404(self._templates, b'')

            # nest subpaths under the current entry
            #
            self._add_paths(nested_resource, nested_paths)

            return nested_resource

        else:
            raise ApplicationError(u"crossbar.error.invalid_configuration",
                                   "invalid Web path type '{}' in {} config".format(path_config['type'], 'nested' if nested else 'root'))

    def stop_router_transport(self, id, details=None):
        """
        Stop a transport currently running in this router worker.

        :param id: The ID of the transport to stop.
        :type id: str

        :returns: Deferred that fires when the listening port has stopped.
        """
        self.log.debug("{}.stop_router_transport".format(self.__class__.__name__), id=id)

        # FIXME
        if id not in self.transports:
            #        if not id in self.transports or self.transports[id].status != 'started':
            emsg = "Cannot stop transport: no transport with ID '{}' or transport is already stopping".format(id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.not_running', emsg)

        self.log.debug("Stopping transport with ID '{}'".format(id))

        d = self.transports[id].port.stopListening()

        def ok(_):
            # only forget the transport once the port actually stopped
            del self.transports[id]

        def fail(err):
            raise ApplicationError(u"crossbar.error.cannot_stop", "Failed to stop transport: {}".format(str(err.value)))

        d.addCallbacks(ok, fail)
        return d
    def start(self, node_id=None):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        :param node_id: Optional node ID; overrides both the configured ID and
            the hostname-derived default.

        :returns: (via returnValue) dict with key 'shutdown_complete' holding a
            Deferred that fires when the node has shut down.
        """
        self.log.info('Starting {personality} node {method}',
                      personality=self.personality.NAME,
                      method=hltype(Node.start))

        # a configuration must have been loaded before
        if not self._config:
            raise Exception("No node configuration set")

        # a node can only be started once for now
        assert self._shutdown_complete is None
        assert self._node_id is None

        # get controller config/options
        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # the node ID: CLI takes precedence over config over hostname
        if node_id:
            self._node_id = node_id
            _node_id_source = 'explicit run-time argument'
        elif 'id' in controller_config:
            self._node_id = controller_config['id']
            _node_id_source = 'explicit configuration'
        else:
            self._node_id = u'{}'.format(socket.gethostname()).lower()
            _node_id_source = 'hostname'
        self.log.info('Node ID {node_id} set from {node_id_source}',
                      node_id=hlid(self._node_id),
                      node_id_source=_node_id_source)

        # set controller process title
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # local node management router
        self._router_factory = RouterFactory(self._node_id, None)
        self._router_session_factory = RouterSessionFactory(self._router_factory)
        rlm_config = {
            'name': self._realm
        }
        rlm = RouterRealm(None, rlm_config)
        router = self._router_factory.start_realm(rlm)

        # setup global static roles
        self._add_global_roles()

        # always add a realm service session
        cfg = ComponentConfig(self._realm)
        rlm.session = (self.ROUTER_SERVICE)(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')
        self.log.debug('Router service session attached [{router_service}]', router_service=qual(self.ROUTER_SERVICE))

        # add the node controller singleton component
        self._controller = self.NODE_CONTROLLER(self)
        self._router_session_factory.add(self._controller, authrole=u'trusted')
        self.log.debug('Node controller attached [{node_controller}]', node_controller=qual(self.NODE_CONTROLLER))

        # add extra node controller components
        self._add_extra_controller_components(controller_options)

        # setup Node shutdown triggers
        self._set_shutdown_triggers(controller_options)

        # setup node shutdown Deferred
        self._shutdown_complete = Deferred()

        # startup the node personality ..
        yield self.personality.Node.boot(self)

        # notify systemd that we are fully up and running
        try:
            import sdnotify
        except ImportError:
            # do nothing on non-systemd platforms
            pass
        else:
            sdnotify.SystemdNotifier().notify("READY=1")

        # return a shutdown deferred which we will fire to notify the code that
        # called start() - which is the main crossbar boot code
        res = {
            'shutdown_complete': self._shutdown_complete
        }
        returnValue(res)
class Node(object):
    """
    Crossbar.io Standalone node personality.

    A Crossbar.io node is the running of a controller process and one or
    multiple worker processes. A single Crossbar.io node runs exactly one
    instance of this class, hence this class can be considered a system
    singleton.
    """
    # class of the node controller singleton session
    NODE_CONTROLLER = NodeController

    # class of the per-realm service session
    ROUTER_SERVICE = RouterServiceAgent

    log = make_logger()

    def __init__(self, personality, cbdir=None, reactor=None, native_workers=None, options=None):
        """
        :param personality: Node personality (provides NAME, native_workers, config checking, boot).
        :param cbdir: The node directory to run from.
        :type cbdir: unicode
        :param reactor: Reactor to run on.
        :type reactor: obj or None
        :param native_workers: Optional override for the map of native worker types.
        :param options: Optional NodeOptions instance.
        """
        self.personality = personality
        self.options = options or NodeOptions()

        self._native_workers = personality.native_workers

        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # allow overriding to add (or remove) native-worker types
        if native_workers is not None:
            self._native_workers = native_workers

        # local node management router
        self._router_factory = None

        # session factory for node management router
        self._router_session_factory = None

        # the node controller realm
        self._realm = u'crossbar'

        # config of this node.
        self._config = None

        # node private key autobahn.wamp.cryptosign.SigningKey
        self._node_key = None

        # when running in managed mode, this will hold the uplink session to CFC
        self._manager = None

        # the node's management realm when running in managed mode (this comes from CFC!)
        self._management_realm = None

        # the node's ID when running in managed mode (this comes from CFC!)
        self._node_id = None

        # node extra when running in managed mode (this comes from CFC!)
        self._node_extra = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [NODE_SHUTDOWN_ON_WORKER_EXIT]

        # will be filled with a Deferred in start(). the Deferred will fire when
        # the node has shut down, and the result signals if shutdown was clean
        self._shutdown_complete = None

        # for node elements started under specific IDs, and where
        # the node configuration does not specify an ID, use a generic
        # name numbered sequentially using the counters here
        self._worker_no = 1
        self._realm_no = 1
        self._role_no = 1
        self._connection_no = 1
        self._transport_no = 1
        self._component_no = 1
        # FIX: _uplink_no was never initialized, although
        # _configure_native_worker_router() reads and increments it when a
        # realm uplink has no explicit ID (would raise AttributeError)
        self._uplink_no = 1

    def load_keys(self, cbdir):
        """
        Load (or generate) the node private signing key from the node directory.
        """
        self._node_key = _maybe_generate_key(cbdir)

    def load_config(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in empty config.
        """
        if configfile:
            configpath = os.path.abspath(os.path.join(self._cbdir, configfile))

            self.log.debug('Loading node configuration from "{configpath}" ..',
                           configpath=configpath)

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = self.personality.check_config_file(self.personality, configpath)

            self.log.info('Node configuration loaded from {configpath}',
                          configpath=hlid(configpath))
        else:
            self._config = {
                u'version': 2,
                u'controller': {},
                u'workers': []
            }
            self.personality.check_config(self.personality, self._config)
            self.log.info('Node configuration loaded from built-in config.')

    def _add_global_roles(self):
        # hook: the standalone personality adds no extra roles on the
        # management router
        self.log.info('No extra node router roles')

    def _add_worker_role(self, worker_auth_role, options):
        """
        Add the static authorization role a native worker connects back under.
        """
        worker_role_config = {
            u"name": worker_auth_role,
            u"permissions": [
                # the worker requires these permissions to work:
                {
                    # worker_auth_role: "crossbar.worker.worker-001"
                    u"uri": worker_auth_role,
                    u"match": u"prefix",
                    u"allow": {
                        u"call": False,
                        u"register": True,
                        u"publish": True,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                },
                {
                    u"uri": u"crossbar.get_status",
                    u"match": u"exact",
                    u"allow": {
                        u"call": True,
                        u"register": False,
                        u"publish": False,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                }
            ]
        }
        self._router_factory.add_role(self._realm, worker_role_config)

    def _drop_worker_role(self, worker_auth_role):
        """
        Remove a previously added worker authorization role.
        """
        self._router_factory.drop_role(self._realm, worker_auth_role)

    def _extend_worker_args(self, args, options):
        # hook: personalities may extend worker command line args
        pass

    def _add_extra_controller_components(self, controller_options):
        # hook: personalities may attach extra controller components
        pass

    def _set_shutdown_triggers(self, controller_options):
        """
        Set the node shutdown triggers, from configuration if present,
        else the default (shutdown on worker exit).
        """
        # allow to override node shutdown triggers
        #
        if 'shutdown' in controller_options:
            self._node_shutdown_triggers = controller_options['shutdown']
            self.log.info("Using node shutdown triggers {triggers} from configuration", triggers=self._node_shutdown_triggers)
        else:
            self._node_shutdown_triggers = [NODE_SHUTDOWN_ON_WORKER_EXIT]
            self.log.info("Using default node shutdown triggers {triggers}", triggers=self._node_shutdown_triggers)

    def stop(self):
        """
        Stop the node by shutting down the node controller (marked as a
        clean shutdown).
        """
        self._controller._shutdown_was_clean = True
        return self._controller.shutdown()

    @inlineCallbacks
    def start(self, node_id=None):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        :param node_id: Optional node ID; takes precedence over configuration
            and the hostname-derived default.

        :returns: (via returnValue) dict with key 'shutdown_complete' holding
            a Deferred that fires when the node has shut down.
        """
        self.log.info('Starting {personality} node {method}',
                      personality=self.personality.NAME,
                      method=hltype(Node.start))

        # a configuration must have been loaded before
        if not self._config:
            raise Exception("No node configuration set")

        # a node can only be started once for now
        assert self._shutdown_complete is None
        assert self._node_id is None

        # get controller config/options
        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # the node ID: CLI takes precedence over config over hostname
        if node_id:
            self._node_id = node_id
            _node_id_source = 'explicit run-time argument'
        elif 'id' in controller_config:
            self._node_id = controller_config['id']
            _node_id_source = 'explicit configuration'
        else:
            self._node_id = u'{}'.format(socket.gethostname()).lower()
            _node_id_source = 'hostname'
        self.log.info('Node ID {node_id} set from {node_id_source}',
                      node_id=hlid(self._node_id),
                      node_id_source=_node_id_source)

        # set controller process title
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # local node management router
        self._router_factory = RouterFactory(self._node_id, None)
        self._router_session_factory = RouterSessionFactory(self._router_factory)
        rlm_config = {
            'name': self._realm
        }
        rlm = RouterRealm(None, rlm_config)
        router = self._router_factory.start_realm(rlm)

        # setup global static roles
        self._add_global_roles()

        # always add a realm service session
        cfg = ComponentConfig(self._realm)
        rlm.session = (self.ROUTER_SERVICE)(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')
        self.log.debug('Router service session attached [{router_service}]', router_service=qual(self.ROUTER_SERVICE))

        # add the node controller singleton component
        self._controller = self.NODE_CONTROLLER(self)
        self._router_session_factory.add(self._controller, authrole=u'trusted')
        self.log.debug('Node controller attached [{node_controller}]', node_controller=qual(self.NODE_CONTROLLER))

        # add extra node controller components
        self._add_extra_controller_components(controller_options)

        # setup Node shutdown triggers
        self._set_shutdown_triggers(controller_options)

        # setup node shutdown Deferred
        self._shutdown_complete = Deferred()

        # startup the node personality ..
        yield self.personality.Node.boot(self)

        # notify systemd that we are fully up and running
        try:
            import sdnotify
        except ImportError:
            # do nothing on non-systemd platforms
            pass
        else:
            sdnotify.SystemdNotifier().notify("READY=1")

        # return a shutdown deferred which we will fire to notify the code that
        # called start() - which is the main crossbar boot code
        res = {
            'shutdown_complete': self._shutdown_complete
        }
        returnValue(res)
        # returnValue(self._shutdown_complete)

    def boot(self):
        """
        Boot the node from the already loaded local configuration.
        """
        self.log.info('Booting node {method}', method=hltype(Node.boot))
        return self.boot_from_config(self._config)

    @inlineCallbacks
    def boot_from_config(self, config):
        """
        Startup elements in the node as specified in the provided node configuration.
        """
        self.log.info('Configuring node from local configuration {method}',
                      method=hltype(Node.boot_from_config))

        # get controller configuration subpart
        controller = config.get('controller', {})

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.call(u'crossbar.start_manhole', controller['manhole'], options=CallOptions())
            self.log.debug("controller: manhole started")

        # startup all workers
        workers = config.get('workers', [])
        if len(workers):
            self.log.info('Starting {nworkers} workers ...', nworkers=len(workers))
        else:
            self.log.info('No workers configured!')

        for worker in workers:

            # worker ID: from config, or auto-numbered
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                worker_id = u'worker-{:03d}'.format(self._worker_no)
                self._worker_no += 1

            # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
            worker_type = worker['type']

            # native worker processes setup
            if worker_type in self._native_workers:

                # set logname depending on native worker type
                worker_logname = '{} "{}"'.format(self._native_workers[worker_type]['logname'], worker_id)

                # any worker specific options
                worker_options = worker.get('options', {})

                # now actually start the (native) worker ..
                yield self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker_options, options=CallOptions())

                # setup native worker generic stuff: dispatch to the
                # per-worker-type configuration method on this class
                method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_'))
                try:
                    config_fn = getattr(self, method_name)
                except AttributeError:
                    raise ValueError(
                        "A native worker of type '{}' is configured but "
                        "there is no method '{}' on {}".format(worker_type, method_name, type(self))
                    )
                yield config_fn(worker_logname, worker_id, worker)

            # guest worker processes setup
            elif worker_type == u'guest':

                # now actually start the (guest) worker ..

                # FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers
                # only take the options (which is part of the whole config item for the worker)
                yield self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker, options=CallOptions())

            else:
                raise Exception('logic error: unexpected worker_type="{}"'.format(worker_type))

        self.log.info('Local node configuration applied successfully!')

    @inlineCallbacks
    def _configure_native_worker_common(self, worker_logname, worker_id, worker):
        """
        Configuration steps shared by all native worker types
        (CPU affinity, manhole).
        """
        # expanding PYTHONPATH of the newly started worker is now done
        # directly in NodeController._start_native_worker
        worker_options = worker.get('options', {})
        if False:
            if 'pythonpath' in worker_options:
                added_paths = yield self._controller.call(u'crossbar.worker.{}.add_pythonpath'.format(worker_id), worker_options['pythonpath'], options=CallOptions())
                self.log.warn("{worker}: PYTHONPATH extended for {paths}",
                              worker=worker_logname, paths=added_paths)

        # FIXME: as the CPU affinity is in the worker options, this _also_ (see above fix)
        # should be done directly in NodeController._start_native_worker
        if True:
            if 'cpu_affinity' in worker_options:
                new_affinity = yield self._controller.call(u'crossbar.worker.{}.set_cpu_affinity'.format(worker_id), worker_options['cpu_affinity'], options=CallOptions())
                self.log.debug("{worker}: CPU affinity set to {affinity}",
                               worker=worker_logname, affinity=new_affinity)

        # this is fine to start after the worker has been started, as manhole is
        # CB developer/support feature anyways (like a vendor diagnostics port)
        if 'manhole' in worker:
            yield self._controller.call(u'crossbar.worker.{}.start_manhole'.format(worker_id), worker['manhole'], options=CallOptions())
            self.log.debug("{worker}: manhole started", worker=worker_logname)

    @inlineCallbacks
    def _configure_native_worker_router(self, worker_logname, worker_id, worker):
        """
        Configure a router worker: realms (with roles and uplinks),
        connections, embedded components and transports.
        """
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # start realms on router
        for realm in worker.get('realms', []):

            # start realm
            if 'id' in realm:
                realm_id = realm.pop('id')
            else:
                realm_id = 'realm-{:03d}'.format(self._realm_no)
                self._realm_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_router_realm'.format(worker_id), realm_id, realm, options=CallOptions())
            self.log.info("{worker}: realm '{realm_id}' (named '{realm_name}') started",
                          worker=worker_logname, realm_id=realm_id, realm_name=realm['name'])

            # add roles to realm
            for role in realm.get('roles', []):
                if 'id' in role:
                    role_id = role.pop('id')
                else:
                    role_id = 'role-{:03d}'.format(self._role_no)
                    self._role_no += 1

                yield self._controller.call(u'crossbar.worker.{}.start_router_realm_role'.format(worker_id), realm_id, role_id, role, options=CallOptions())
                self.log.info(
                    "{logname}: role '{role}' (named '{role_name}') started on realm '{realm}'",
                    logname=worker_logname,
                    role=role_id,
                    role_name=role['name'],
                    realm=realm_id,
                )

            # start uplinks for realm
            for uplink in realm.get('uplinks', []):
                if 'id' in uplink:
                    uplink_id = uplink.pop('id')
                else:
                    uplink_id = 'uplink-{:03d}'.format(self._uplink_no)
                    self._uplink_no += 1

                yield self._controller.call(u'crossbar.worker.{}.start_router_realm_uplink'.format(worker_id), realm_id, uplink_id, uplink, options=CallOptions())
                self.log.info(
                    "{logname}: uplink '{uplink}' started on realm '{realm}'",
                    logname=worker_logname,
                    uplink=uplink_id,
                    realm=realm_id,
                )

        # start connections (such as PostgreSQL database connection pools)
        # to run embedded in the router
        for connection in worker.get('connections', []):

            if 'id' in connection:
                connection_id = connection.pop('id')
            else:
                connection_id = 'connection-{:03d}'.format(self._connection_no)
                self._connection_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_connection'.format(worker_id), connection_id, connection, options=CallOptions())
            self.log.info(
                "{logname}: connection '{connection}' started",
                logname=worker_logname,
                connection=connection_id,
            )

        # start components to run embedded in the router
        for component in worker.get('components', []):

            if 'id' in component:
                component_id = component.pop('id')
            else:
                component_id = 'component-{:03d}'.format(self._component_no)
                self._component_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_router_component'.format(worker_id), component_id, component, options=CallOptions())
            self.log.info(
                "{logname}: component '{component}' started",
                logname=worker_logname,
                component=component_id,
            )

        # start transports on router
        for transport in worker.get('transports', []):

            if 'id' in transport:
                transport_id = transport.pop('id')
            else:
                transport_id = 'transport-{:03d}'.format(self._transport_no)
                self._transport_no += 1

            add_paths_on_transport_create = False

            yield self._controller.call(u'crossbar.worker.{}.start_router_transport'.format(worker_id),
                                        transport_id,
                                        transport,
                                        create_paths=add_paths_on_transport_create,
                                        options=CallOptions())
            self.log.info(
                "{logname}: transport '{tid}' started",
                logname=worker_logname,
                tid=transport_id,
            )

            # when paths are not created along with the transport itself,
            # start each non-root web service individually
            if not add_paths_on_transport_create:
                if transport['type'] == 'web':
                    paths = transport.get('paths', {})
                elif transport['type'] == 'universal':
                    paths = transport.get('web', {}).get('paths', {})
                else:
                    paths = None

                if paths:
                    for path in sorted(paths):
                        if path != '/':
                            config = paths[path]
                            yield self._controller.call(u'crossbar.worker.{}.start_web_transport_service'.format(worker_id),
                                                        transport_id,
                                                        path,
                                                        config,
                                                        options=CallOptions())
                            self.log.info(
                                "{logname}: web service '{path_type}' started on path '{path}' on transport '{tid}'",
                                logname=worker_logname,
                                path_type=config['type'],
                                path=path,
                                tid=transport_id,
                            )

    @inlineCallbacks
    def _configure_native_worker_container(self, worker_logname, worker_id, worker):
        """
        Configure a container worker: connections and embedded components.
        """
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # if components exit "very soon after" we try to start them,
        # we consider that a failure and shut our node down. We remove
        # this subscription 2 seconds after we're done starting
        # everything (see below). This is necessary as start_component
        # returns as soon as we've established a connection to the
        # component
        def component_exited(info):
            component_id = info.get("id")
            self.log.critical("Component '{component_id}' failed to start; shutting down node.", component_id=component_id)
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

        topic = u'crossbar.worker.{}.container.on_component_stop'.format(worker_id)
        component_stop_sub = yield self._controller.subscribe(component_exited, topic)

        # start connections (such as PostgreSQL database connection pools)
        # to run embedded in the container
        #
        for connection in worker.get('connections', []):

            if 'id' in connection:
                connection_id = connection.pop('id')
            else:
                connection_id = 'connection-{:03d}'.format(self._connection_no)
                self._connection_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_connection'.format(worker_id), connection_id, connection, options=CallOptions())
            self.log.info(
                "{logname}: connection '{connection}' started",
                logname=worker_logname,
                connection=connection_id,
            )

        # start components to run embedded in the container
        #
        for component in worker.get('components', []):

            if 'id' in component:
                component_id = component.pop('id')
            else:
                component_id = 'component-{:03d}'.format(self._component_no)
                self._component_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_component'.format(worker_id), component_id, component, options=CallOptions())
            self.log.info("{worker}: component '{component_id}' started",
                          worker=worker_logname, component_id=component_id)

        # after 2 seconds, consider all the application components running
        self._reactor.callLater(2, component_stop_sub.unsubscribe)

    @inlineCallbacks
    def _configure_native_worker_websocket_testee(self, worker_logname, worker_id, worker):
        """
        Configure a websocket-testee worker: start its single transport.
        """
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # start transport on websocket-testee
        transport = worker['transport']
        transport_id = 'transport-{:03d}'.format(self._transport_no)
        # FIX: was 'self._transport_no = 1', which reset the shared counter
        # (every sibling configure method increments it) and could yield
        # duplicate auto-generated transport IDs
        self._transport_no += 1

        yield self._controller.call(u'crossbar.worker.{}.start_websocket_testee_transport'.format(worker_id), transport_id, transport, options=CallOptions())
        self.log.info(
            "{logname}: transport '{tid}' started",
            logname=worker_logname,
            tid=transport_id,
        )
class TestBrokerPublish(unittest.TestCase):
    """
    Tests for crossbar.router.broker.Broker
    """
    def setUp(self):
        """
        Setup router and router session factories.
        """
        # create a router factory
        self.router_factory = RouterFactory(u'mynode')

        # start a realm
        self.realm = RouterRealm(None, {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything: a catch-all role with full permissions
        permissions = RouterPermissions('', True, True, True, True, True)
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(self.router, None, default_permissions=permissions))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # resolving the future proves the session actually joined
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d

    def test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        _RouterApplicationSession's send() method (from the Hello msg)
        """
        # setup
        the_exception = RuntimeError("sadness")
        errors = []

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

            def onUserError(self, fail, msg):
                # collect (failure, message) pairs instead of logging
                errors.append((fail, msg))

        session = TestSession(types.ComponentConfig(u'realm1'))
        from crossbar.router.session import _RouterApplicationSession

        # Note to self: original code was logging directly in
        # _RouterApplicationSession -- which *may* actually be better?
        # or not...
        with mock.patch.object(_RouterApplicationSession, 'log') as logger:
            # this should call onJoin, triggering our error
            self.session_factory.add(session)

            if True:
                self.assertEqual(1, len(errors), "Didn't see our error")
                self.assertEqual(the_exception, errors[0][0].value)

            else:
                # check we got the right log.failure() call
                self.assertTrue(len(logger.method_calls) > 0)
                call = logger.method_calls[0]
                # for a MagicMock call-object, 0th thing is the method-name, 1st
                # thing is the arg-tuple, 2nd thing is the kwargs.
                self.assertEqual(call[0], 'failure')
                self.assertEqual(call[1][0].value, the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception
        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello(u'realm1', dict(caller=role.RoleCallerFeatures()))

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

            # check we got the right log.failure() call
            self.assertTrue(len(logger.method_calls) > 0)
            call = logger.method_calls[0]
            # for a MagicMock call-object, 0th thing is the method-name, 1st
            # thing is the arg-tuple, 2nd thing is the kwargs.
            self.assertEqual(call[0], 'failure')
            self.assertTrue('log_failure' in call[2])
            self.assertEqual(call[2]['log_failure'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception
        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate(u'bogus signature')

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

            # check we got the right log.failure() call
            self.assertTrue(len(logger.method_calls) > 0)
            call = logger.method_calls[0]
            # for a MagicMock call-object, 0th thing is the method-name, 1st
            # thing is the arg-tuple, 2nd thing is the kwargs.
            self.assertEqual(call[0], 'failure')
            self.assertTrue('log_failure' in call[2])
            self.assertEqual(call[2]['log_failure'].value, the_exception)

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some
        topic and add it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                d2 = self.subscribe(lambda: None, u'com.example.topic1')

                def ok(_):
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d

    def test_publish_closed_session(self):
        """
        ensure a session doesn't get Events if it's closed
        (see also issue #431)
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass
        session0 = TestSession()
        session1 = TestSession()
        router = mock.MagicMock()
        broker = Broker(router)

        # let's just "cheat" our way a little to the right state by
        # injecting our subscription "directly" (e.g. instead of
        # faking out an entire Subscribe etc. flow
        # ...so we need _subscriptions_map to have at least one
        # subscription (our test one) for the topic we'll publish to
        broker._subscription_map.add_observer(session0, u'test.topic')

        # simulate the session state we want, which is that a
        # transport is connected (._transport != None) but there
        # _session_id *is* None (not joined yet, or left already)
        self.assertIs(None, session0._session_id)
        session0._transport = mock.MagicMock()
        session1._session_id = 1234  # "from" session should look connected + joined
        session1._transport = mock.MagicMock()

        # here's the main "cheat"; we're faking out the
        # router.authorize because we need it to callback immediately
        router.authorize = mock.MagicMock(return_value=txaio.create_future_success(True))

        # now we scan call "processPublish" such that we get to the
        # condition we're interested in (this "comes from" session1
        # beacuse by default publishes don't go to the same session)
        pubmsg = message.Publish(123, u'test.topic')
        broker.processPublish(session1, pubmsg)

        # neither session should have sent anything on its transport
        self.assertEquals(session0._transport.method_calls, [])
        self.assertEquals(session1._transport.method_calls, [])
class Node(object):
    """
    A Crossbar.io node is the running a controller process and one or multiple
    worker processes.

    A single Crossbar.io node runs exactly one instance of this class, hence
    this class can be considered a system singleton.
    """

    log = make_logger()

    def __init__(self, cbdir=None, reactor=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode
        :param reactor: Reactor to run on.
        :type reactor: obj or None
        """
        # node directory
        self._cbdir = cbdir or u"."

        # reactor we should run on
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # the node's name (must be unique within the management realm)
        self._node_id = None

        # the node's management realm
        self._realm = None

        # config of this node.
        self._config = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # when run in "managed mode", this will hold the uplink WAMP session
        # from the node controller to the mananagement application
        self._manager = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

    def load(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in CDC default config.

        :param configfile: Name of the config file, relative to the node directory;
            when None, a minimal built-in CDC-enabled config is used instead.
        """
        if configfile:
            configpath = os.path.join(self._cbdir, configfile)

            self.log.debug("Loading node configuration from '{configpath}' ..",
                           configpath=configpath)

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = checkconfig.check_config_file(configpath, silence=True)

            self.log.info("Node configuration loaded from '{configfile}'",
                          configfile=configfile)
        else:
            self._config = {u"controller": {u"cdc": {u"enabled": True}}}
            checkconfig.check_config(self._config)
            self.log.info("Node configuration loaded from built-in CDC config.")

    def _prepare_node_keys(self):
        """
        Load or create the node's CDC identity: the node principal
        ("<node id>@<management realm>") and the node's Ed25519 signing key,
        stored under CBDIR/.cdc/.

        :returns: tuple of (realm, node_id, signing key)
        """
        from nacl.signing import SigningKey
        from nacl.encoding import HexEncoder

        # make sure CBDIR/.cdc exists
        #
        cdc_dir = os.path.join(self._cbdir, ".cdc")
        if os.path.isdir(cdc_dir):
            pass
        elif os.path.exists(cdc_dir):
            raise Exception(".cdc exists, but isn't a directory")
        else:
            os.mkdir(cdc_dir)
            self.log.info("CDC directory created")

        # load node ID, either from .cdc/node.id or from CDC_NODE_ID
        #
        def split_nid(nid_s):
            nid_c = nid_s.strip().split("@")
            if len(nid_c) != 2:
                raise Exception(
                    "illegal node principal '{}' - must follow the form <node id>@<management realm>".format(nid_s)
                )
            node_id, realm = nid_c
            # FIXME: regex check node_id and realm
            return node_id, realm

        nid_file = os.path.join(cdc_dir, "node.id")
        node_id, realm = None, None
        if os.path.isfile(nid_file):
            with open(nid_file, "r") as f:
                node_id, realm = split_nid(f.read())
        elif os.path.exists(nid_file):
            raise Exception("{} exists, but isn't a file".format(nid_file))
        else:
            if "CDC_NODE_ID" in os.environ:
                node_id, realm = split_nid(os.environ["CDC_NODE_ID"])
            else:
                raise Exception(
                    "Neither node ID file {} exists nor CDC_NODE_ID environment variable set".format(nid_file)
                )

        # Load the node key, either from .cdc/node.key or from CDC_NODE_KEY.
        # The node key is a Ed25519 key in either raw format (32 bytes) or in
        # hex-encoded form (64 characters).
        #
        # Actually, what's loaded is not the secret Ed25519 key, but the _seed_
        # for that key. Private keys are derived from this 32-byte (256-bit)
        # random seed value. It is thus the seed value which is sensitive and
        # must be protected.
        #
        skey_file = os.path.join(cdc_dir, "node.key")
        skey = None
        if os.path.isfile(skey_file):
            # FIXME: check file permissions are 0600!

            # This value is read in here.
            skey_len = os.path.getsize(skey_file)
            if skey_len in (32, 64):
                # NOTE(review): file is opened in text mode even for the 32-byte
                # raw-seed case - confirm this is intended on Python 3
                with open(skey_file, "r") as f:
                    skey_seed = f.read()
                    encoder = None
                    if skey_len == 64:
                        encoder = HexEncoder
                    skey = SigningKey(skey_seed, encoder=encoder)
                self.log.info("Existing CDC node key loaded from {skey_file}.",
                              skey_file=skey_file)
            else:
                # FIX: the original message contained an unformatted '{}' -
                # the offending key length was never interpolated
                raise Exception(
                    "invalid node key length {} (key must either be 32 raw bytes or hex "
                    "encoded 32 bytes, hence 64 byte char length)".format(skey_len)
                )
        elif os.path.exists(skey_file):
            raise Exception("{} exists, but isn't a file".format(skey_file))
        else:
            skey = SigningKey.generate()
            skey_seed = skey.encode(encoder=HexEncoder)
            with open(skey_file, "w") as f:
                f.write(skey_seed)

            # set file mode to read only for owner
            # 384 (decimal) == 0600 (octal) - we use that for Py2/3 reasons
            os.chmod(skey_file, 384)
            self.log.info("New CDC node key {skey_file} generated.", skey_file=skey_file)

        return realm, node_id, skey

    @inlineCallbacks
    def start(self, cdc_mode=False):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        :param cdc_mode: Currently unused here; managed mode is decided from the
            "cdc" section of the controller configuration.
        """
        if not self._config:
            raise Exception("No node configuration loaded")

        controller_config = self._config.get("controller", {})
        controller_options = controller_config.get("options", {})

        # set controller process title
        #
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get("title", "crossbar-controller"))

        # the node controller realm
        #
        self._realm = controller_config.get("realm", "crossbar")

        # the node's name (must be unique within the management realm when running
        # in "managed mode")
        #
        if "id" in controller_config:
            self._node_id = controller_config["id"]
            self.log.info("Node ID '{node_id}' set from config", node_id=self._node_id)
        elif "CDC_ID" in os.environ:
            self._node_id = u"{}".format(os.environ["CDC_ID"])
            self.log.info("Node ID '{node_id}' set from environment variable CDC_ID",
                          node_id=self._node_id)
        else:
            self._node_id = u"{}".format(socket.gethostname())
            self.log.info("Node ID '{node_id}' set from hostname", node_id=self._node_id)

        # standalone vs managed mode
        #
        if "cdc" in controller_config and controller_config["cdc"].get("enabled", False):

            self._prepare_node_keys()

            cdc_config = controller_config["cdc"]

            # CDC connecting transport
            #
            if "transport" in cdc_config:
                transport = cdc_config["transport"]
                # NOTE(review): this raises when "tls" is *absent*, yet the message
                # says "TLS activated ... but 'hostname' not provided" - the intent
                # (require TLS for CDC connections) should be confirmed
                if "tls" in transport["endpoint"]:
                    hostname = transport["endpoint"]["tls"]["hostname"]
                else:
                    raise Exception("TLS activated on CDC connection, but 'hostname' not provided")
                self.log.warn("CDC transport configuration overridden from node config!")
            else:
                transport = {
                    "type": u"websocket",
                    "url": u"wss://devops.crossbario.com/ws",
                    "endpoint": {
                        "type": u"tcp",
                        "host": u"devops.crossbario.com",
                        "port": 443,
                        "timeout": 5,
                        "tls": {"hostname": u"devops.crossbario.com"},
                    },
                }
                hostname = u"devops.crossbario.com"

            # CDC management realm
            #
            if "realm" in cdc_config:
                realm = cdc_config["realm"]
                self.log.info("CDC management realm '{realm}' set from config", realm=realm)
            elif "CDC_REALM" in os.environ:
                realm = u"{}".format(os.environ["CDC_REALM"]).strip()
                self.log.info("CDC management realm '{realm}' set from enviroment variable CDC_REALM",
                              realm=realm)
            else:
                raise Exception(
                    "CDC management realm not set - either 'realm' must be set in node configuration, or in CDC_REALM enviroment variable"
                )

            # CDC authentication credentials (for WAMP-CRA)
            #
            authid = self._node_id
            if "secret" in cdc_config:
                authkey = cdc_config["secret"]
                self.log.info("CDC authentication secret loaded from config")
            elif "CDC_SECRET" in os.environ:
                authkey = u"{}".format(os.environ["CDC_SECRET"]).strip()
                self.log.info("CDC authentication secret loaded from environment variable CDC_SECRET")
            else:
                raise Exception(
                    "CDC authentication secret not set - either 'secret' must be set in node configuration, or in CDC_SECRET enviroment variable"
                )

            # extra info forwarded to CDC client session
            #
            extra = {
                "node": self,
                "onready": Deferred(),
                "onexit": Deferred(),
                "authid": authid,
                "authkey": authkey,
            }

            runner = ApplicationRunner(
                url=transport["url"],
                realm=realm,
                extra=extra,
                ssl=optionsForClientTLS(hostname),
                debug=False,
                debug_wamp=False,
            )

            try:
                self.log.info("Connecting to CDC at '{url}' ..", url=transport["url"])
                yield runner.run(NodeManagementSession, start_reactor=False)

                # wait until we have attached to the uplink CDC
                self._manager = yield extra["onready"]
            except Exception as e:
                raise Exception("Could not connect to CDC - {}".format(e))

            # in managed mode, a node - by default - only shuts down when explicitly asked to,
            # or upon a fatal error in the node controller
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED]

            self.log.info("Connected to Crossbar.io DevOps Center (CDC)! Your node runs in managed mode.")
        else:
            self._manager = None

            # in standalone mode, a node - by default - is immediately shutting down whenever
            # a worker exits (successfully or with error)
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # allow to override node shutdown triggers
        #
        if "shutdown" in controller_options:
            self.log.info("Overriding default node shutdown triggers with {} from node config".format(
                controller_options["shutdown"]))
            self._node_shutdown_triggers = controller_options["shutdown"]
        else:
            self.log.info("Using default node shutdown triggers {}".format(self._node_shutdown_triggers))

        # router and factory that creates router sessions
        #
        self._router_factory = RouterFactory(self._node_id)
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        rlm_config = {"name": self._realm}
        rlm = RouterRealm(None, rlm_config)

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        cfg = ComponentConfig(self._realm)

        rlm.session = RouterServiceSession(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u"trusted")

        # bridge the local router to the CDC uplink when in managed mode
        if self._manager:
            self._bridge_session = NodeManagementBridgeSession(cfg, self, self._manager)
            self._router_session_factory.add(self._bridge_session, authrole=u"trusted")
        else:
            self._bridge_session = None

        # the node controller singleton WAMP application session
        #
        self._controller = NodeControllerSession(self)

        # add the node controller singleton session to the router
        #
        self._router_session_factory.add(self._controller, authrole=u"trusted")

        # Detect WAMPlets
        #
        wamplets = self._controller._get_wamplets()
        if len(wamplets) > 0:
            self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
            for wpl in wamplets:
                self.log.info("WAMPlet {dist}.{name}", dist=wpl["dist"], name=wpl["name"])
        else:
            self.log.debug("No WAMPlets detected in enviroment.")

        # startup the node from configuration; any failure stops the reactor
        panic = False

        try:
            yield self._startup(self._config)
        except ApplicationError as e:
            panic = True
            self.log.error("{msg}", msg=e.error_message())
        except Exception:
            panic = True
            traceback.print_exc()

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    @inlineCallbacks
    def _startup(self, config):
        """
        Startup elements from the given node configuration: start the manhole
        (if configured), then start each configured worker (router, container,
        websocket-testee or guest) and its sub-elements by calling into the
        node controller session.

        :param config: Parsed and checked node configuration.
        """
        # fake call details information when calling into
        # remoted procedure locally
        #
        call_details = CallDetails(caller=0)

        controller = config.get("controller", {})

        # start Manhole in node controller
        #
        if "manhole" in controller:
            yield self._controller.start_manhole(controller["manhole"], details=call_details)

        # startup all workers
        #
        worker_no = 1

        call_options = CallOptions(disclose_me=True)

        for worker in config.get("workers", []):
            # worker ID, type and logname
            #
            if "id" in worker:
                worker_id = worker.pop("id")
            else:
                worker_id = "worker{}".format(worker_no)
                worker_no += 1

            worker_type = worker["type"]
            worker_options = worker.get("options", {})

            if worker_type == "router":
                worker_logname = "Router '{}'".format(worker_id)
            elif worker_type == "container":
                worker_logname = "Container '{}'".format(worker_id)
            elif worker_type == "websocket-testee":
                worker_logname = "WebSocketTestee '{}'".format(worker_id)
            elif worker_type == "guest":
                worker_logname = "Guest '{}'".format(worker_id)
            else:
                raise Exception("logic error")

            # router/container
            #
            if worker_type in ["router", "container", "websocket-testee"]:

                # start a new native worker process ..
                #
                if worker_type == "router":
                    yield self._controller.start_router(worker_id, worker_options, details=call_details)
                elif worker_type == "container":
                    yield self._controller.start_container(worker_id, worker_options, details=call_details)
                elif worker_type == "websocket-testee":
                    yield self._controller.start_websocket_testee(worker_id, worker_options, details=call_details)
                else:
                    raise Exception("logic error")

                # setup native worker generic stuff
                #
                if "pythonpath" in worker_options:
                    added_paths = yield self._controller.call(
                        "crossbar.node.{}.worker.{}.add_pythonpath".format(self._node_id, worker_id),
                        worker_options["pythonpath"],
                        options=call_options,
                    )
                    self.log.debug("{worker}: PYTHONPATH extended for {paths}",
                                   worker=worker_logname, paths=added_paths)

                if "cpu_affinity" in worker_options:
                    new_affinity = yield self._controller.call(
                        "crossbar.node.{}.worker.{}.set_cpu_affinity".format(self._node_id, worker_id),
                        worker_options["cpu_affinity"],
                        options=call_options,
                    )
                    self.log.debug("{worker}: CPU affinity set to {affinity}",
                                   worker=worker_logname, affinity=new_affinity)

                if "manhole" in worker:
                    yield self._controller.call(
                        "crossbar.node.{}.worker.{}.start_manhole".format(self._node_id, worker_id),
                        worker["manhole"],
                        options=call_options,
                    )
                    self.log.debug("{worker}: manhole started", worker=worker_logname)

                # setup router worker
                #
                if worker_type == "router":

                    # start realms on router
                    #
                    realm_no = 1

                    for realm in worker.get("realms", []):

                        if "id" in realm:
                            realm_id = realm.pop("id")
                        else:
                            realm_id = "realm{}".format(realm_no)
                            realm_no += 1

                        # extract schema information from WAMP-flavored Markdown
                        #
                        schemas = None
                        if "schemas" in realm:
                            schemas = {}
                            schema_pat = re.compile(r"```javascript(.*?)```", re.DOTALL)
                            cnt_files = 0
                            cnt_decls = 0
                            for schema_file in realm.pop("schemas"):
                                schema_file = os.path.join(self._cbdir, schema_file)
                                self.log.info(
                                    "{worker}: processing WAMP-flavored Markdown file {schema_file} for WAMP schema declarations",
                                    worker=worker_logname,
                                    schema_file=schema_file,
                                )
                                with open(schema_file, "r") as f:
                                    cnt_files += 1
                                    for d in schema_pat.findall(f.read()):
                                        try:
                                            o = json.loads(d)
                                            if (
                                                isinstance(o, dict) and "$schema" in o and
                                                o["$schema"] == u"http://wamp.ws/schema#"
                                            ):
                                                uri = o["uri"]
                                                if uri not in schemas:
                                                    schemas[uri] = {}
                                                schemas[uri].update(o)
                                                cnt_decls += 1
                                        except Exception:
                                            self.log.failure(
                                                "{worker}: WARNING - failed to process declaration in {schema_file} - {log_failure.value}",
                                                worker=worker_logname,
                                                schema_file=schema_file,
                                            )
                            self.log.info(
                                "{worker}: processed {cnt_files} files extracting {cnt_decls} schema declarations and {len_schemas} URIs",
                                worker=worker_logname,
                                cnt_files=cnt_files,
                                cnt_decls=cnt_decls,
                                len_schemas=len(schemas),
                            )

                        enable_trace = realm.get("trace", False)
                        yield self._controller.call(
                            "crossbar.node.{}.worker.{}.start_router_realm".format(self._node_id, worker_id),
                            realm_id,
                            realm,
                            schemas,
                            enable_trace=enable_trace,
                            options=call_options,
                        )
                        self.log.info(
                            "{worker}: realm '{realm_id}' (named '{realm_name}') started",
                            worker=worker_logname,
                            realm_id=realm_id,
                            realm_name=realm["name"],
                            enable_trace=enable_trace,
                        )

                        # add roles to realm
                        #
                        role_no = 1
                        for role in realm.get("roles", []):
                            if "id" in role:
                                role_id = role.pop("id")
                            else:
                                role_id = "role{}".format(role_no)
                                role_no += 1

                            yield self._controller.call(
                                "crossbar.node.{}.worker.{}.start_router_realm_role".format(self._node_id, worker_id),
                                realm_id,
                                role_id,
                                role,
                                options=call_options,
                            )
                            self.log.info(
                                "{}: role '{}' (named '{}') started on realm '{}'".format(
                                    worker_logname, role_id, role["name"], realm_id
                                )
                            )

                        # start uplinks for realm
                        #
                        uplink_no = 1
                        for uplink in realm.get("uplinks", []):
                            if "id" in uplink:
                                uplink_id = uplink.pop("id")
                            else:
                                uplink_id = "uplink{}".format(uplink_no)
                                uplink_no += 1

                            yield self._controller.call(
                                "crossbar.node.{}.worker.{}.start_router_realm_uplink".format(self._node_id, worker_id),
                                realm_id,
                                uplink_id,
                                uplink,
                                options=call_options,
                            )
                            self.log.info(
                                "{}: uplink '{}' started on realm '{}'".format(worker_logname, uplink_id, realm_id)
                            )

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the router
                    #
                    connection_no = 1

                    for connection in worker.get("connections", []):

                        if "id" in connection:
                            connection_id = connection.pop("id")
                        else:
                            connection_id = "connection{}".format(connection_no)
                            connection_no += 1

                        yield self._controller.call(
                            "crossbar.node.{}.worker.{}.start_connection".format(self._node_id, worker_id),
                            connection_id,
                            connection,
                            options=call_options,
                        )
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the router
                    #
                    component_no = 1

                    for component in worker.get("components", []):

                        if "id" in component:
                            component_id = component.pop("id")
                        else:
                            component_id = "component{}".format(component_no)
                            component_no += 1

                        yield self._controller.call(
                            "crossbar.node.{}.worker.{}.start_router_component".format(self._node_id, worker_id),
                            component_id,
                            component,
                            options=call_options,
                        )
                        self.log.info("{}: component '{}' started".format(worker_logname, component_id))

                    # start transports on router
                    #
                    transport_no = 1

                    for transport in worker["transports"]:

                        if "id" in transport:
                            transport_id = transport.pop("id")
                        else:
                            transport_id = "transport{}".format(transport_no)
                            transport_no += 1

                        yield self._controller.call(
                            "crossbar.node.{}.worker.{}.start_router_transport".format(self._node_id, worker_id),
                            transport_id,
                            transport,
                            options=call_options,
                        )
                        self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                # setup container worker
                #
                elif worker_type == "container":

                    component_no = 1

                    # if components exit "very soon after" we try to
                    # start them, we consider that a failure and shut
                    # our node down. We remove this subscription 2
                    # seconds after we're done starting everything
                    # (see below). This is necessary as
                    # start_container_component returns as soon as
                    # we've established a connection to the component
                    def component_exited(info):
                        component_id = info.get("id")
                        self.log.critical(
                            "Component '{component_id}' failed to start; shutting down node.",
                            component_id=component_id
                        )
                        try:
                            self._reactor.stop()
                        except twisted.internet.error.ReactorNotRunning:
                            pass
                    topic = "crossbar.node.{}.worker.{}.container.on_component_stop".format(self._node_id, worker_id)
                    component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the container
                    #
                    connection_no = 1

                    for connection in worker.get("connections", []):

                        if "id" in connection:
                            connection_id = connection.pop("id")
                        else:
                            connection_id = "connection{}".format(connection_no)
                            connection_no += 1

                        yield self._controller.call(
                            "crossbar.node.{}.worker.{}.start_connection".format(self._node_id, worker_id),
                            connection_id,
                            connection,
                            options=call_options,
                        )
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the container
                    #
                    for component in worker.get("components", []):

                        if "id" in component:
                            component_id = component.pop("id")
                        else:
                            component_id = "component{}".format(component_no)
                            component_no += 1

                        yield self._controller.call(
                            "crossbar.node.{}.worker.{}.start_container_component".format(self._node_id, worker_id),
                            component_id,
                            component,
                            options=call_options,
                        )
                        self.log.info(
                            "{worker}: component '{component_id}' started",
                            worker=worker_logname,
                            component_id=component_id,
                        )

                    # after 2 seconds, consider all the application components running
                    self._reactor.callLater(2, component_stop_sub.unsubscribe)

                # setup websocket-testee worker
                #
                elif worker_type == "websocket-testee":

                    # start transports on router
                    #
                    transport = worker["transport"]
                    transport_no = 1
                    transport_id = "transport{}".format(transport_no)

                    yield self._controller.call(
                        "crossbar.node.{}.worker.{}.start_websocket_testee_transport".format(self._node_id, worker_id),
                        transport_id,
                        transport,
                        options=call_options,
                    )
                    self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                else:
                    raise Exception("logic error")

            elif worker_type == "guest":

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")
class CallerTestCase(TestCase):
    """
    Unit tests for L{CallerResource}.
    """

    def setUp(self):
        # build a router factory and start the test realm on it
        self.router_factory = RouterFactory(None, None)
        self.realm = RouterRealm(None, {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # permissive role for everything under the 'com.myapp.' URI prefix
        permissions = {
            u'uri': u'com.myapp.',
            u'match': u'prefix',
            u'allow': {
                u'call': True,
                u'register': True,
                u'publish': True,
                u'subscribe': True,
            }
        }
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(self.router, u'test_role',
                                 default_permissions=permissions))

        # session factory feeding sessions into the router
        self.session_factory = RouterSessionFactory(self.router_factory)

    @inlineCallbacks
    def test_add2(self):
        """
        Test a very basic call where you square root a number. This has one
        arg, no kwargs, and no authorisation.
        """
        # backend session registering the test procedures
        backend = TestSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(backend, authrole=u"test_role")

        # frontend session the CallerResource calls through
        frontend = ApplicationSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(frontend, authrole=u"test_role")

        resource = CallerResource({}, frontend)

        with LogCapturer() as capturer:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]},
                body=b'{"procedure": "com.myapp.sqrt", "args": [2]}')

        self.assertEqual(request.code, 200)
        self.assertEqual(
            json.loads(native_string(request.get_written_data())),
            {"args": [1.4142135623730951]})

        logs = capturer.get_category("AR202")
        self.assertEqual(len(logs), 1)
        self.assertEqual(logs[0]["code"], 200)

    @inlineCallbacks
    def test_failure(self):
        """
        A failed call returns the error to the client.
        """
        backend = TestSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(backend, authrole=u"test_role")

        frontend = ApplicationSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(frontend, authrole=u"test_role")

        resource = CallerResource({}, frontend)

        # (procedure, args, expected error payload) triples
        cases = [
            (u"com.myapp.sqrt", (0,),
             {u"error": u"wamp.error.runtime_error",
              u"args": [u"don't ask foolish questions ;)"],
              u"kwargs": {}}),
            (u"com.myapp.checkname", ("foo",),
             {u"error": u"com.myapp.error.reserved",
              u"args": [],
              u"kwargs": {}}),
            (u"com.myapp.checkname", ("*",),
             {u"error": u"com.myapp.error.invalid_length",
              u"args": [],
              u"kwargs": {"min": 3, "max": 10}}),
            (u"com.myapp.checkname", ("hello",),
             {u"error": u"com.myapp.error.mixed_case",
              u"args": ["hello", "HELLO"],
              u"kwargs": {}}),
            (u"com.myapp.compare", (1, 10),
             {u"error": u"com.myapp.error1",
              u"args": [9],
              u"kwargs": {}}),
        ]

        for procedure, args, expected in cases:
            with LogCapturer() as capturer:
                request = yield renderResource(
                    resource, b"/",
                    method=b"POST",
                    headers={b"Content-Type": [b"application/json"]},
                    body=dump_json({"procedure": procedure, "args": args}).encode('utf8'))

            self.assertEqual(request.code, 200)
            self.assertEqual(
                json.loads(native_string(request.get_written_data())),
                expected)

            logs = capturer.get_category("AR458")
            self.assertEqual(len(logs), 1)
            self.assertEqual(logs[0]["code"], 200)

        # We manually logged the errors; we can flush them from the log
        self.flushLoggedErrors()

    @inlineCallbacks
    def test_cb_failure(self):
        """
        An internal failure while performing the call is reported to the
        client as a generic 500 error.
        """
        # resource with no session behind it -- the call must blow up internally
        resource = CallerResource({}, None)

        with LogCapturer() as capturer:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]},
                body=b'{"procedure": "foo"}')

        self.assertEqual(request.code, 500)
        self.assertEqual(
            json.loads(native_string(request.get_written_data())),
            {"error": "wamp.error.runtime_error",
             "args": ["Sorry, Crossbar.io has encountered a problem."],
             "kwargs": {}})

        errors = capturer.get_category("AR500")
        self.assertEqual(len(errors), 1)

        # We manually logged the errors; we can flush them from the log
        self.flushLoggedErrors()

    @inlineCallbacks
    def test_no_procedure(self):
        """
        Test that calls with no procedure in the request body are rejected.
        """
        resource = CallerResource({}, None)

        with LogCapturer() as capturer:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]},
                body=b"{}")

        self.assertEqual(request.code, 400)

        errors = capturer.get_category("AR455")
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0]["code"], 400)

    @inlineCallbacks
    def test_no_body(self):
        """
        Test that calls with no body are rejected.
        """
        resource = CallerResource({}, None)

        with LogCapturer() as capturer:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]})

        self.assertEqual(request.code, 400)

        errors = capturer.get_category("AR453")
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0]["code"], 400)
class Node(object): """ A Crossbar.io node is the running a controller process and one or multiple worker processes. A single Crossbar.io node runs exactly one instance of this class, hence this class can be considered a system singleton. """ log = make_logger() def __init__(self, cbdir=None, reactor=None): """ :param cbdir: The node directory to run from. :type cbdir: unicode :param reactor: Reactor to run on. :type reactor: obj or None """ # node directory self._cbdir = cbdir or u'.' # reactor we should run on if reactor is None: from twisted.internet import reactor self._reactor = reactor # the node's management realm when running in managed mode (this comes from CDC!) self._management_realm = None # the node's ID when running in managed mode (this comes from CDC!) self._node_id = None # node extra when running in managed mode (this comes from CDC!) self._node_extra = None # the node controller realm self._realm = u'crossbar' # config of this node. self._config = None # node private key autobahn.wamp.cryptosign.SigningKey self._node_key = None # node controller session (a singleton ApplicationSession embedded # in the local node router) self._controller = None # when running in managed mode, this will hold the bridge session # attached to the local management router self._bridge_session = None # when running in managed mode, this will hold the uplink session to CDC self._manager = None # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES self._node_shutdown_triggers = [ checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT ] # map from router worker IDs to self._realm_templates = {} # for node elements started under specific IDs, and where # the node configuration does not specify an ID, use a generic # name numbered sequentially using the counters here self._worker_no = 1 self._realm_no = 1 self._role_no = 1 self._connection_no = 1 self._transport_no = 1 self._component_no = 1 def maybe_generate_key(self, cbdir, privkey_path=u'key.priv', 
pubkey_path=u'key.pub'): privkey_path = os.path.join(cbdir, privkey_path) pubkey_path = os.path.join(cbdir, pubkey_path) if os.path.exists(privkey_path): # node private key seems to exist already .. check! priv_tags = _parse_keyfile(privkey_path, private=True) for tag in [ u'creator', u'created-at', u'machine-id', u'public-key-ed25519', u'private-key-ed25519' ]: if tag not in priv_tags: raise Exception( "Corrupt node private key file {} - {} tag not found". format(privkey_path, tag)) privkey_hex = priv_tags[u'private-key-ed25519'] privkey = SigningKey(privkey_hex, encoder=HexEncoder) pubkey = privkey.verify_key pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii') if priv_tags[u'public-key-ed25519'] != pubkey_hex: raise Exception(( "Inconsistent node private key file {} - public-key-ed25519 doesn't" " correspond to private-key-ed25519").format(pubkey_path)) if os.path.exists(pubkey_path): pub_tags = _parse_keyfile(pubkey_path, private=False) for tag in [ u'creator', u'created-at', u'machine-id', u'public-key-ed25519' ]: if tag not in pub_tags: raise Exception( "Corrupt node public key file {} - {} tag not found" .format(pubkey_path, tag)) if pub_tags[u'public-key-ed25519'] != pubkey_hex: raise Exception(( "Inconsistent node public key file {} - public-key-ed25519 doesn't" " correspond to private-key-ed25519" ).format(pubkey_path)) else: self.log.info( "Node public key file {pub_path} not found - re-creating from node private key file {priv_path}", pub_path=pubkey_path, priv_path=privkey_path, ) pub_tags = OrderedDict([ (u'creator', priv_tags[u'creator']), (u'created-at', priv_tags[u'created-at']), (u'machine-id', priv_tags[u'machine-id']), (u'public-key-ed25519', pubkey_hex), ]) msg = u'Crossbar.io node public key\n\n' _write_node_key(pubkey_path, pub_tags, msg) self.log.debug("Node key already exists (public key: {hex})", hex=pubkey_hex) else: # node private key does not yet exist: generate one privkey = SigningKey.generate() privkey_hex = 
privkey.encode(encoder=HexEncoder).decode('ascii') pubkey = privkey.verify_key pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii') # first, write the public file tags = OrderedDict([ (u'creator', _creator()), (u'created-at', utcnow()), (u'machine-id', _machine_id()), (u'public-key-ed25519', pubkey_hex), ]) msg = u'Crossbar.io node public key\n\n' _write_node_key(pubkey_path, tags, msg) # now, add the private key and write the private file tags[u'private-key-ed25519'] = privkey_hex msg = u'Crossbar.io node private key - KEEP THIS SAFE!\n\n' _write_node_key(privkey_path, tags, msg) self.log.info("New node key pair generated!") # fix file permissions on node public/private key files # note: we use decimals instead of octals as octal literals have changed between Py2/3 # if os.stat(pubkey_path ).st_mode & 511 != 420: # 420 (decimal) == 0644 (octal) os.chmod(pubkey_path, 420) self.log.info("File permissions on node public key fixed!") if os.stat(privkey_path ).st_mode & 511 != 384: # 384 (decimal) == 0600 (octal) os.chmod(privkey_path, 384) self.log.info("File permissions on node private key fixed!") self._node_key = cryptosign.SigningKey(privkey) return pubkey_hex def load(self, configfile=None): """ Check and load the node configuration (usually, from ".crossbar/config.json") or load built-in CDC default config. 
""" if configfile: configpath = os.path.join(self._cbdir, configfile) self.log.debug("Loading node configuration from '{configpath}' ..", configpath=configpath) # the following will read the config, check the config and replace # environment variable references in configuration values ("${MYVAR}") and # finally return the parsed configuration object self._config = checkconfig.check_config_file(configpath) self.log.info("Node configuration loaded from '{configfile}'", configfile=configfile) else: self._config = {u'version': 2, u'controller': {}, u'workers': []} checkconfig.check_config(self._config) self.log.info("Node configuration loaded from built-in config.") @inlineCallbacks def start(self, cdc_mode=False): """ Starts this node. This will start a node controller and then spawn new worker processes as needed. """ if not self._config: raise Exception("No node configuration loaded") if not cdc_mode and not self._config.get( "controller", {}) and not self._config.get("workers", {}): self.log.warn( ("You seem to have no controller config or workers, nor are " "starting up in CDC mode. 
Check your config exists, or pass " "--cdc to `crossbar start`.")) try: self._reactor.stop() except twisted.internet.error.ReactorNotRunning: pass return # get controller config/options # controller_config = self._config.get('controller', {}) controller_options = controller_config.get('options', {}) # set controller process title # try: import setproctitle except ImportError: self.log.warn( "Warning, could not set process title (setproctitle not installed)" ) else: setproctitle.setproctitle( controller_options.get('title', 'crossbar-controller')) # router and factory that creates router sessions # self._router_factory = RouterFactory() self._router_session_factory = RouterSessionFactory( self._router_factory) # create a new router for the realm # rlm_config = {'name': self._realm} rlm = RouterRealm(None, rlm_config) router = self._router_factory.start_realm(rlm) # always add a realm service session # cfg = ComponentConfig(self._realm) rlm.session = RouterServiceSession(cfg, router) self._router_session_factory.add(rlm.session, authrole=u'trusted') # add a router bridge session when running in managed mode # if cdc_mode: self._bridge_session = NodeManagementBridgeSession(cfg) self._router_session_factory.add(self._bridge_session, authrole=u'trusted') else: self._bridge_session = None # Node shutdown mode # if cdc_mode: # in managed mode, a node - by default - only shuts down when explicitly asked to, # or upon a fatal error in the node controller self._node_shutdown_triggers = [ checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED ] else: # in standalone mode, a node - by default - is immediately shutting down whenever # a worker exits (successfully or with error) self._node_shutdown_triggers = [ checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT ] # allow to override node shutdown triggers # if 'shutdown' in controller_options: self.log.info( "Overriding default node shutdown triggers with {triggers} from node config", triggers=controller_options['shutdown']) 
self._node_shutdown_triggers = controller_options['shutdown'] else: self.log.info("Using default node shutdown triggers {triggers}", triggers=self._node_shutdown_triggers) # add the node controller singleton session # self._controller = NodeControllerSession(self) self._router_session_factory.add(self._controller, authrole=u'trusted') # detect WAMPlets (FIXME: remove this!) # wamplets = self._controller._get_wamplets() if len(wamplets) > 0: self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets)) for wpl in wamplets: self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name']) else: self.log.debug("No WAMPlets detected in enviroment.") panic = False try: # startup the node from local node configuration # yield self._startup(self._config) # connect to CDC when running in managed mode # if cdc_mode: cdc_config = controller_config.get( 'cdc', { # CDC connecting transport u'transport': { u'type': u'websocket', u'url': u'wss://cdc.crossbario.com/ws', u'endpoint': { u'type': u'tcp', u'host': u'cdc.crossbario.com', u'port': 443, u'timeout': 5, u'tls': { u'hostname': u'cdc.crossbario.com' } } } }) transport = cdc_config[u'transport'] hostname = None if u'tls' in transport[u'endpoint']: transport[u'endpoint'][u'tls'][u'hostname'] runner = ApplicationRunner( url=transport['url'], realm=None, extra=None, ssl=optionsForClientTLS(hostname) if hostname else None, ) def make(config): # extra info forwarded to CDC client session extra = { 'node': self, 'on_ready': Deferred(), 'on_exit': Deferred(), 'node_key': self._node_key, } @inlineCallbacks def on_ready(res): self._manager, self._management_realm, self._node_id, self._node_extra = res if self._bridge_session: try: yield self._bridge_session.attach_manager( self._manager, self._management_realm, self._node_id) status = yield self._manager.call( u'cdc.remote.status@1') except: self.log.failure() else: self.log.info( 'Connected to CDC for management realm "{realm}" (current time is 
{now})', realm=self._management_realm, now=status[u'now']) else: self.log.warn( 'Uplink CDC session established, but no bridge session setup!' ) @inlineCallbacks def on_exit(res): if self._bridge_session: try: yield self._bridge_session.detach_manager() except: self.log.failure() else: self.log.info( 'Disconnected from CDC for management realm "{realm}"', realm=self._management_realm) else: self.log.warn( 'Uplink CDC session lost, but no bridge session setup!' ) self._manager, self._management_realm, self._node_id, self._node_extra = None, None, None, None extra['on_ready'].addCallback(on_ready) extra['on_exit'].addCallback(on_exit) config = ComponentConfig(extra=extra) session = NodeManagementSession(config) return session self.log.info("Connecting to CDC at '{url}' ..", url=transport[u'url']) yield runner.run(make, start_reactor=False, auto_reconnect=True) # Notify systemd that crossbar is fully up and running # (this has no effect on non-systemd platforms) try: import sdnotify sdnotify.SystemdNotifier().notify("READY=1") except: pass except ApplicationError as e: panic = True self.log.error("{msg}", msg=e.error_message()) except Exception: panic = True traceback.print_exc() if panic: try: self._reactor.stop() except twisted.internet.error.ReactorNotRunning: pass @inlineCallbacks def _startup(self, config): """ Startup elements in the node as specified in the provided node configuration. 
""" self.log.info('Configuring node from local configuration ...') # call options we use to call into the local node management API call_options = CallOptions() # fake call details we use to call into the local node management API call_details = CallDetails(caller=0) # get contoller configuration subpart controller = config.get('controller', {}) # start Manhole in node controller if 'manhole' in controller: yield self._controller.start_manhole(controller['manhole'], details=call_details) # startup all workers workers = config.get('workers', []) if len(workers): self.log.info('Starting {nworkers} workers ...', nworkers=len(workers)) else: self.log.info('No workers configured!') for worker in workers: # worker ID if 'id' in worker: worker_id = worker.pop('id') else: worker_id = 'worker-{:03d}'.format(self._worker_no) self._worker_no += 1 # worker type - a type of working process from the following fixed list worker_type = worker['type'] assert (worker_type in ['router', 'container', 'guest', 'websocket-testee']) # set logname depending on worker type if worker_type == 'router': worker_logname = "Router '{}'".format(worker_id) elif worker_type == 'container': worker_logname = "Container '{}'".format(worker_id) elif worker_type == 'websocket-testee': worker_logname = "WebSocketTestee '{}'".format(worker_id) elif worker_type == 'guest': worker_logname = "Guest '{}'".format(worker_id) else: raise Exception("logic error") # any worker specific options worker_options = worker.get('options', {}) # native worker processes: router, container, websocket-testee if worker_type in ['router', 'container', 'websocket-testee']: # start a new native worker process .. 
if worker_type == 'router': yield self._controller.start_router(worker_id, worker_options, details=call_details) elif worker_type == 'container': yield self._controller.start_container( worker_id, worker_options, details=call_details) elif worker_type == 'websocket-testee': yield self._controller.start_websocket_testee( worker_id, worker_options, details=call_details) else: raise Exception("logic error") # setup native worker generic stuff if 'pythonpath' in worker_options: added_paths = yield self._controller.call( 'crossbar.worker.{}.add_pythonpath'.format(worker_id), worker_options['pythonpath'], options=call_options) self.log.debug("{worker}: PYTHONPATH extended for {paths}", worker=worker_logname, paths=added_paths) if 'cpu_affinity' in worker_options: new_affinity = yield self._controller.call( 'crossbar.worker.{}.set_cpu_affinity'.format( worker_id), worker_options['cpu_affinity'], options=call_options) self.log.debug("{worker}: CPU affinity set to {affinity}", worker=worker_logname, affinity=new_affinity) if 'manhole' in worker: yield self._controller.call( 'crossbar.worker.{}.start_manhole'.format(worker_id), worker['manhole'], options=call_options) self.log.debug("{worker}: manhole started", worker=worker_logname) # setup router worker if worker_type == 'router': # start realms on router for realm in worker.get('realms', []): # start realm if 'id' in realm: realm_id = realm.pop('id') else: realm_id = 'realm-{:03d}'.format(self._realm_no) self._realm_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_realm'.format( worker_id), realm_id, realm, options=call_options) self.log.info( "{worker}: realm '{realm_id}' (named '{realm_name}') started", worker=worker_logname, realm_id=realm_id, realm_name=realm['name']) # add roles to realm for role in realm.get('roles', []): if 'id' in role: role_id = role.pop('id') else: role_id = 'role-{:03d}'.format(self._role_no) self._role_no += 1 yield self._controller.call( 
'crossbar.worker.{}.start_router_realm_role'. format(worker_id), realm_id, role_id, role, options=call_options) self.log.info( "{logname}: role '{role}' (named '{role_name}') started on realm '{realm}'", logname=worker_logname, role=role_id, role_name=role['name'], realm=realm_id, ) # start uplinks for realm for uplink in realm.get('uplinks', []): if 'id' in uplink: uplink_id = uplink.pop('id') else: uplink_id = 'uplink-{:03d}'.format( self._uplink_no) self._uplink_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_realm_uplink'. format(worker_id), realm_id, uplink_id, uplink, options=call_options) self.log.info( "{logname}: uplink '{uplink}' started on realm '{realm}'", logname=worker_logname, uplink=uplink_id, realm=realm_id, ) # start connections (such as PostgreSQL database connection pools) # to run embedded in the router for connection in worker.get('connections', []): if 'id' in connection: connection_id = connection.pop('id') else: connection_id = 'connection-{:03d}'.format( self._connection_no) self._connection_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_connection'.format( worker_id), connection_id, connection, options=call_options) self.log.info( "{logname}: connection '{connection}' started", logname=worker_logname, connection=connection_id, ) # start components to run embedded in the router for component in worker.get('components', []): if 'id' in component: component_id = component.pop('id') else: component_id = 'component-{:03d}'.format( self._component_no) self._component_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_component'.format( worker_id), component_id, component, options=call_options) self.log.info( "{logname}: component '{component}' started", logname=worker_logname, component=component_id, ) # start transports on router for transport in worker['transports']: if 'id' in transport: transport_id = transport.pop('id') else: transport_id = 'transport-{:03d}'.format( 
self._transport_no) self._transport_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_transport'.format( worker_id), transport_id, transport, options=call_options) self.log.info( "{logname}: transport '{tid}' started", logname=worker_logname, tid=transport_id, ) # setup container worker elif worker_type == 'container': # if components exit "very soon after" we try to # start them, we consider that a failure and shut # our node down. We remove this subscription 2 # seconds after we're done starting everything # (see below). This is necessary as # start_container_component returns as soon as # we've established a connection to the component def component_exited(info): component_id = info.get("id") self.log.critical( "Component '{component_id}' failed to start; shutting down node.", component_id=component_id) try: self._reactor.stop() except twisted.internet.error.ReactorNotRunning: pass topic = 'crossbar.worker.{}.container.on_component_stop'.format( worker_id) component_stop_sub = yield self._controller.subscribe( component_exited, topic) # start connections (such as PostgreSQL database connection pools) # to run embedded in the container # for connection in worker.get('connections', []): if 'id' in connection: connection_id = connection.pop('id') else: connection_id = 'connection-{:03d}'.format( self._connection_no) self._connection_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_connection'.format( worker_id), connection_id, connection, options=call_options) self.log.info( "{logname}: connection '{connection}' started", logname=worker_logname, connection=connection_id, ) # start components to run embedded in the container # for component in worker.get('components', []): if 'id' in component: component_id = component.pop('id') else: component_id = 'component-{:03d}'.format( self._component_no) self._component_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_container_component'. 
format(worker_id), component_id, component, options=call_options) self.log.info( "{worker}: component '{component_id}' started", worker=worker_logname, component_id=component_id) # after 2 seconds, consider all the application components running self._reactor.callLater(2, component_stop_sub.unsubscribe) # setup websocket-testee worker elif worker_type == 'websocket-testee': # start transport on websocket-testee transport = worker['transport'] transport_id = 'transport-{:03d}'.format( self._transport_no) self._transport_no = 1 yield self._controller.call( 'crossbar.worker.{}.start_websocket_testee_transport'. format(worker_id), transport_id, transport, options=call_options) self.log.info( "{logname}: transport '{tid}' started", logname=worker_logname, tid=transport_id, ) else: raise Exception("logic error") elif worker_type == 'guest': # start guest worker # yield self._controller.start_guest(worker_id, worker, details=call_details) self.log.info("{worker}: started", worker=worker_logname) else: raise Exception("logic error") self.log.info('Local node configuration applied.')
class Node(object):
    """
    A Crossbar.io node is the running a controller process and one or multiple
    worker processes.

    A single Crossbar.io node runs exactly one instance of this class, hence
    this class can be considered a system singleton.
    """

    def __init__(self, reactor, options):
        """
        Ctor.

        :param reactor: Reactor to run on.
        :type reactor: obj
        :param options: Options from command line.
        :type options: obj
        """
        self.log = make_logger()

        self.options = options

        # the reactor under which we run
        self._reactor = reactor

        # shortname for reactor to run (when given via explicit option) or None
        self._reactor_shortname = options.reactor

        # node directory
        self._cbdir = options.cbdir

        # the node's name (must be unique within the management realm)
        self._node_id = None

        # the node's management realm
        self._realm = None

        # node controller session (a singleton ApplicationSession embedded
        # in the node's management router)
        self._controller = None

        # config of this node.
        self._config = None

    def check_config(self):
        """
        Check the configuration of this node.
        """
        # for now, a node is always started from a local configuration
        #
        configfile = os.path.join(self.options.cbdir, self.options.config)
        self.log.info("Loading node configuration file '{configfile}'",
                      configfile=configfile)
        # check_config_file parses and validates; result cached on self._config
        self._config = check_config_file(configfile, silence=True)

    @inlineCallbacks
    def start(self):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.
        """
        # lazily load the node configuration if the caller did not do so
        if not self._config:
            self.check_config()

        controller_config = self._config.get('controller', {})

        controller_options = controller_config.get('options', {})

        controller_title = controller_options.get('title', 'crossbar-controller')

        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_title)

        # the node's name (must be unique within the management realm)
        if 'id' in controller_config:
            self._node_id = controller_config['id']
        else:
            self._node_id = socket.gethostname()

        if 'manager' in controller_config:
            extra = {
                'onready': Deferred(),

                # authentication information for connecting to uplinkg CDC router
                # using WAMP-CRA authentication
                #
                'authid': self._node_id,
                'authkey': controller_config['manager']['key']
            }
            realm = controller_config['manager']['realm']
            transport = controller_config['manager']['transport']
            runner = ApplicationRunner(url=transport['url'], realm=realm, extra=extra, debug_wamp=False)
            # note: run() is fire-and-forget here; readiness is signalled via
            # the 'onready' Deferred in `extra`
            runner.run(NodeManagementSession, start_reactor=False)

            # wait until we have attached to the uplink CDC
            self._management_session = yield extra['onready']

            self.log.info("Node is connected to Crossbar.io DevOps Center (CDC)")
        else:
            self._management_session = None

        # the node's management realm
        self._realm = controller_config.get('realm', 'crossbar')

        # router and factory that creates router sessions
        #
        self._router_factory = RouterFactory()
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        rlm = RouterRealm(None, {'name': self._realm})

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        cfg = ComponentConfig(self._realm)

        rlm.session = RouterServiceSession(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')

        if self._management_session:
            # bridge local management router events/calls to the uplink CDC session
            self._bridge_session = NodeManagementBridgeSession(cfg, self._management_session)
            self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
        else:
            self._bridge_session = None

        # the node controller singleton WAMP application session
        #
        self._controller = NodeControllerSession(self)

        # add the node controller singleton session to the router
        #
        self._router_session_factory.add(self._controller, authrole=u'trusted')

        # Detect WAMPlets
        #
        wamplets = self._controller._get_wamplets()
        if len(wamplets) > 0:
            self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
            for wpl in wamplets:
                self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
        else:
            self.log.info("No WAMPlets detected in enviroment.")

        panic = False
        try:
            yield self._startup(self._config)
        except ApplicationError as e:
            panic = True
            # ApplicationError carries a (possibly multi-line) message in args[0]
            for line in e.args[0].strip().splitlines():
                self.log.error(line)
        except Exception:
            panic = True
            traceback.print_exc()

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    @inlineCallbacks
    def _startup(self, config):
        # Setup node according to the local configuration provided.

        # fake call details information when calling into
        # remoted procedure locally
        #
        call_details = CallDetails(caller=0)

        controller = config.get('controller', {})

        # start Manhole in node controller
        #
        if 'manhole' in controller:
            yield self._controller.start_manhole(controller['manhole'], details=call_details)

        # startup all workers
        #
        worker_no = 1

        call_options = CallOptions(disclose_me=True)

        for worker in config.get('workers', []):
            # worker ID, type and logname
            #
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                worker_id = 'worker{}'.format(worker_no)
                worker_no += 1

            worker_type = worker['type']
            worker_options = worker.get('options', {})

            if worker_type == 'router':
                worker_logname = "Router '{}'".format(worker_id)

            elif worker_type == 'container':
                worker_logname = "Container '{}'".format(worker_id)

            elif worker_type == 'guest':
                worker_logname = "Guest '{}'".format(worker_id)

            else:
                raise Exception("logic error")

            # router/container
            #
            if worker_type in ['router', 'container']:

                # start a new native worker process ..
                #
                if worker_type == 'router':
                    yield self._controller.start_router(worker_id, worker_options, details=call_details)

                elif worker_type == 'container':
                    yield self._controller.start_container(worker_id, worker_options, details=call_details)

                else:
                    raise Exception("logic error")

                # setup native worker generic stuff
                #
                if 'pythonpath' in worker_options:
                    added_paths = yield self._controller.call(
                        'crossbar.node.{}.worker.{}.add_pythonpath'.format(self._node_id, worker_id),
                        worker_options['pythonpath'],
                        options=call_options)
                    self.log.debug("{worker}: PYTHONPATH extended for {paths}",
                                   worker=worker_logname, paths=added_paths)

                if 'cpu_affinity' in worker_options:
                    new_affinity = yield self._controller.call(
                        'crossbar.node.{}.worker.{}.set_cpu_affinity'.format(self._node_id, worker_id),
                        worker_options['cpu_affinity'],
                        options=call_options)
                    self.log.debug("{worker}: CPU affinity set to {affinity}",
                                   worker=worker_logname, affinity=new_affinity)

                if 'manhole' in worker:
                    yield self._controller.call(
                        'crossbar.node.{}.worker.{}.start_manhole'.format(self._node_id, worker_id),
                        worker['manhole'],
                        options=call_options)
                    self.log.debug("{worker}: manhole started",
                                   worker=worker_logname)

                # setup router worker
                #
                if worker_type == 'router':

                    # start realms on router
                    #
                    realm_no = 1

                    for realm in worker.get('realms', []):

                        if 'id' in realm:
                            realm_id = realm.pop('id')
                        else:
                            realm_id = 'realm{}'.format(realm_no)
                            realm_no += 1

                        # extract schema information from WAMP-flavored Markdown
                        #
                        schemas = None
                        if 'schemas' in realm:
                            schemas = {}
                            # fenced ```javascript blocks may contain WAMP schema JSON
                            schema_pat = re.compile(r"```javascript(.*?)```", re.DOTALL)
                            cnt_files = 0
                            cnt_decls = 0
                            for schema_file in realm.pop('schemas'):
                                schema_file = os.path.join(self.options.cbdir, schema_file)
                                self.log.info("{worker}: processing WAMP-flavored Markdown file {schema_file} for WAMP schema declarations",
                                              worker=worker_logname, schema_file=schema_file)
                                with open(schema_file, 'r') as f:
                                    cnt_files += 1
                                    for d in schema_pat.findall(f.read()):
                                        try:
                                            o = json.loads(d)
                                            # only accept declarations explicitly tagged with the WAMP schema URI
                                            if isinstance(o, dict) and '$schema' in o and o['$schema'] == u'http://wamp.ws/schema#':
                                                uri = o['uri']
                                                if uri not in schemas:
                                                    schemas[uri] = {}
                                                schemas[uri].update(o)
                                                cnt_decls += 1
                                        except Exception:
                                            self.log.failure("{worker}: WARNING - failed to process declaration in {schema_file} - {log_failure.value}",
                                                             worker=worker_logname, schema_file=schema_file)
                            self.log.info("{worker}: processed {cnt_files} files extracting {cnt_decls} schema declarations and {len_schemas} URIs",
                                          worker=worker_logname, cnt_files=cnt_files, cnt_decls=cnt_decls, len_schemas=len(schemas))

                        yield self._controller.call(
                            'crossbar.node.{}.worker.{}.start_router_realm'.format(self._node_id, worker_id),
                            realm_id,
                            realm,
                            schemas,
                            options=call_options)
                        self.log.info("{worker}: realm '{realm_id}' (named '{realm_name}') started",
                                      worker=worker_logname, realm_id=realm_id, realm_name=realm['name'])

                        # add roles to realm
                        #
                        role_no = 1
                        for role in realm.get('roles', []):
                            if 'id' in role:
                                role_id = role.pop('id')
                            else:
                                role_id = 'role{}'.format(role_no)
                                role_no += 1

                            yield self._controller.call(
                                'crossbar.node.{}.worker.{}.start_router_realm_role'.format(self._node_id, worker_id),
                                realm_id,
                                role_id,
                                role,
                                options=call_options)
                            self.log.info("{}: role '{}' (named '{}') started on realm '{}'".format(worker_logname, role_id, role['name'], realm_id))

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the router
                    #
                    connection_no = 1

                    for connection in worker.get('connections', []):
                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection{}'.format(connection_no)
                            connection_no += 1

                        yield self._controller.call(
                            'crossbar.node.{}.worker.{}.start_connection'.format(self._node_id, worker_id),
                            connection_id,
                            connection,
                            options=call_options)
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the router
                    #
                    component_no = 1

                    for component in worker.get('components', []):
                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component{}'.format(component_no)
                            component_no += 1

                        yield self._controller.call(
                            'crossbar.node.{}.worker.{}.start_router_component'.format(self._node_id, worker_id),
                            component_id,
                            component,
                            options=call_options)
                        self.log.info("{}: component '{}' started".format(worker_logname, component_id))

                    # start transports on router
                    #
                    transport_no = 1

                    for transport in worker['transports']:
                        if 'id' in transport:
                            transport_id = transport.pop('id')
                        else:
                            transport_id = 'transport{}'.format(transport_no)
                            transport_no += 1

                        yield self._controller.call(
                            'crossbar.node.{}.worker.{}.start_router_transport'.format(self._node_id, worker_id),
                            transport_id,
                            transport,
                            options=call_options)
                        self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                # setup container worker
                #
                elif worker_type == 'container':

                    component_no = 1

                    # if components exit "very soon after" we try to
                    # start them, we consider that a failure and shut
                    # our node down. We remove this subscription 2
                    # seconds after we're done starting everything
                    # (see below). This is necessary as
                    # start_container_component returns as soon as
                    # we've established a connection to the component
                    def component_exited(info):
                        component_id = info.id if hasattr(info, 'id') else None
                        self.log.info("Component '{component_id}' failed to start; shutting down node.",
                                      component_id=component_id)
                        try:
                            self._reactor.stop()
                        except twisted.internet.error.ReactorNotRunning:
                            pass

                    topic = 'crossbar.node.{}.worker.{}.container.on_component_stop'.format(self._node_id, worker_id)
                    component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the container
                    #
                    connection_no = 1

                    for connection in worker.get('connections', []):
                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection{}'.format(connection_no)
                            connection_no += 1

                        yield self._controller.call(
                            'crossbar.node.{}.worker.{}.start_connection'.format(self._node_id, worker_id),
                            connection_id,
                            connection,
                            options=call_options)
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the container
                    #
                    for component in worker.get('components', []):
                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component{}'.format(component_no)
                            component_no += 1

                        yield self._controller.call(
                            'crossbar.node.{}.worker.{}.start_container_component'.format(self._node_id, worker_id),
                            component_id,
                            component,
                            options=call_options)
                        self.log.info("{worker}: component '{component_id}' started",
                                      worker=worker_logname, component_id=component_id)

                    # after 2 seconds, consider all the application components running
                    self._reactor.callLater(2, component_stop_sub.unsubscribe)

                else:
                    raise Exception("logic error")

            elif worker_type == 'guest':

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")
class Node(object): """ A Crossbar.io node is the running a controller process and one or multiple worker processes. A single Crossbar.io node runs exactly one instance of this class, hence this class can be considered a system singleton. """ def __init__(self, reactor, options): """ Ctor. :param reactor: Reactor to run on. :type reactor: obj :param options: Options from command line. :type options: obj """ self.log = make_logger() self.options = options # the reactor under which we run self._reactor = reactor # shortname for reactor to run (when given via explicit option) or None self._reactor_shortname = options.reactor # node directory self._cbdir = options.cbdir # the node's name (must be unique within the management realm) self._node_id = None # the node's management realm self._realm = None # node controller session (a singleton ApplicationSession embedded # in the node's management router) self._controller = None @inlineCallbacks def start(self): """ Starts this node. This will start a node controller and then spawn new worker processes as needed. 
""" # for now, a node is always started from a local configuration # configfile = os.path.join(self.options.cbdir, self.options.config) self.log.info("Starting from node configuration file '{configfile}'", configfile=configfile) self._config = check_config_file(configfile, silence=True) controller_config = self._config.get('controller', {}) controller_options = controller_config.get('options', {}) controller_title = controller_options.get('title', 'crossbar-controller') try: import setproctitle except ImportError: self.log.warn("Warning, could not set process title (setproctitle not installed)") else: setproctitle.setproctitle(controller_title) # the node's name (must be unique within the management realm) if 'manager' in self._config: self._node_id = self._config['manager']['id'] else: if 'id' in controller_config: self._node_id = controller_config['id'] else: self._node_id = socket.gethostname() if 'manager' in self._config: extra = { 'onready': Deferred() } runner = ApplicationRunner(url=u"ws://localhost:9000", realm=u"cdc-oberstet-1", extra=extra) runner.run(NodeManagementSession, start_reactor=False) # wait until we have attached to the uplink CDC self._management_session = yield extra['onready'] self.log.info("Connected to Crossbar.io Management Cloud: {management_session}", management_session=self._management_session) else: self._management_session = None # the node's management realm self._realm = controller_config.get('realm', 'crossbar') # router and factory that creates router sessions # self._router_factory = RouterFactory() self._router_session_factory = RouterSessionFactory(self._router_factory) rlm = RouterRealm(None, {'name': self._realm}) # create a new router for the realm router = self._router_factory.start_realm(rlm) # add a router/realm service session cfg = ComponentConfig(self._realm) rlm.session = RouterServiceSession(cfg, router) self._router_session_factory.add(rlm.session, authrole=u'trusted') if self._management_session: 
            # bridge session that forwards management API traffic between the
            # local router and the upstream management session.
            # NOTE(review): the enclosing "if" is above this chunk - presumably
            # it tests whether a management uplink session exists; confirm.
            self._bridge_session = NodeManagementBridgeSession(cfg, self._management_session)
            self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
        else:
            self._bridge_session = None

        # the node controller singleton WAMP application session
        #
        # session_config = ComponentConfig(realm = options.realm, extra = options)
        self._controller = NodeControllerSession(self)

        # add the node controller singleton session to the router
        #
        self._router_session_factory.add(self._controller, authrole=u'trusted')

        # Detect WAMPlets
        #
        wamplets = self._controller._get_wamplets()
        if len(wamplets) > 0:
            self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
            for wpl in wamplets:
                self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
        else:
            self.log.info("No WAMPlets detected in enviroment.")

        # select startup mode from config and run it; any startup failure
        # stops the reactor (bare except kept: startup must never hang the
        # process on an unexpected error)
        try:
            if 'manager' in self._config:
                yield self._startup_managed(self._config)
            else:
                yield self._startup_standalone(self._config)
        except:
            traceback.print_exc()
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    @inlineCallbacks
    def _startup_managed(self, config):
        """
        Connect the node to an upstream management application. The node
        will run in "managed" mode (as opposed to "standalone" mode).

        NOTE(review): currently a stub - it only sleeps for one second and
        performs no actual connection setup.

        :param config: The node configuration (unused here).
        """
        yield sleep(1)

    @inlineCallbacks
    def _startup_standalone(self, config):
        """
        Setup node according to the local configuration provided. The node
        will run in "standalone" mode (as opposed to "managed" mode).

        Starts (in order): manhole, the management transport, and then every
        configured worker (router / container / guest) with all of its
        realms, roles, components and transports.

        :param config: The parsed node configuration.
        """
        # fake call details information when calling into
        # remoted procedure locally
        #
        call_details = CallDetails(caller=0)

        controller = config.get('controller', {})

        # start Manhole in node controller
        #
        if 'manhole' in controller:
            yield self._controller.start_manhole(controller['manhole'], details=call_details)

        # start local transport for management router
        #
        if 'transport' in controller:
            yield self._controller.start_management_transport(controller['transport'], details=call_details)

        # startup all workers
        #
        worker_no = 1

        call_options = CallOptions(disclose_me=True)

        for worker in config.get('workers', []):
            # worker ID, type and logname
            #
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                # auto-assign sequential worker IDs when not configured
                worker_id = 'worker{}'.format(worker_no)
                worker_no += 1

            worker_type = worker['type']
            worker_options = worker.get('options', {})

            if worker_type == 'router':
                worker_logname = "Router '{}'".format(worker_id)
            elif worker_type == 'container':
                worker_logname = "Container '{}'".format(worker_id)
            elif worker_type == 'guest':
                worker_logname = "Guest '{}'".format(worker_id)
            else:
                raise Exception("logic error")

            # router/container
            #
            if worker_type in ['router', 'container']:

                # start a new native worker process ..
                #
                if worker_type == 'router':
                    yield self._controller.start_router(worker_id, worker_options, details=call_details)

                elif worker_type == 'container':
                    yield self._controller.start_container(worker_id, worker_options, details=call_details)

                else:
                    raise Exception("logic error")

                # setup native worker generic stuff
                #
                if 'pythonpath' in worker_options:
                    added_paths = yield self._controller.call('crossbar.node.{}.worker.{}.add_pythonpath'.format(self._node_id, worker_id), worker_options['pythonpath'], options=call_options)
                    self.log.debug("{worker}: PYTHONPATH extended for {paths}",
                                   worker=worker_logname, paths=added_paths)

                if 'cpu_affinity' in worker_options:
                    new_affinity = yield self._controller.call('crossbar.node.{}.worker.{}.set_cpu_affinity'.format(self._node_id, worker_id), worker_options['cpu_affinity'], options=call_options)
                    self.log.debug("{worker}: CPU affinity set to {affinity}",
                                   worker=worker_logname, affinity=new_affinity)

                if 'manhole' in worker:
                    yield self._controller.call('crossbar.node.{}.worker.{}.start_manhole'.format(self._node_id, worker_id), worker['manhole'], options=call_options)
                    self.log.debug("{worker}: manhole started",
                                   worker=worker_logname)

                # setup router worker
                #
                if worker_type == 'router':

                    # start realms on router
                    #
                    realm_no = 1

                    for realm in worker.get('realms', []):

                        if 'id' in realm:
                            realm_id = realm.pop('id')
                        else:
                            realm_id = 'realm{}'.format(realm_no)
                            realm_no += 1

                        # extract schema information from WAMP-flavored Markdown
                        #
                        schemas = None
                        if 'schemas' in realm:
                            schemas = {}
                            # ```javascript fenced blocks in the Markdown hold
                            # JSON schema declarations
                            schema_pat = re.compile(r"```javascript(.*?)```", re.DOTALL)
                            cnt_files = 0
                            cnt_decls = 0
                            for schema_file in realm.pop('schemas'):
                                schema_file = os.path.join(self.options.cbdir, schema_file)
                                self.log.info("{worker}: processing WAMP-flavored Markdown file {schema_file} for WAMP schema declarations",
                                              worker=worker_logname, schema_file=schema_file)
                                with open(schema_file, 'r') as f:
                                    cnt_files += 1
                                    for d in schema_pat.findall(f.read()):
                                        try:
                                            o = json.loads(d)
                                            # only accept declarations explicitly marked
                                            # with the WAMP schema URI
                                            if isinstance(o, dict) and '$schema' in o and o['$schema'] == u'http://wamp.ws/schema#':
                                                uri = o['uri']
                                                if uri not in schemas:
                                                    schemas[uri] = {}
                                                schemas[uri].update(o)
                                                cnt_decls += 1
                                        except Exception:
                                            self.log.failure("{worker}: WARNING - failed to process declaration in {schema_file} - {log_failure.value}",
                                                             worker=worker_logname, schema_file=schema_file)
                            self.log.info("{worker}: processed {cnt_files} files extracting {cnt_decls} schema declarations and {len_schemas} URIs",
                                          worker=worker_logname, cnt_files=cnt_files, cnt_decls=cnt_decls, len_schemas=len(schemas))

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm'.format(self._node_id, worker_id), realm_id, realm, schemas, options=call_options)
                        self.log.info("{worker}: realm '{realm_id}' (named '{realm_name}') started",
                                      worker=worker_logname, realm_id=realm_id, realm_name=realm['name'])

                        # add roles to realm
                        #
                        role_no = 1
                        for role in realm.get('roles', []):
                            if 'id' in role:
                                role_id = role.pop('id')
                            else:
                                role_id = 'role{}'.format(role_no)
                                role_no += 1

                            yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm_role'.format(self._node_id, worker_id), realm_id, role_id, role, options=call_options)
                            self.log.info("{}: role '{}' (named '{}') started on realm '{}'".format(worker_logname, role_id, role['name'], realm_id))

                    # start components to run embedded in the router
                    #
                    component_no = 1

                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component{}'.format(component_no)
                            component_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_component'.format(self._node_id, worker_id), component_id, component, options=call_options)
                        self.log.info("{}: component '{}' started".format(worker_logname, component_id))

                    # start transports on router
                    #
                    transport_no = 1

                    for transport in worker['transports']:

                        if 'id' in transport:
                            transport_id = transport.pop('id')
                        else:
                            transport_id = 'transport{}'.format(transport_no)
                            transport_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_transport'.format(self._node_id, worker_id), transport_id, transport, options=call_options)
                        self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                # setup container worker
                #
                elif worker_type == 'container':

                    component_no = 1

                    # if components exit "very soon after" we try to
                    # start them, we consider that a failure and shut
                    # our node down. We remove this subscription 2
                    # seconds after we're done starting everything
                    # (see below). This is necessary as
                    # start_container_component returns as soon as
                    # we've established a connection to the component
                    def component_exited(info):
                        dead_comp = info['id']
                        self.log.info("Component '{}' failed to start; shutting down node.".format(dead_comp))
                        if self._reactor.running:
                            self._reactor.stop()
                    topic = 'crossbar.node.{}.worker.{}.container.on_component_stop'.format(self._node_id, worker_id)
                    component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component{}'.format(component_no)
                            component_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_container_component'.format(self._node_id, worker_id), component_id, component, options=call_options)
                        self.log.info("{worker}: component '{component_id}' started",
                                      worker=worker_logname, component_id=component_id)

                    # after 2 seconds, consider all the application components running
                    self._reactor.callLater(2, component_stop_sub.unsubscribe)

                else:
                    raise Exception("logic error")

            elif worker_type == 'guest':

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")
class Node(object):
    """
    A Crossbar.io node is the running a controller process and one or multiple
    worker processes.

    A single Crossbar.io node runs exactly one instance of this class, hence
    this class can be considered a system singleton.
    """
    log = make_logger()

    def __init__(self, cbdir=None, reactor=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode
        :param reactor: Reactor to run on.
        :type reactor: obj or None
        """
        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on (default to the global Twisted reactor
        # when none was injected)
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # the node's name (must be unique within the management realm)
        self._node_id = None

        # the node's management realm
        self._realm = None

        # config of this node.
        self._config = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # when run in "managed mode", this will hold the uplink WAMP session
        # from the node controller to the mananagement application
        self._manager = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # map from router worker IDs to realm templates
        # NOTE(review): original comment was truncated ("map fro router
        # worker IDs to") - confirm intended value type
        self._realm_templates = {}

        # for node elements started under specific IDs, and where
        # the node configuration does not specify an ID, use a generic
        # name numbered sequentially using the counters here
        self._worker_no = 1
        self._realm_no = 1
        self._role_no = 1
        # FIX: _uplink_no was never initialized although _startup()
        # increments it when auto-naming realm uplinks, which raised
        # AttributeError for any config containing uplinks
        self._uplink_no = 1
        self._connection_no = 1
        self._transport_no = 1
        self._component_no = 1

    def load(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in CDC default config.

        :param configfile: Optional configuration filename, relative to the
            node directory. When not given, a minimal built-in config that
            only enables CDC is used.
        """
        if configfile:
            configpath = os.path.join(self._cbdir, configfile)

            self.log.debug("Loading node configuration from '{configpath}' ..",
                           configpath=configpath)

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = checkconfig.check_config_file(configpath)

            self.log.info("Node configuration loaded from '{configfile}'",
                          configfile=configfile)
        else:
            self._config = {
                u"controller": {
                    u"cdc": {
                        u"enabled": True
                    }
                }
            }
            checkconfig.check_config(self._config)
            self.log.info("Node configuration loaded from built-in CDC config.")

    def _prepare_node_keys(self):
        """
        Ensure the CDC key material exists under CBDIR/.cdc: load (or create)
        the node's Ed25519 signing key and load the node principal
        ("<node id>@<management realm>") from file or environment.

        :returns: tuple of (management realm, node ID, signing key)
        """
        from nacl.signing import SigningKey
        from nacl.encoding import HexEncoder

        # make sure CBDIR/.cdc exists
        #
        cdc_dir = os.path.join(self._cbdir, '.cdc')
        if os.path.isdir(cdc_dir):
            pass
        elif os.path.exists(cdc_dir):
            raise Exception(".cdc exists, but isn't a directory")
        else:
            os.mkdir(cdc_dir)
            self.log.info("CDC directory created")

        # load node ID, either from .cdc/node.id or from CDC_NODE_ID
        #
        def split_nid(nid_s):
            # parse "<node id>@<management realm>"
            nid_c = nid_s.strip().split('@')
            if len(nid_c) != 2:
                raise Exception("illegal node principal '{}' - must follow the form <node id>@<management realm>".format(nid_s))
            node_id, realm = nid_c
            # FIXME: regex check node_id and realm
            return node_id, realm

        nid_file = os.path.join(cdc_dir, 'node.id')
        node_id, realm = None, None
        if os.path.isfile(nid_file):
            with open(nid_file, 'r') as f:
                node_id, realm = split_nid(f.read())
        elif os.path.exists(nid_file):
            raise Exception("{} exists, but isn't a file".format(nid_file))
        else:
            if 'CDC_NODE_ID' in os.environ:
                node_id, realm = split_nid(os.environ['CDC_NODE_ID'])
            else:
                raise Exception("Neither node ID file {} exists nor CDC_NODE_ID environment variable set".format(nid_file))

        # Load the node key, either from .cdc/node.key or from CDC_NODE_KEY.
        # The node key is a Ed25519 key in either raw format (32 bytes) or in
        # hex-encoded form (64 characters).
        #
        # Actually, what's loaded is not the secret Ed25519 key, but the _seed_
        # for that key. Private keys are derived from this 32-byte (256-bit)
        # random seed value. It is thus the seed value which is sensitive and
        # must be protected.
        #
        skey_file = os.path.join(cdc_dir, 'node.key')
        skey = None
        if os.path.isfile(skey_file):
            # FIXME: check file permissions are 0600!

            # This value is read in here.
            skey_len = os.path.getsize(skey_file)
            if skey_len in (32, 64):
                with open(skey_file, 'r') as f:
                    skey_seed = f.read()
                    encoder = None
                    if skey_len == 64:
                        encoder = HexEncoder
                    skey = SigningKey(skey_seed, encoder=encoder)
                self.log.info("Existing CDC node key loaded from {skey_file}.",
                              skey_file=skey_file)
            else:
                # FIX: the placeholder was never filled in - the message used
                # to literally contain "{}"
                raise Exception("invalid node key length {} (key must either be 32 raw bytes or hex encoded 32 bytes, hence 64 byte char length)".format(skey_len))
        elif os.path.exists(skey_file):
            raise Exception("{} exists, but isn't a file".format(skey_file))
        else:
            skey = SigningKey.generate()
            skey_seed = skey.encode(encoder=HexEncoder)
            with open(skey_file, 'w') as f:
                f.write(skey_seed)

            # set file mode to read only for owner
            # 384 (decimal) == 0600 (octal) - we use that for Py2/3 reasons
            os.chmod(skey_file, 384)
            self.log.info("New CDC node key {skey_file} generated.",
                          skey_file=skey_file)

        return realm, node_id, skey

    @inlineCallbacks
    def start(self, cdc_mode=False):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        :param cdc_mode: Currently unused flag (mode selection is driven by
            the "cdc" section of the controller config).
        """
        if not self._config:
            raise Exception("No node configuration loaded")

        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # set controller process title
        #
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # the node controller realm
        #
        self._realm = controller_config.get(u'realm', u'crossbar')

        # the node's name (must be unique within the management realm when running
        # in "managed mode")
        #
        if 'id' in controller_config:
            self._node_id = controller_config['id']
            self.log.info("Node ID '{node_id}' set from config", node_id=self._node_id)
        elif 'CDC_ID' in os.environ:
            self._node_id = u'{}'.format(os.environ['CDC_ID'])
            self.log.info("Node ID '{node_id}' set from environment variable CDC_ID", node_id=self._node_id)
        else:
            self._node_id = u'{}'.format(socket.gethostname())
            self.log.info("Node ID '{node_id}' set from hostname", node_id=self._node_id)

        # standalone vs managed mode
        #
        if 'cdc' in controller_config and controller_config['cdc'].get('enabled', False):

            self._prepare_node_keys()

            cdc_config = controller_config['cdc']

            # CDC connecting transport
            #
            if 'transport' in cdc_config:
                transport = cdc_config['transport']
                if 'tls' in transport['endpoint']:
                    if 'hostname' in transport['endpoint']:
                        hostname = transport['endpoint']['tls']['hostname']
                    else:
                        raise Exception("TLS activated on CDC connection, but 'hostname' not provided")
                else:
                    hostname = None
                self.log.warn("CDC transport configuration overridden from node config!")
            else:
                transport = {
                    "type": u"websocket",
                    "url": u"wss://devops.crossbario.com/ws",
                    "endpoint": {
                        "type": u"tcp",
                        "host": u"devops.crossbario.com",
                        "port": 443,
                        "timeout": 5,
                        "tls": {
                            "hostname": u"devops.crossbario.com"
                        }
                    }
                }
                hostname = u'devops.crossbario.com'

            # CDC management realm
            #
            if 'realm' in cdc_config:
                realm = cdc_config['realm']
                self.log.info("CDC management realm '{realm}' set from config", realm=realm)
            elif 'CDC_REALM' in os.environ:
                realm = u"{}".format(os.environ['CDC_REALM']).strip()
                self.log.info("CDC management realm '{realm}' set from enviroment variable CDC_REALM", realm=realm)
            else:
                raise Exception("CDC management realm not set - either 'realm' must be set in node configuration, or in CDC_REALM enviroment variable")

            # CDC authentication credentials (for WAMP-CRA)
            #
            authid = self._node_id
            if 'secret' in cdc_config:
                authkey = cdc_config['secret']
                self.log.info("CDC authentication secret loaded from config")
            elif 'CDC_SECRET' in os.environ:
                authkey = u"{}".format(os.environ['CDC_SECRET']).strip()
                self.log.info("CDC authentication secret loaded from environment variable CDC_SECRET")
            else:
                raise Exception("CDC authentication secret not set - either 'secret' must be set in node configuration, or in CDC_SECRET enviroment variable")

            # extra info forwarded to CDC client session
            #
            extra = {
                'node': self,
                'onready': Deferred(),
                'onexit': Deferred(),
                'authid': authid,
                'authkey': authkey
            }

            runner = ApplicationRunner(
                url=transport['url'], realm=realm, extra=extra,
                ssl=optionsForClientTLS(hostname) if hostname else None,
            )

            try:
                self.log.info("Connecting to CDC at '{url}' ..", url=transport['url'])
                yield runner.run(NodeManagementSession, start_reactor=False)

                # wait until we have attached to the uplink CDC
                self._manager = yield extra['onready']
            except Exception as e:
                raise Exception("Could not connect to CDC - {}".format(e))

            # in managed mode, a node - by default - only shuts down when explicitly asked to,
            # or upon a fatal error in the node controller
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED]

            self.log.info("Connected to Crossbar.io DevOps Center (CDC)! Your node runs in managed mode.")
        else:
            self._manager = None

            # in standalone mode, a node - by default - is immediately shutting down whenever
            # a worker exits (successfully or with error)
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # allow to override node shutdown triggers
        #
        if 'shutdown' in controller_options:
            self.log.info("Overriding default node shutdown triggers with {} from node config".format(controller_options['shutdown']))
            self._node_shutdown_triggers = controller_options['shutdown']
        else:
            self.log.info("Using default node shutdown triggers {}".format(self._node_shutdown_triggers))

        # router and factory that creates router sessions
        #
        self._router_factory = RouterFactory(self._node_id)
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        rlm_config = {
            'name': self._realm
        }
        rlm = RouterRealm(None, rlm_config)

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        cfg = ComponentConfig(self._realm)

        rlm.session = RouterServiceSession(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')

        if self._manager:
            self._bridge_session = NodeManagementBridgeSession(cfg, self, self._manager)
            self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
        else:
            self._bridge_session = None

        # the node controller singleton WAMP application session
        #
        self._controller = NodeControllerSession(self)

        # add the node controller singleton session to the router
        #
        self._router_session_factory.add(self._controller, authrole=u'trusted')

        # Detect WAMPlets
        #
        wamplets = self._controller._get_wamplets()
        if len(wamplets) > 0:
            self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
            for wpl in wamplets:
                self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
        else:
            self.log.debug("No WAMPlets detected in enviroment.")

        panic = False
        try:
            yield self._startup(self._config)

            # Notify systemd that crossbar is fully up and running
            # This has no effect on non-systemd platforms
            # (best-effort: never fail startup because of the notification;
            # FIX: narrowed from a bare "except:")
            try:
                import sdnotify
                sdnotify.SystemdNotifier().notify("READY=1")
            except Exception:
                pass

        except ApplicationError as e:
            panic = True
            self.log.error("{msg}", msg=e.error_message())
        except Exception:
            panic = True
            traceback.print_exc()

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    @inlineCallbacks
    def _startup(self, config):
        """
        Startup elements in the node as specified in the provided node
        configuration: the controller manhole, then every configured worker
        (router / container / websocket-testee / guest) with all of its
        realms, roles, uplinks, connections, components and transports.

        :param config: The parsed node configuration.
        """
        # call options we use to call into the local node management API
        call_options = CallOptions()

        # fake call details we use to call into the local node management API
        call_details = CallDetails(caller=0)

        # get contoller configuration subpart
        controller = config.get('controller', {})

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.start_manhole(controller['manhole'], details=call_details)

        # startup all workers
        for worker in config.get('workers', []):

            # worker ID
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                worker_id = 'worker-{:03d}'.format(self._worker_no)
                self._worker_no += 1

            # worker type - a type of working process from the following fixed list
            worker_type = worker['type']
            assert(worker_type in ['router', 'container', 'guest', 'websocket-testee'])

            # set logname depending on worker type
            if worker_type == 'router':
                worker_logname = "Router '{}'".format(worker_id)
            elif worker_type == 'container':
                worker_logname = "Container '{}'".format(worker_id)
            elif worker_type == 'websocket-testee':
                worker_logname = "WebSocketTestee '{}'".format(worker_id)
            elif worker_type == 'guest':
                worker_logname = "Guest '{}'".format(worker_id)
            else:
                raise Exception("logic error")

            # any worker specific options
            worker_options = worker.get('options', {})

            # native worker processes: router, container, websocket-testee
            if worker_type in ['router', 'container', 'websocket-testee']:

                # start a new native worker process ..
                if worker_type == 'router':
                    yield self._controller.start_router(worker_id, worker_options, details=call_details)

                elif worker_type == 'container':
                    yield self._controller.start_container(worker_id, worker_options, details=call_details)

                elif worker_type == 'websocket-testee':
                    yield self._controller.start_websocket_testee(worker_id, worker_options, details=call_details)

                else:
                    raise Exception("logic error")

                # setup native worker generic stuff
                if 'pythonpath' in worker_options:
                    added_paths = yield self._controller.call('crossbar.node.{}.worker.{}.add_pythonpath'.format(self._node_id, worker_id), worker_options['pythonpath'], options=call_options)
                    self.log.debug("{worker}: PYTHONPATH extended for {paths}",
                                   worker=worker_logname, paths=added_paths)

                if 'cpu_affinity' in worker_options:
                    new_affinity = yield self._controller.call('crossbar.node.{}.worker.{}.set_cpu_affinity'.format(self._node_id, worker_id), worker_options['cpu_affinity'], options=call_options)
                    self.log.debug("{worker}: CPU affinity set to {affinity}",
                                   worker=worker_logname, affinity=new_affinity)

                if 'manhole' in worker:
                    yield self._controller.call('crossbar.node.{}.worker.{}.start_manhole'.format(self._node_id, worker_id), worker['manhole'], options=call_options)
                    self.log.debug("{worker}: manhole started",
                                   worker=worker_logname)

                # setup router worker
                if worker_type == 'router':

                    # start realms on router
                    for realm in worker.get('realms', []):

                        # start realm
                        if 'id' in realm:
                            realm_id = realm.pop('id')
                        else:
                            realm_id = 'realm-{:03d}'.format(self._realm_no)
                            self._realm_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm'.format(self._node_id, worker_id), realm_id, realm, options=call_options)
                        self.log.info("{worker}: realm '{realm_id}' (named '{realm_name}') started",
                                      worker=worker_logname, realm_id=realm_id, realm_name=realm['name'])

                        # add roles to realm
                        for role in realm.get('roles', []):
                            if 'id' in role:
                                role_id = role.pop('id')
                            else:
                                role_id = 'role-{:03d}'.format(self._role_no)
                                self._role_no += 1

                            yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm_role'.format(self._node_id, worker_id), realm_id, role_id, role, options=call_options)
                            self.log.info("{}: role '{}' (named '{}') started on realm '{}'".format(worker_logname, role_id, role['name'], realm_id))

                        # start uplinks for realm
                        for uplink in realm.get('uplinks', []):
                            if 'id' in uplink:
                                uplink_id = uplink.pop('id')
                            else:
                                uplink_id = 'uplink-{:03d}'.format(self._uplink_no)
                                self._uplink_no += 1

                            yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm_uplink'.format(self._node_id, worker_id), realm_id, uplink_id, uplink, options=call_options)
                            self.log.info("{}: uplink '{}' started on realm '{}'".format(worker_logname, uplink_id, realm_id))

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the router
                    for connection in worker.get('connections', []):

                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection-{:03d}'.format(self._connection_no)
                            self._connection_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_connection'.format(self._node_id, worker_id), connection_id, connection, options=call_options)
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the router
                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component-{:03d}'.format(self._component_no)
                            self._component_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_component'.format(self._node_id, worker_id), component_id, component, options=call_options)
                        self.log.info("{}: component '{}' started".format(worker_logname, component_id))

                    # start transports on router
                    for transport in worker['transports']:

                        if 'id' in transport:
                            transport_id = transport.pop('id')
                        else:
                            transport_id = 'transport-{:03d}'.format(self._transport_no)
                            self._transport_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_transport'.format(self._node_id, worker_id), transport_id, transport, options=call_options)
                        self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                # setup container worker
                elif worker_type == 'container':

                    # if components exit "very soon after" we try to
                    # start them, we consider that a failure and shut
                    # our node down. We remove this subscription 2
                    # seconds after we're done starting everything
                    # (see below). This is necessary as
                    # start_container_component returns as soon as
                    # we've established a connection to the component
                    def component_exited(info):
                        component_id = info.get("id")
                        self.log.critical("Component '{component_id}' failed to start; shutting down node.", component_id=component_id)
                        try:
                            self._reactor.stop()
                        except twisted.internet.error.ReactorNotRunning:
                            pass
                    topic = 'crossbar.node.{}.worker.{}.container.on_component_stop'.format(self._node_id, worker_id)
                    component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the container
                    #
                    for connection in worker.get('connections', []):

                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection-{:03d}'.format(self._connection_no)
                            self._connection_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_connection'.format(self._node_id, worker_id), connection_id, connection, options=call_options)
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the container
                    #
                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component-{:03d}'.format(self._component_no)
                            self._component_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_container_component'.format(self._node_id, worker_id), component_id, component, options=call_options)
                        self.log.info("{worker}: component '{component_id}' started",
                                      worker=worker_logname, component_id=component_id)

                    # after 2 seconds, consider all the application components running
                    self._reactor.callLater(2, component_stop_sub.unsubscribe)

                # setup websocket-testee worker
                elif worker_type == 'websocket-testee':

                    # start transport on websocket-testee
                    transport = worker['transport']
                    transport_id = 'transport-{:03d}'.format(self._transport_no)
                    # FIX: was "self._transport_no = 1", which reset the
                    # counter and could produce duplicate transport IDs
                    self._transport_no += 1

                    yield self._controller.call('crossbar.node.{}.worker.{}.start_websocket_testee_transport'.format(self._node_id, worker_id), transport_id, transport, options=call_options)
                    self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                else:
                    raise Exception("logic error")

            elif worker_type == 'guest':

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")
class CallerTestCase(TestCase):
    """
    Unit tests for L{CallerResource}.
    """
    def setUp(self):
        """
        Set up a local router with a single realm and a permissive
        'test_role' so test sessions can call/register freely under
        the 'com.myapp.' URI prefix.
        """
        # create a router factory
        self.router_factory = RouterFactory(None, None)

        # start a realm
        self.realm = RouterRealm(None, {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(self.router, u'test_role', default_permissions={
                u'uri': u'com.myapp.',
                u'match': u'prefix',
                u'allow': {
                    u'call': True,
                    u'register': True,
                    u'publish': True,
                    u'subscribe': True,
                }
            }))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    @inlineCallbacks
    def test_add2(self):
        """
        Test a very basic call where you square root a number. This has one
        arg, no kwargs, and no authorisation.
        """
        # session registers the procedures; session2 backs the HTTP caller
        session = TestSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(session, authrole=u"test_role")

        session2 = ApplicationSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(session2, authrole=u"test_role")

        resource = CallerResource({}, session2)

        with LogCapturer() as l:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]},
                body=b'{"procedure": "com.myapp.sqrt", "args": [2]}')

        self.assertEqual(request.code, 200)
        self.assertEqual(json.loads(native_string(request.get_written_data())),
                         {"args": [1.4142135623730951]})

        # exactly one "success" log entry (category AR202) is expected
        logs = l.get_category("AR202")
        self.assertEqual(len(logs), 1)
        self.assertEqual(logs[0]["code"], 200)

    @inlineCallbacks
    def test_failure(self):
        """
        A failed call returns the error to the client.
        """
        session = TestSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(session, authrole=u"test_role")

        session2 = ApplicationSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(session2, authrole=u"test_role")

        resource = CallerResource({}, session2)

        # (procedure, args, expected error payload) triples exercising the
        # error-raising procedures registered by TestSession
        tests = [
            (u"com.myapp.sqrt", (0, ),
             {u"error": u"wamp.error.runtime_error",
              u"args": [u"don't ask foolish questions ;)"],
              u"kwargs": {}}),
            (u"com.myapp.checkname", ("foo", ),
             {u"error": u"com.myapp.error.reserved",
              u"args": [],
              u"kwargs": {}}),
            (u"com.myapp.checkname", ("*", ),
             {u"error": u"com.myapp.error.invalid_length",
              u"args": [],
              u"kwargs": {"min": 3, "max": 10}}),
            (u"com.myapp.checkname", ("hello", ),
             {u"error": u"com.myapp.error.mixed_case",
              u"args": ["hello", "HELLO"],
              u"kwargs": {}}),
            (u"com.myapp.compare", (1, 10),
             {u"error": u"com.myapp.error1",
              u"args": [9],
              u"kwargs": {}}),
        ]

        for procedure, args, err in tests:
            with LogCapturer() as l:
                request = yield renderResource(
                    resource, b"/",
                    method=b"POST",
                    headers={b"Content-Type": [b"application/json"]},
                    body=dump_json({"procedure": procedure, "args": args}).encode('utf8'))

            # call errors are reported with HTTP 200 and the error in the body
            self.assertEqual(request.code, 200)
            self.assertEqual(
                json.loads(native_string(request.get_written_data())),
                err)

            logs = l.get_category("AR458")
            self.assertEqual(len(logs), 1)
            self.assertEqual(logs[0]["code"], 200)

        # We manually logged the errors; we can flush them from the log
        self.flushLoggedErrors()

    @inlineCallbacks
    def test_cb_failure(self):
        """
        An unexpected (non-WAMP) failure while performing the call - here a
        CallerResource constructed without a session - yields a generic
        HTTP 500 error response.
        """
        resource = CallerResource({}, None)

        with LogCapturer() as l:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]},
                body=b'{"procedure": "foo"}')

        self.assertEqual(request.code, 500)
        self.assertEqual(
            json.loads(native_string(request.get_written_data())),
            {"error": "wamp.error.runtime_error",
             "args": ["Sorry, Crossbar.io has encountered a problem."],
             "kwargs": {}})

        errors = l.get_category("AR500")
        self.assertEqual(len(errors), 1)

        # We manually logged the errors; we can flush them from the log
        self.flushLoggedErrors()

    @inlineCallbacks
    def test_no_procedure(self):
        """
        Test that calls with no procedure in the request body are rejected.
        """
        resource = CallerResource({}, None)

        with LogCapturer() as l:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]},
                body=b"{}")

        self.assertEqual(request.code, 400)

        errors = l.get_category("AR455")
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0]["code"], 400)

    @inlineCallbacks
    def test_no_body(self):
        """
        Test that calls with no body are rejected.
        """
        resource = CallerResource({}, None)

        with LogCapturer() as l:
            request = yield renderResource(
                resource, b"/",
                method=b"POST",
                headers={b"Content-Type": [b"application/json"]})

        self.assertEqual(request.code, 400)

        errors = l.get_category("AR453")
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0]["code"], 400)
def start(self, cdc_mode=False): """ Starts this node. This will start a node controller and then spawn new worker processes as needed. """ if not self._config: raise Exception("No node configuration loaded") controller_config = self._config.get('controller', {}) controller_options = controller_config.get('options', {}) # set controller process title # try: import setproctitle except ImportError: self.log.warn("Warning, could not set process title (setproctitle not installed)") else: setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller')) # the node controller realm # self._realm = controller_config.get(u'realm', u'crossbar') # the node's name (must be unique within the management realm when running # in "managed mode") # if 'id' in controller_config: self._node_id = controller_config['id'] self.log.info("Node ID '{node_id}' set from config", node_id=self._node_id) elif 'CDC_ID' in os.environ: self._node_id = u'{}'.format(os.environ['CDC_ID']) self.log.info("Node ID '{node_id}' set from environment variable CDC_ID", node_id=self._node_id) else: self._node_id = u'{}'.format(socket.gethostname()) self.log.info("Node ID '{node_id}' set from hostname", node_id=self._node_id) # standalone vs managed mode # if 'cdc' in controller_config and controller_config['cdc'].get('enabled', False): self._prepare_node_keys() cdc_config = controller_config['cdc'] # CDC connecting transport # if 'transport' in cdc_config: transport = cdc_config['transport'] if 'tls' in transport['endpoint']: if 'hostname' in transport['endpoint']: hostname = transport['endpoint']['tls']['hostname'] else: raise Exception("TLS activated on CDC connection, but 'hostname' not provided") else: hostname = None self.log.warn("CDC transport configuration overridden from node config!") else: transport = { "type": u"websocket", "url": u"wss://devops.crossbario.com/ws", "endpoint": { "type": u"tcp", "host": u"devops.crossbario.com", "port": 443, "timeout": 5, "tls": { "hostname": 
u"devops.crossbario.com" } } } hostname = u'devops.crossbario.com' # CDC management realm # if 'realm' in cdc_config: realm = cdc_config['realm'] self.log.info("CDC management realm '{realm}' set from config", realm=realm) elif 'CDC_REALM' in os.environ: realm = u"{}".format(os.environ['CDC_REALM']).strip() self.log.info("CDC management realm '{realm}' set from enviroment variable CDC_REALM", realm=realm) else: raise Exception("CDC management realm not set - either 'realm' must be set in node configuration, or in CDC_REALM enviroment variable") # CDC authentication credentials (for WAMP-CRA) # authid = self._node_id if 'secret' in cdc_config: authkey = cdc_config['secret'] self.log.info("CDC authentication secret loaded from config") elif 'CDC_SECRET' in os.environ: authkey = u"{}".format(os.environ['CDC_SECRET']).strip() self.log.info("CDC authentication secret loaded from environment variable CDC_SECRET") else: raise Exception("CDC authentication secret not set - either 'secret' must be set in node configuration, or in CDC_SECRET enviroment variable") # extra info forwarded to CDC client session # extra = { 'node': self, 'onready': Deferred(), 'onexit': Deferred(), 'authid': authid, 'authkey': authkey } runner = ApplicationRunner( url=transport['url'], realm=realm, extra=extra, ssl=optionsForClientTLS(hostname) if hostname else None, ) try: self.log.info("Connecting to CDC at '{url}' ..", url=transport['url']) yield runner.run(NodeManagementSession, start_reactor=False) # wait until we have attached to the uplink CDC self._manager = yield extra['onready'] except Exception as e: raise Exception("Could not connect to CDC - {}".format(e)) # in managed mode, a node - by default - only shuts down when explicitly asked to, # or upon a fatal error in the node controller self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED] self.log.info("Connected to Crossbar.io DevOps Center (CDC)! 
Your node runs in managed mode.") else: self._manager = None # in standalone mode, a node - by default - is immediately shutting down whenever # a worker exits (successfully or with error) self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT] # allow to override node shutdown triggers # if 'shutdown' in controller_options: self.log.info("Overriding default node shutdown triggers with {} from node config".format(controller_options['shutdown'])) self._node_shutdown_triggers = controller_options['shutdown'] else: self.log.info("Using default node shutdown triggers {}".format(self._node_shutdown_triggers)) # router and factory that creates router sessions # self._router_factory = RouterFactory(self._node_id) self._router_session_factory = RouterSessionFactory(self._router_factory) rlm_config = { 'name': self._realm } rlm = RouterRealm(None, rlm_config) # create a new router for the realm router = self._router_factory.start_realm(rlm) # add a router/realm service session cfg = ComponentConfig(self._realm) rlm.session = RouterServiceSession(cfg, router) self._router_session_factory.add(rlm.session, authrole=u'trusted') if self._manager: self._bridge_session = NodeManagementBridgeSession(cfg, self, self._manager) self._router_session_factory.add(self._bridge_session, authrole=u'trusted') else: self._bridge_session = None # the node controller singleton WAMP application session # self._controller = NodeControllerSession(self) # add the node controller singleton session to the router # self._router_session_factory.add(self._controller, authrole=u'trusted') # Detect WAMPlets # wamplets = self._controller._get_wamplets() if len(wamplets) > 0: self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets)) for wpl in wamplets: self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name']) else: self.log.debug("No WAMPlets detected in enviroment.") panic = False try: yield self._startup(self._config) # Notify systemd that 
crossbar is fully up and running # This has no effect on non-systemd platforms try: import sdnotify sdnotify.SystemdNotifier().notify("READY=1") except: pass except ApplicationError as e: panic = True self.log.error("{msg}", msg=e.error_message()) except Exception: panic = True traceback.print_exc() if panic: try: self._reactor.stop() except twisted.internet.error.ReactorNotRunning: pass
class TestBrokerPublish(unittest.TestCase):
    """
    Tests for crossbar.router.broker.Broker
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """

        # create a router factory
        self.router_factory = RouterFactory(u'mynode')

        # start a realm
        self.realm = RouterRealm(None, {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything (prefix-match on com.example. for role 'test_role')
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(
                self.router,
                u'test_role',
                default_permissions={
                    u'uri': u'com.example.',
                    u'match': u'prefix',
                    u'allow': {
                        u'call': True,
                        u'register': True,
                        u'publish': True,
                        u'subscribe': True,
                    }
                }
            )
        )

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # resolving the future proves onJoin fired
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d

    def test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        RouterApplicationSession's send() method (from the Hello msg)
        """
        # setup
        the_exception = RuntimeError("sadness")
        errors = []

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

            def onUserError(self, fail, msg):
                errors.append((fail, msg))

        session = TestSession(types.ComponentConfig(u'realm1'))
        from crossbar.router.session import RouterApplicationSession

        # Note to self: original code was logging directly in
        # RouterApplicationSession -- which *may* actually be better?
        # or not...
        with mock.patch.object(RouterApplicationSession, 'log') as logger:
            # this should call onJoin, triggering our error
            self.session_factory.add(session)

            # NOTE: the else-branch below is deliberately disabled; it kept the
            # old assertion style around for reference.
            if True:
                self.assertEqual(1, len(errors), "Didn't see our error")
                self.assertEqual(the_exception, errors[0][0].value)

            else:
                # check we got the right log.failure() call
                self.assertTrue(len(logger.method_calls) > 0)
                call = logger.method_calls[0]
                # for a MagicMock call-object, 0th thing is the method-name, 1st
                # thing is the arg-tuple, 2nd thing is the kwargs.
                self.assertEqual(call[0], 'failure')
                self.assertEqual(call[1][0].value, the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello(u'realm1', dict(caller=role.RoleCallerFeatures()))

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('failure' in call[2])
        self.assertEqual(call[2]['failure'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate(u'bogus signature')

        # do the test; should call onAuthenticate which is now "boom", above
        session.onMessage(msg)

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors), "Expected just one error: {}".format(errors))
        self.assertTrue(the_exception in [fail.value for fail in errors])

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some
        topic and add it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                d2 = self.subscribe(lambda: None, u'com.example.topic1')

                def ok(_):
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session, authrole=u'test_role')

        return d

    def test_publish_closed_session(self):
        """
        ensure a session doesn't get Events if it's closed
        (see also issue #431)
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass
        session0 = TestSession()
        session1 = TestSession()
        router = mock.MagicMock()
        broker = Broker(router)

        # let's just "cheat" our way a little to the right state by
        # injecting our subscription "directly" (e.g. instead of
        # faking out an entire Subscribe etc. flow
        # ...so we need _subscriptions_map to have at least one
        # subscription (our test one) for the topic we'll publish to
        broker._subscription_map.add_observer(session0, u'test.topic')

        # simulate the session state we want, which is that a
        # transport is connected (._transport != None) but there
        # _session_id *is* None (not joined yet, or left already)
        self.assertIs(None, session0._session_id)
        session0._transport = mock.MagicMock()
        session0._transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        session1._session_id = 1234  # "from" session should look connected + joined
        session1._transport = mock.MagicMock()
        session1._transport.channel_id = b'aaaabeef'

        # here's the main "cheat"; we're faking out the
        # router.authorize because we need it to callback immediately
        router.authorize = mock.MagicMock(return_value=txaio.create_future_success(dict(allow=True, cache=False, disclose=True)))

        # now we can call "processPublish" such that we get to the
        # condition we're interested in (this "comes from" session1
        # because by default publishes don't go to the same session)
        pubmsg = message.Publish(123, u'test.topic')
        broker.processPublish(session1, pubmsg)

        # neither session should have sent anything on its transport
        self.assertEquals(session0._transport.method_calls, [])
        self.assertEquals(session1._transport.method_calls, [])
class Node(object):
    """
    Crossbar.io Community node personality.
    """

    # http://patorjk.com/software/taag/#p=display&h=1&f=Stick%20Letters&t=Crossbar.io
    BANNER = r"""     __  __  __  __  __  __      __     __
    /  `|__)/  \/__`/__`|__) /\ |__)  |/  \
    \__,|  \\__/.__/.__/|__)/~~\|  \. |\__/
"""

    PERSONALITY = "Crossbar.io COMMUNITY"

    NODE_CONTROLLER = NodeControllerSession

    ROUTER_SERVICE = RouterServiceSession

    # A Crossbar.io node is the running a controller process and one or multiple
    # worker processes.
    # A single Crossbar.io node runs exactly one instance of this class, hence
    # this class can be considered a system singleton.

    log = make_logger()

    def __init__(self, cbdir=None, reactor=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode

        :param reactor: Reactor to run on.
        :type reactor: obj or None
        """
        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # the node controller realm
        self._realm = u'crossbar'

        # config of this node.
        self._config = None

        # node private key autobahn.wamp.cryptosign.SigningKey
        self._node_key = None

        # when running in managed mode, this will hold the uplink session to CFC
        self._manager = None

        # the node's management realm when running in managed mode (this comes from CFC!)
        self._management_realm = None

        # the node's ID when running in managed mode (this comes from CFC!)
        self._node_id = None

        # node extra when running in managed mode (this comes from CFC!)
        self._node_extra = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # for node elements started under specific IDs, and where
        # the node configuration does not specify an ID, use a generic
        # name numbered sequentially using the counters here
        self._worker_no = 1
        self._realm_no = 1
        self._role_no = 1
        self._connection_no = 1
        self._transport_no = 1
        self._component_no = 1
        # BUGFIX: _uplink_no is read/incremented in _configure_node_from_config
        # (realm "uplinks") but was never initialized, raising AttributeError
        # for any configuration using uplinks.
        self._uplink_no = 1

    def maybe_generate_key(self, cbdir, privkey_path=u'key.priv', pubkey_path=u'key.pub'):
        """
        Load the node's Ed25519 key pair from the node directory, generating
        and persisting a new pair if none exists yet. Also (re)creates a
        missing public key file and fixes file permissions.

        :param cbdir: Node directory holding the key files.
        :param privkey_path: Private key filename (relative to cbdir).
        :param pubkey_path: Public key filename (relative to cbdir).

        :returns: The node public key as a hex string.
        :raises Exception: on corrupt or inconsistent key files.
        """
        privkey_path = os.path.join(cbdir, privkey_path)
        pubkey_path = os.path.join(cbdir, pubkey_path)

        if os.path.exists(privkey_path):

            # node private key seems to exist already .. check!

            priv_tags = _parse_keyfile(privkey_path, private=True)
            for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519', u'private-key-ed25519']:
                if tag not in priv_tags:
                    raise Exception("Corrupt node private key file {} - {} tag not found".format(privkey_path, tag))

            privkey_hex = priv_tags[u'private-key-ed25519']
            privkey = SigningKey(privkey_hex, encoder=HexEncoder)
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            # the public key stored in the private key file must match the
            # one derived from the private key itself
            if priv_tags[u'public-key-ed25519'] != pubkey_hex:
                raise Exception(("Inconsistent node private key file {} - public-key-ed25519 doesn't"
                                 " correspond to private-key-ed25519").format(pubkey_path))

            if os.path.exists(pubkey_path):
                pub_tags = _parse_keyfile(pubkey_path, private=False)
                for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519']:
                    if tag not in pub_tags:
                        raise Exception("Corrupt node public key file {} - {} tag not found".format(pubkey_path, tag))

                if pub_tags[u'public-key-ed25519'] != pubkey_hex:
                    raise Exception(("Inconsistent node public key file {} - public-key-ed25519 doesn't"
                                     " correspond to private-key-ed25519").format(pubkey_path))
            else:
                # public key file missing - re-create it from the private key
                self.log.info(
                    "Node public key file {pub_path} not found - re-creating from node private key file {priv_path}",
                    pub_path=pubkey_path,
                    priv_path=privkey_path,
                )
                pub_tags = OrderedDict([
                    (u'creator', priv_tags[u'creator']),
                    (u'created-at', priv_tags[u'created-at']),
                    (u'machine-id', priv_tags[u'machine-id']),
                    (u'public-key-ed25519', pubkey_hex),
                ])
                msg = u'Crossbar.io node public key\n\n'
                _write_node_key(pubkey_path, pub_tags, msg)

            self.log.debug("Node key already exists (public key: {hex})", hex=pubkey_hex)

        else:
            # node private key does not yet exist: generate one

            privkey = SigningKey.generate()
            privkey_hex = privkey.encode(encoder=HexEncoder).decode('ascii')
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            # first, write the public file
            tags = OrderedDict([
                (u'creator', _creator()),
                (u'created-at', utcnow()),
                (u'machine-id', _machine_id()),
                (u'public-key-ed25519', pubkey_hex),
            ])
            msg = u'Crossbar.io node public key\n\n'
            _write_node_key(pubkey_path, tags, msg)

            # now, add the private key and write the private file
            tags[u'private-key-ed25519'] = privkey_hex
            msg = u'Crossbar.io node private key - KEEP THIS SAFE!\n\n'
            _write_node_key(privkey_path, tags, msg)

            self.log.info("New node key pair generated!")

        # fix file permissions on node public/private key files
        # note: we use decimals instead of octals as octal literals have changed between Py2/3
        #
        if os.stat(pubkey_path).st_mode & 511 != 420:  # 420 (decimal) == 0644 (octal)
            os.chmod(pubkey_path, 420)
            self.log.info("File permissions on node public key fixed!")

        if os.stat(privkey_path).st_mode & 511 != 384:  # 384 (decimal) == 0600 (octal)
            os.chmod(privkey_path, 384)
            self.log.info("File permissions on node private key fixed!")

        self._node_key = cryptosign.SigningKey(privkey)

        return pubkey_hex

    def load(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in empty config.

        :param configfile: Path to the config file, relative to the node directory,
            or None to use the built-in default configuration.
        """
        if configfile:
            configpath = os.path.abspath(os.path.join(self._cbdir, configfile))

            self.log.debug('Loading node configuration from "{configpath}" ..',
                           configpath=configpath)

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = checkconfig.check_config_file(configpath)

            self.log.info('Node configuration loaded from "{configpath}"',
                          configpath=configpath)
        else:
            self._config = {u'version': 2, u'controller': {}, u'workers': []}
            checkconfig.check_config(self._config)
            self.log.info('Node configuration loaded from built-in config.')

    def _add_global_roles(self):
        # the Community personality adds no extra roles to the node router
        self.log.info('No extra node router roles')

    def _add_worker_role(self, worker_auth_role, options):
        """
        Add a static role for a (native) worker to the node management router,
        granting the worker exactly the permissions it needs to operate.
        """
        worker_role_config = {
            u"name": worker_auth_role,
            u"permissions": [
                # the worker requires these permissions to work:
                {
                    # worker_auth_role: "crossbar.worker.worker-001"
                    u"uri": worker_auth_role,
                    u"match": u"prefix",
                    u"allow": {
                        u"call": False,
                        u"register": True,
                        u"publish": True,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                },
                {
                    u"uri": u"crossbar.get_info",
                    u"match": u"exact",
                    u"allow": {
                        u"call": True,
                        u"register": False,
                        u"publish": False,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                }
            ]
        }
        self._router_factory.add_role(self._realm, worker_role_config)

    def _drop_worker_role(self, worker_auth_role):
        # remove a worker's role from the node management router again
        self._router_factory.drop_role(self._realm, worker_auth_role)

    def _extend_worker_args(self, args, options):
        # hook for personalities to add extra worker command line arguments
        pass

    def _add_extra_controller_components(self, controller_options):
        # hook for personalities to attach additional controller components
        pass

    def _set_shutdown_triggers(self, controller_options):
        # allow to override node shutdown triggers
        #
        if 'shutdown' in controller_options:
            self._node_shutdown_triggers = controller_options['shutdown']
            self.log.info("Using node shutdown triggers {triggers} from configuration",
                          triggers=self._node_shutdown_triggers)
        else:
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]
            self.log.info("Using default node shutdown triggers {triggers}",
                          triggers=self._node_shutdown_triggers)

    @inlineCallbacks
    def start(self):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        :raises Exception: if no node configuration has been set (call load() first).
        """
        if not self._config:
            raise Exception("No node configuration set")

        # get controller config/options
        #
        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # set controller process title
        #
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # local node management router
        #
        self._router_factory = RouterFactory()
        self._router_session_factory = RouterSessionFactory(self._router_factory)
        rlm_config = {'name': self._realm}
        rlm = RouterRealm(None, rlm_config)
        router = self._router_factory.start_realm(rlm)

        # setup global static roles
        #
        self._add_global_roles()

        # always add a realm service session
        #
        cfg = ComponentConfig(self._realm)
        rlm.session = (self.ROUTER_SERVICE)(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')
        self.log.debug('Router service session attached [{router_service}]',
                       router_service=qual(self.ROUTER_SERVICE))

        # add the node controller singleton component
        #
        self._controller = self.NODE_CONTROLLER(self)
        self._router_session_factory.add(self._controller, authrole=u'trusted')
        self.log.debug('Node controller attached [{node_controller}]',
                       node_controller=qual(self.NODE_CONTROLLER))

        # add extra node controller components
        #
        self._add_extra_controller_components(controller_options)

        # setup Node shutdown triggers
        #
        self._set_shutdown_triggers(controller_options)

        panic = False
        try:
            # startup the node personality ..
            yield self._startup()

            # .. and notify systemd that we are fully up and running
            try:
                import sdnotify
                sdnotify.SystemdNotifier().notify("READY=1")
            except Exception:
                # do nothing on non-systemd platforms (was a bare "except:",
                # which would also swallow KeyboardInterrupt/SystemExit)
                pass
        except ApplicationError as e:
            panic = True
            self.log.error("{msg}", msg=e.error_message())
        except Exception:
            panic = True
            self.log.failure('Could not startup node: {log_failure.value}')

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    def _startup(self):
        # personality startup hook: apply the loaded node configuration
        return self._configure_node_from_config(self._config)

    @inlineCallbacks
    def _configure_node_from_config(self, config):
        """
        Startup elements in the node as specified in the provided node configuration.

        :param config: Parsed and checked node configuration dict.
        """
        self.log.info('Configuring node from local configuration ...')

        # call options we use to call into the local node management API
        call_options = CallOptions()

        # fake call details we use to call into the local node management API
        call_details = CallDetails(caller=0)

        # get controller configuration subpart
        controller = config.get('controller', {})

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.start_manhole(controller['manhole'], details=call_details)

        # startup all workers
        workers = config.get('workers', [])
        if len(workers):
            self.log.info('Starting {nworkers} workers ...', nworkers=len(workers))
        else:
            self.log.info('No workers configured!')

        for worker in workers:

            # worker ID (auto-numbered when not given in config)
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                worker_id = 'worker-{:03d}'.format(self._worker_no)
                self._worker_no += 1

            # worker type - a type of working process from the following fixed list
            worker_type = worker['type']
            assert (worker_type in ['router', 'container', 'guest', 'websocket-testee'])

            # set logname depending on worker type
            if worker_type == 'router':
                worker_logname = "Router '{}'".format(worker_id)
            elif worker_type == 'container':
                worker_logname = "Container '{}'".format(worker_id)
            elif worker_type == 'websocket-testee':
                worker_logname = "WebSocketTestee '{}'".format(worker_id)
            elif worker_type == 'guest':
                worker_logname = "Guest '{}'".format(worker_id)
            else:
                raise Exception("logic error")

            # any worker specific options
            worker_options = worker.get('options', {})

            # native worker processes: router, container, websocket-testee
            if worker_type in ['router', 'container', 'websocket-testee']:

                # start a new native worker process ..
                if worker_type == 'router':
                    yield self._controller.start_router(worker_id, worker_options, details=call_details)

                elif worker_type == 'container':
                    yield self._controller.start_container(worker_id, worker_options, details=call_details)

                elif worker_type == 'websocket-testee':
                    yield self._controller.start_websocket_testee(worker_id, worker_options, details=call_details)

                else:
                    raise Exception("logic error")

                # setup native worker generic stuff
                if 'pythonpath' in worker_options:
                    added_paths = yield self._controller.call(
                        u'crossbar.worker.{}.add_pythonpath'.format(worker_id),
                        worker_options['pythonpath'], options=call_options)
                    self.log.debug("{worker}: PYTHONPATH extended for {paths}",
                                   worker=worker_logname, paths=added_paths)

                if 'cpu_affinity' in worker_options:
                    new_affinity = yield self._controller.call(
                        u'crossbar.worker.{}.set_cpu_affinity'.format(worker_id),
                        worker_options['cpu_affinity'], options=call_options)
                    self.log.debug("{worker}: CPU affinity set to {affinity}",
                                   worker=worker_logname, affinity=new_affinity)

                if 'manhole' in worker:
                    yield self._controller.call(
                        u'crossbar.worker.{}.start_manhole'.format(worker_id),
                        worker['manhole'], options=call_options)
                    self.log.debug("{worker}: manhole started", worker=worker_logname)

                # setup router worker
                if worker_type == 'router':

                    # start realms on router
                    for realm in worker.get('realms', []):

                        # start realm
                        if 'id' in realm:
                            realm_id = realm.pop('id')
                        else:
                            realm_id = 'realm-{:03d}'.format(self._realm_no)
                            self._realm_no += 1

                        yield self._controller.call(
                            u'crossbar.worker.{}.start_router_realm'.format(worker_id),
                            realm_id, realm, options=call_options)
                        self.log.info(
                            "{worker}: realm '{realm_id}' (named '{realm_name}') started",
                            worker=worker_logname, realm_id=realm_id, realm_name=realm['name'])

                        # add roles to realm
                        for role in realm.get('roles', []):
                            if 'id' in role:
                                role_id = role.pop('id')
                            else:
                                role_id = 'role-{:03d}'.format(self._role_no)
                                self._role_no += 1

                            yield self._controller.call(
                                u'crossbar.worker.{}.start_router_realm_role'.format(worker_id),
                                realm_id, role_id, role, options=call_options)
                            self.log.info(
                                "{logname}: role '{role}' (named '{role_name}') started on realm '{realm}'",
                                logname=worker_logname,
                                role=role_id,
                                role_name=role['name'],
                                realm=realm_id,
                            )

                        # start uplinks for realm
                        for uplink in realm.get('uplinks', []):
                            if 'id' in uplink:
                                uplink_id = uplink.pop('id')
                            else:
                                uplink_id = 'uplink-{:03d}'.format(self._uplink_no)
                                self._uplink_no += 1

                            yield self._controller.call(
                                u'crossbar.worker.{}.start_router_realm_uplink'.format(worker_id),
                                realm_id, uplink_id, uplink, options=call_options)
                            self.log.info(
                                "{logname}: uplink '{uplink}' started on realm '{realm}'",
                                logname=worker_logname,
                                uplink=uplink_id,
                                realm=realm_id,
                            )

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the router
                    for connection in worker.get('connections', []):
                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection-{:03d}'.format(self._connection_no)
                            self._connection_no += 1

                        yield self._controller.call(
                            u'crossbar.worker.{}.start_connection'.format(worker_id),
                            connection_id, connection, options=call_options)
                        self.log.info(
                            "{logname}: connection '{connection}' started",
                            logname=worker_logname,
                            connection=connection_id,
                        )

                    # start components to run embedded in the router
                    for component in worker.get('components', []):
                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component-{:03d}'.format(self._component_no)
                            self._component_no += 1

                        yield self._controller.call(
                            u'crossbar.worker.{}.start_router_component'.format(worker_id),
                            component_id, component, options=call_options)
                        self.log.info(
                            "{logname}: component '{component}' started",
                            logname=worker_logname,
                            component=component_id,
                        )

                    # start transports on router
                    for transport in worker.get('transports', []):
                        if 'id' in transport:
                            transport_id = transport.pop('id')
                        else:
                            transport_id = 'transport-{:03d}'.format(self._transport_no)
                            self._transport_no += 1

                        yield self._controller.call(
                            u'crossbar.worker.{}.start_router_transport'.format(worker_id),
                            transport_id, transport, options=call_options)
                        self.log.info(
                            "{logname}: transport '{tid}' started",
                            logname=worker_logname,
                            tid=transport_id,
                        )

                # setup container worker
                elif worker_type == 'container':

                    # if components exit "very soon after" we try to
                    # start them, we consider that a failure and shut
                    # our node down. We remove this subscription 2
                    # seconds after we're done starting everything
                    # (see below). This is necessary as
                    # start_component returns as soon as
                    # we've established a connection to the component
                    def component_exited(info):
                        component_id = info.get("id")
                        self.log.critical(
                            "Component '{component_id}' failed to start; shutting down node.",
                            component_id=component_id)
                        try:
                            self._reactor.stop()
                        except twisted.internet.error.ReactorNotRunning:
                            pass
                    topic = u'crossbar.worker.{}.container.on_component_stop'.format(worker_id)
                    component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the container
                    #
                    for connection in worker.get('connections', []):
                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection-{:03d}'.format(self._connection_no)
                            self._connection_no += 1

                        yield self._controller.call(
                            u'crossbar.worker.{}.start_connection'.format(worker_id),
                            connection_id, connection, options=call_options)
                        self.log.info(
                            "{logname}: connection '{connection}' started",
                            logname=worker_logname,
                            connection=connection_id,
                        )

                    # start components to run embedded in the container
                    #
                    for component in worker.get('components', []):
                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component-{:03d}'.format(self._component_no)
                            self._component_no += 1

                        yield self._controller.call(
                            u'crossbar.worker.{}.start_component'.format(worker_id),
                            component_id, component, options=call_options)
                        self.log.info("{worker}: component '{component_id}' started",
                                      worker=worker_logname, component_id=component_id)

                    # after 2 seconds, consider all the application components running
                    self._reactor.callLater(2, component_stop_sub.unsubscribe)

                # setup websocket-testee worker
                elif worker_type == 'websocket-testee':

                    # start transport on websocket-testee
                    transport = worker['transport']
                    transport_id = 'transport-{:03d}'.format(self._transport_no)
                    # BUGFIX: was "self._transport_no = 1", which reset the
                    # counter and produced duplicate transport IDs.
                    self._transport_no += 1

                    yield self._controller.call(
                        u'crossbar.worker.{}.start_websocket_testee_transport'.format(worker_id),
                        transport_id, transport, options=call_options)
                    self.log.info(
                        "{logname}: transport '{tid}' started",
                        logname=worker_logname,
                        tid=transport_id,
                    )

                else:
                    raise Exception("logic error")

            elif worker_type == 'guest':

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")

        self.log.info('Local node configuration applied successfully!')
class TestDealer(unittest.TestCase):
    """
    Tests for dealer behavior; currently covers cancellation of in-flight
    calls when the callee session goes away.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """

        # create a router factory
        self.router_factory = RouterFactory()

        # start a realm
        self.realm = RouterRealm(u'realm-001', {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything (prefix-match on com.example. for role 'test_role')
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(
                self.router,
                u'test_role',
                default_permissions={
                    u'uri': u'com.example.',
                    u'match': u'prefix',
                    u'allow': {
                        u'call': True,
                        u'register': True,
                        u'publish': True,
                        u'subscribe': True,
                    }
                }
            )
        )

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    @defer.inlineCallbacks
    def test_outstanding_invoke(self):
        """
        When a call is pending and the callee goes away, it cancels the
        in-flight call
        """

        session = mock.Mock()
        session._realm = u'realm1'
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )
        rap = RouterApplicationSession(session, self.router_factory)

        rap.send(message.Hello(u"realm1", {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Register(1, u'foo'))

        # we can retrieve the Registration via
        # session.mock_calls[-1][1][0] if req'd

        # re-set the authorize, as the Deferred from above is already
        # used-up and it gets called again to authorize the Call
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )
        rap.send(message.Call(42, u'foo'))

        # arrange to capture the next message sent on the session, then
        # send the Goodbye that should trigger cancellation of the call
        orig = rap.send
        d = defer.Deferred()
        rap.send(message.Goodbye())

        def wrapper(*args, **kw):
            d.callback(args[0])
            return orig(*args, **kw)
        rap.send = wrapper

        # we can do this *after* the call to send() the Goodbye (above)
        # because it takes a reactor-turn to actually process the
        # cancel/errors etc -- hence the Deferred and yield in this test...
        msg = yield d

        self.assertEqual(42, msg.request)
        self.assertEqual(u'wamp.error.canceled', msg.error)
def start(self):
    """
    Starts this node. This will start a node controller and then spawn new
    worker processes as needed.

    Startup sequence:

    1. read controller config/options and set the process title
    2. start the local node management router and its realm
    3. attach the realm service session and the node controller singleton
    4. add extra controller components and configure shutdown triggers
    5. run the personality-specific startup (``self._startup``)

    On any startup failure the reactor is stopped ("panic" path).

    :raises Exception: if no node configuration has been set.
    """
    if not self._config:
        raise Exception("No node configuration set")

    # get controller config/options
    #
    controller_config = self._config.get('controller', {})
    controller_options = controller_config.get('options', {})

    # set controller process title (best-effort; setproctitle is an
    # optional dependency)
    #
    try:
        import setproctitle
    except ImportError:
        self.log.warn(
            "Warning, could not set process title (setproctitle not installed)"
        )
    else:
        setproctitle.setproctitle(
            controller_options.get('title', 'crossbar-controller'))

    # local node management router
    #
    self._router_factory = RouterFactory()
    self._router_session_factory = RouterSessionFactory(
        self._router_factory)
    rlm_config = {'name': self._realm}
    rlm = RouterRealm(None, rlm_config)
    router = self._router_factory.start_realm(rlm)

    # setup global static roles
    #
    self._add_global_roles()

    # always add a realm service session
    #
    cfg = ComponentConfig(self._realm)
    rlm.session = (self.ROUTER_SERVICE)(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')
    self.log.debug('Router service session attached [{router_service}]',
                   router_service=qual(self.ROUTER_SERVICE))

    # add the node controller singleton component
    #
    self._controller = self.NODE_CONTROLLER(self)
    self._router_session_factory.add(self._controller, authrole=u'trusted')
    self.log.debug('Node controller attached [{node_controller}]',
                   node_controller=qual(self.NODE_CONTROLLER))

    # add extra node controller components
    #
    self._add_extra_controller_components(controller_options)

    # setup Node shutdown triggers
    #
    self._set_shutdown_triggers(controller_options)

    panic = False
    try:
        # startup the node personality ..
        yield self._startup()

        # .. and notify systemd that we are fully up and running
        try:
            import sdnotify
            sdnotify.SystemdNotifier().notify("READY=1")
        except Exception:
            # FIX: was a bare "except:", which also swallows SystemExit
            # and KeyboardInterrupt. The notification is best-effort
            # only -- do nothing on non-systemd platforms.
            pass
    except ApplicationError as e:
        panic = True
        self.log.error("{msg}", msg=e.error_message())
    except Exception:
        panic = True
        self.log.failure('Could not startup node: {log_failure.value}')

    if panic:
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
class TestBrokerPublish(unittest.TestCase):
    """
    Tests for crossbar.router.broker.Broker
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """
        # create a router factory
        self.router_factory = RouterFactory(None)

        # start a realm
        self.realm = RouterRealm(None, {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything: static-auth role with blanket permissions on
        # the 'com.example.' URI prefix
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(self.router, u'test_role',
                                 default_permissions={
                                     u'uri': u'com.example.',
                                     u'match': u'prefix',
                                     u'allow': {
                                         u'call': True,
                                         u'register': True,
                                         u'publish': True,
                                         u'subscribe': True,
                                     }
                                 }))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):
            def onJoin(self, details):
                # joining successfully resolves the test's future
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(session)
        return d

    def test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        RouterApplicationSession's send() method (from the Hello msg)
        """
        # setup
        the_exception = RuntimeError("sadness")
        errors = []

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

            def onUserError(self, fail, msg):
                errors.append((fail, msg))

        session = TestSession(types.ComponentConfig(u'realm1'))
        from crossbar.router.session import RouterApplicationSession

        # Note to self: original code was logging directly in
        # RouterApplicationSession -- which *may* actually be better?
        # or not...
        with mock.patch.object(RouterApplicationSession, 'log') as logger:
            # this should call onJoin, triggering our error
            self.session_factory.add(session)

        if True:
            # current behavior: the error is delivered via onUserError
            self.assertEqual(1, len(errors), "Didn't see our error")
            self.assertEqual(the_exception, errors[0][0].value)

        else:
            # check we got the right log.failure() call
            self.assertTrue(len(logger.method_calls) > 0)
            call = logger.method_calls[0]
            # for a MagicMock call-object, 0th thing is the method-name, 1st
            # thing is the arg-tuple, 2nd thing is the kwargs.
            self.assertEqual(call[0], 'failure')
            self.assertEqual(call[1][0].value, the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello(u'realm1', dict(caller=role.RoleCallerFeatures()))

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('failure' in call[2])
        self.assertEqual(call[2]['failure'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate(u'bogus signature')

        # do the test; should call onAuthenticate which is now "boom", above
        session.onMessage(msg)

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors), "Expected just one error: {}".format(errors))
        self.assertTrue(the_exception in [fail.value for fail in errors])

    def test_router_session_goodbye_custom_message(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = mock.Mock()
        session._realm = u'realm'
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        session.reset_mock()
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        # the Goodbye must surface as exactly one onLeave with our
        # reason/message in the CloseDetails
        leaves = [call for call in session.mock_calls if call[0] == 'onLeave']
        self.assertEqual(1, len(leaves))
        details = leaves[0][1][0]
        self.assertEqual(u'wamp.reason.logout', details.reason)
        self.assertEqual(u'some custom message', details.message)

    def test_router_session_goodbye_onLeave_error(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = mock.Mock()
        the_exception = RuntimeError("onLeave fails")

        def boom(*args, **kw):
            raise the_exception

        session.onLeave = mock.Mock(side_effect=boom)
        session._realm = u'realm'
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        session.reset_mock()
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        # the exception raised from onLeave must be logged, not swallowed
        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors))
        self.assertEqual(the_exception, errors[0].value)

    def test_router_session_goodbye_fire_disconnect_error(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = mock.Mock()
        the_exception = RuntimeError("sad times at ridgemont high")

        def boom(*args, **kw):
            # only the 'disconnect' lifecycle event fails
            if args[0] == 'disconnect':
                return defer.fail(the_exception)
            return defer.succeed(None)

        session.fire = mock.Mock(side_effect=boom)
        session._realm = u'realm'
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        session.reset_mock()
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors))
        self.assertEqual(the_exception, errors[0].value)

    def test_router_session_lifecycle(self):
        """
        We see all 'lifecycle' notifications.
        """
        from crossbar.router.session import RouterApplicationSession

        def mock_fire(name, *args, **kw):
            # record every lifecycle event name fired on the session
            fired.append(name)
            return defer.succeed(None)

        fired = []
        session = mock.Mock()
        session._realm = u'realm'
        session.fire = mock.Mock(side_effect=mock_fire)
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        # we never fake out the 'Welcome' message, so there will be no
        # 'ready' notification...
        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        self.assertTrue('connect' in fired)
        self.assertTrue('join' in fired)
        self.assertTrue('ready' in fired)
        self.assertTrue('leave' in fired)
        self.assertTrue('disconnect' in fired)

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some topic and add
        it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):
            def onJoin(self, details):
                d2 = self.subscribe(lambda: None, u'com.example.topic1')

                def ok(_):
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig(u'realm1'))
        self.session_factory.add(session, authrole=u'test_role')
        return d

    def test_publish_closed_session(self):
        """
        ensure a session doesn't get Events if it's closed
        (see also issue #431)
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass

        session0 = TestSession()
        session1 = TestSession()
        router = mock.MagicMock()
        broker = Broker(router, reactor)

        # let's just "cheat" our way a little to the right state by
        # injecting our subscription "directly" (e.g. instead of
        # faking out an entire Subscribe etc. flow
        # ...so we need _subscriptions_map to have at least one
        # subscription (our test one) for the topic we'll publish to
        broker._subscription_map.add_observer(session0, u'test.topic')

        # simulate the session state we want, which is that a
        # transport is connected (._transport != None) but there
        # _session_id *is* None (not joined yet, or left already)
        self.assertIs(None, session0._session_id)
        session0._transport = mock.MagicMock()
        session0._transport.get_channel_id = mock.MagicMock(
            return_value=b'deadbeef')
        session1._session_id = 1234  # "from" session should look connected + joined
        session1._transport = mock.MagicMock()
        session1._transport.channel_id = b'aaaabeef'

        # here's the main "cheat"; we're faking out the
        # router.authorize because we need it to callback immediately
        router.authorize = mock.MagicMock(
            return_value=txaio.create_future_success(
                dict(allow=True, cache=False, disclose=True)))

        # now we can call "processPublish" such that we get to the
        # condition we're interested in (this "comes from" session1
        # because by default publishes don't go to the same session)
        pubmsg = message.Publish(123, u'test.topic')
        broker.processPublish(session1, pubmsg)

        # neither session should have sent anything on its transport
        # NOTE(review): assertEquals is a deprecated alias of assertEqual
        self.assertEquals(session0._transport.method_calls, [])
        self.assertEquals(session1._transport.method_calls, [])
class TestBrokerPublish(unittest.TestCase):
    """
    Tests for crossbar.router.broker.Broker
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """
        # create a router factory
        self.router_factory = RouterFactory('node1', 'router1', None)

        # start a realm
        self.realm = RouterRealm(None, None, {'name': 'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything: static-auth role with blanket permissions on
        # the 'com.example.' URI prefix
        self.router = self.router_factory.get('realm1')
        self.router.add_role(
            RouterRoleStaticAuth(self.router, 'test_role',
                                 default_permissions={
                                     'uri': 'com.example.',
                                     'match': 'prefix',
                                     'allow': {
                                         'call': True,
                                         'register': True,
                                         'publish': True,
                                         'subscribe': True,
                                     }
                                 }))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):
            def onJoin(self, details):
                # joining successfully resolves the test's future
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig('realm1'))
        self.session_factory.add(session, self.router)
        return d

    def test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        RouterApplicationSession's send() method (from the Hello msg)
        """
        # setup
        the_exception = RuntimeError("sadness")
        errors = []

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

            def onUserError(self, fail, msg):
                errors.append((fail, msg))

        session = TestSession(types.ComponentConfig('realm1'))
        from crossbar.router.session import RouterApplicationSession

        # Note to self: original code was logging directly in
        # RouterApplicationSession -- which *may* actually be better?
        # or not...
        with mock.patch.object(RouterApplicationSession, 'log') as logger:
            # this should call onJoin, triggering our error
            self.session_factory.add(session, self.router)

        if True:
            # current behavior: the error is delivered via onUserError
            self.assertEqual(1, len(errors), "Didn't see our error")
            self.assertEqual(the_exception, errors[0][0].value)

        else:
            # check we got the right log.failure() call
            self.assertTrue(len(logger.method_calls) > 0)
            call = logger.method_calls[0]
            # for a MagicMock call-object, 0th thing is the method-name, 1st
            # thing is the arg-tuple, 2nd thing is the kwargs.
            self.assertEqual(call[0], 'failure')
            self.assertEqual(call[1][0].value, the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello('realm1', dict(caller=role.RoleCallerFeatures()))

        with mock.patch.object(session, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log call (here a 'warn' with an 'err' kwarg)
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'warn')
        self.assertTrue('err' in call[2])
        self.assertEqual(call[2]['err'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate('bogus signature')

        # do the test; should call onAuthenticate which is now "boom", above
        session.onMessage(msg)

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors), "Expected just one error: {}".format(errors))
        self.assertTrue(the_exception in [fail.value for fail in errors])

    def test_router_session_goodbye_custom_message(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = ApplicationSession()
        session.onLeave = mock.Mock()
        session._realm = 'realm'
        router = Router(factory=mock.Mock(),
                        realm=RouterRealm(
                            controller=None,
                            id='realm',
                            config=dict(name='realm'),
                        ))
        rap = RouterApplicationSession(session, router)

        rap.send(message.Goodbye('wamp.reason.logout', 'some custom message'))

        # the Goodbye must surface as exactly one onLeave with our
        # reason/message in the CloseDetails
        leaves = session.onLeave.mock_calls
        self.assertEqual(1, len(leaves))
        details = leaves[0][1][0]
        self.assertEqual('wamp.reason.logout', details.reason)
        self.assertEqual('some custom message', details.message)

    def test_router_session_goodbye_onLeave_error(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = ApplicationSession()
        the_exception = RuntimeError("onLeave fails")

        def boom(*args, **kw):
            raise the_exception

        session.onLeave = mock.Mock(side_effect=boom)
        session._realm = 'realm'
        router = Router(factory=mock.Mock(),
                        realm=RouterRealm(
                            controller=None,
                            id='realm',
                            config=dict(name='realm'),
                        ))
        rap = RouterApplicationSession(session, router)

        rap.send(message.Goodbye('wamp.reason.logout', 'some custom message'))

        # the exception raised from onLeave must be logged, not swallowed
        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors))
        self.assertEqual(the_exception, errors[0].value)

    def test_router_session_goodbye_fire_disconnect_error(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = ApplicationSession()
        the_exception = RuntimeError("sad times at ridgemont high")

        def boom(*args, **kw):
            # only the 'disconnect' lifecycle event fails
            if args[0] == 'disconnect':
                return defer.fail(the_exception)
            return defer.succeed(None)

        session.fire = mock.Mock(side_effect=boom)
        session._realm = 'realm'
        router = Router(factory=mock.Mock(),
                        realm=RouterRealm(
                            controller=None,
                            id='realm',
                            config=dict(name='realm'),
                        ))
        rap = RouterApplicationSession(session, router)

        rap.send(message.Goodbye('wamp.reason.logout', 'some custom message'))

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors))
        self.assertEqual(the_exception, errors[0].value)

    def test_router_session_lifecycle(self):
        """
        We see all 'lifecycle' notifications.
        """
        from crossbar.router.session import RouterApplicationSession

        def mock_fire(name, *args, **kw):
            # record every lifecycle event name fired on the session
            fired.append(name)
            return defer.succeed(None)

        fired = []
        session = ApplicationSession()
        session._realm = 'realm'
        session.fire = mock.Mock(side_effect=mock_fire)
        router = Router(factory=mock.Mock(),
                        realm=RouterRealm(
                            controller=None,
                            id='realm',
                            config=dict(name='realm'),
                        ))
        rap = RouterApplicationSession(session, router)

        # we never fake out the 'Welcome' message, so there will be no
        # 'ready' notification...
        rap.send(message.Goodbye('wamp.reason.logout', 'some custom message'))

        self.assertTrue('connect' in fired)
        self.assertTrue('join' in fired)
        self.assertTrue('ready' in fired)
        self.assertTrue('leave' in fired)
        self.assertTrue('disconnect' in fired)

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some topic and add
        it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):
            def onJoin(self, details):
                d2 = self.subscribe(lambda: None, 'com.example.topic1')

                def ok(_):
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig('realm1'))
        self.session_factory.add(session, self.router, authrole='test_role')
        return d

    def test_publish_closed_session(self):
        """
        ensure a session doesn't get Events if it's closed
        (see also issue #431)
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass

        session0 = TestSession()
        session1 = TestSession()
        router = mock.MagicMock()
        router.new_correlation_id = lambda: 'fake correlation id'
        broker = Broker(router, reactor)

        # let's just "cheat" our way a little to the right state by
        # injecting our subscription "directly" (e.g. instead of
        # faking out an entire Subscribe etc. flow
        # ...so we need _subscriptions_map to have at least one
        # subscription (our test one) for the topic we'll publish to
        broker._subscription_map.add_observer(session0, 'test.topic')

        # simulate the session state we want, which is that a
        # transport is connected (._transport != None) but there
        # _session_id *is* None (not joined yet, or left already)
        self.assertIs(None, session0._session_id)
        session0._transport = mock.MagicMock()
        session0._transport.get_channel_id = mock.MagicMock(
            return_value=b'deadbeef')
        session1._session_id = 1234  # "from" session should look connected + joined
        session1._transport = mock.MagicMock()
        session1._transport.channel_id = b'aaaabeef'

        # here's the main "cheat"; we're faking out the
        # router.authorize because we need it to callback immediately
        router.authorize = mock.MagicMock(
            return_value=txaio.create_future_success(
                dict(allow=True, cache=False, disclose=True)))

        # now we can call "processPublish" such that we get to the
        # condition we're interested in (this "comes from" session1
        # because by default publishes don't go to the same session)
        pubmsg = message.Publish(123, 'test.topic')
        broker.processPublish(session1, pubmsg)

        # neither session should have sent anything on its transport
        # NOTE(review): assertEquals is a deprecated alias of assertEqual
        self.assertEquals(session0._transport.method_calls, [])
        self.assertEquals(session1._transport.method_calls, [])

    def test_publish_traced_events(self):
        """
        with two subscribers and message tracing the last event should
        have a magic flag
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass

        session0 = TestSession()
        session1 = TestSession()
        session2 = TestSession()
        router = mock.MagicMock()
        router.send = mock.Mock()
        router.new_correlation_id = lambda: 'fake correlation id'
        router.is_traced = True
        broker = Broker(router, reactor)

        # let's just "cheat" our way a little to the right state by
        # injecting our subscription "directly" (e.g. instead of
        # faking out an entire Subscribe etc. flow
        # ...so we need _subscriptions_map to have at least one
        # subscription (our test one) for the topic we'll publish to
        broker._subscription_map.add_observer(session0, 'test.topic')
        broker._subscription_map.add_observer(session1, 'test.topic')

        session0._session_id = 1000
        session0._transport = mock.MagicMock()
        session0._transport.get_channel_id = mock.MagicMock(
            return_value=b'deadbeef')
        session1._session_id = 1001
        session1._transport = mock.MagicMock()
        session1._transport.get_channel_id = mock.MagicMock(
            return_value=b'deadbeef')
        session2._session_id = 1002
        session2._transport = mock.MagicMock()
        session2._transport.get_channel_id = mock.MagicMock(
            return_value=b'deadbeef')

        # here's the main "cheat"; we're faking out the
        # router.authorize because we need it to callback immediately
        router.authorize = mock.MagicMock(
            return_value=txaio.create_future_success(
                dict(allow=True, cache=False, disclose=True)))

        # now we can call "processPublish" such that we get to the
        # condition we're interested in (this "comes from" session2;
        # by default publishes don't go to the same session)
        pubmsg = message.Publish(123, 'test.topic')
        broker.processPublish(session2, pubmsg)

        # extract all the event calls
        events = [
            call[1][1] for call in router.send.mock_calls
            if call[1][0] in [session0, session1, session2]
        ]

        # only the two subscribers get an event; the last one carries
        # the trace marker
        self.assertEqual(2, len(events))
        self.assertFalse(events[0].correlation_is_last)
        self.assertTrue(events[1].correlation_is_last)

    def test_publish_traced_events_batched(self):
        """
        with two subscribers and message tracing the last event should
        have a magic flag
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass

        session0 = TestSession()
        session1 = TestSession()
        session2 = TestSession()
        session3 = TestSession()
        session4 = TestSession()
        # NOTE! We ensure that "session0" (the publishing session) is
        # *last* in the observation-list to trigger a (now fixed)
        # edge-case)
        sessions = [session1, session2, session3, session4, session0]
        router = mock.MagicMock()
        router.send = mock.Mock()
        router.new_correlation_id = lambda: 'fake correlation id'
        router.is_traced = True
        clock = Clock()
        with replace_loop(clock):
            broker = Broker(router, clock)
            # dispatch in chunks of 2 so delivery spans reactor turns
            broker._options.event_dispatching_chunk_size = 2

            # to ensure we get "session0" last, we turn on ordering in
            # the observations
            broker._subscription_map._ordered = 1

            # let's just "cheat" our way a little to the right state by
            # injecting our subscription "directly" (e.g. instead of
            # faking out an entire Subscribe etc. flow
            # ...so we need _subscriptions_map to have at least one
            # subscription (our test one) for the topic we'll publish to
            for session in sessions:
                broker._subscription_map.add_observer(session, 'test.topic')

            for i, sess in enumerate(sessions):
                sess._session_id = 1000 + i
                sess._transport = mock.MagicMock()
                sess._transport.get_channel_id = mock.MagicMock(
                    return_value=b'deadbeef')

            # here's the main "cheat"; we're faking out the
            # router.authorize because we need it to callback immediately
            router.authorize = mock.MagicMock(
                return_value=txaio.create_future_success(
                    dict(allow=True, cache=False, disclose=True)))

            # now we can call "processPublish" such that we get to the
            # condition we're interested in; should go to all sessions
            # except session0
            pubmsg = message.Publish(123, 'test.topic')
            broker.processPublish(session0, pubmsg)
            # advance the fake clock to flush both dispatch batches
            clock.advance(1)
            clock.advance(1)

            # extract all the event calls
            events = [
                call[1][1] for call in router.send.mock_calls
                if call[1][0] in
                [session0, session1, session2, session3, session4]
            ]

            # all except session0 should have gotten an event, and
            # session4's should have the "last" flag set
            self.assertEqual(4, len(events))
            self.assertFalse(events[0].correlation_is_last)
            self.assertFalse(events[1].correlation_is_last)
            self.assertFalse(events[2].correlation_is_last)
            self.assertTrue(events[3].correlation_is_last)

    def test_subscribe_unsubscribe(self):
        """
        Make sure the wamp.* event sequence delivered as part of a subscribe/unsubscribe sequence
        is correct and contains the correct session id and subscription id values.
        """
        router = self.router
        test = self

        class Session(mock.MagicMock):
            # NOTE(review): class-level mutable lists -- shared across
            # instances, OK here since only one Session is created per test
            _private = []
            _events = []
            _authrole = 'trusted'
            _session_id = 0

            def send(self, *args, **argv):
                # capture all router replies (Subscribed/Unsubscribed etc.)
                self._private.append(args[0])

            def publish(self, *args, **argv):
                # capture all meta-event publications
                self._events.append([args, argv])

        class TestSession(ApplicationSession):
            def __init__(self, *args, **kw):
                super().__init__(*args, **kw)
                self._service_session = Session()
                self._router = router

            def onJoin(self, details):
                self._service_session._session_id = details.session
                router.attach(self._service_session)
                # install our mock as the realm's service session so the
                # broker publishes wamp.* meta-events through it
                router._broker._router._realm.session = self._service_session
                subscription = message.Subscribe(self._service_session._session_id, 'com.example.test1')
                router._broker.processSubscribe(self._service_session, subscription)
                subscription = message.Subscribe(self._service_session._session_id, 'com.example.test2')
                router._broker.processSubscribe(self._service_session, subscription)
                subscription = message.Subscribe(self._service_session._session_id, 'com.example.test3')
                router._broker.processSubscribe(self._service_session, subscription)
                subscriptions = []
                for obj in list(self._service_session._private):
                    subscription = message.Unsubscribe(
                        self._service_session._session_id,
                        subscription=obj.subscription)
                    router._broker.processUnsubscribe(self._service_session, subscription)
                    subscriptions.append(obj.subscription)

                def all_done():
                    # each list starts with all subscription ids; matching
                    # meta-events must drain each list to exactly zero
                    created = list(subscriptions)
                    subscribes = list(subscriptions)
                    unsubscribes = list(subscriptions)
                    deletes = list(subscriptions)

                    for args, argv in self._service_session._events:
                        if args[0] == 'wamp.subscription.on_create':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_create: session id is incorrect!')
                            test.assertTrue(
                                args[2]['id'] in created,
                                'on_create: subscription id is incorrect!')
                            created.remove(args[2]['id'])
                        if args[0] == 'wamp.subscription.on_subscribe':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_subscribe: session id is incorrect!')
                            test.assertTrue(
                                args[2] in subscribes,
                                'on_subscribe: subscription id is incorrect!')
                            subscribes.remove(args[2])
                        if args[0] == 'wamp.subscription.on_unsubscribe':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_unsubscribe: session id is incorrect!')
                            test.assertTrue(
                                args[2] in unsubscribes,
                                'on_unsubscribe: subscription id is incorrect!'
                            )
                            unsubscribes.remove(args[2])
                        if args[0] == 'wamp.subscription.on_delete':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_delete: session id is incorrect!')
                            test.assertTrue(
                                args[2] in deletes,
                                'on_delete: subscription id is incorrect!')
                            deletes.remove(args[2])

                    test.assertEqual(
                        len(created), 0,
                        'incorrect response sequence for on_create')
                    test.assertEqual(
                        len(subscribes), 0,
                        'incorrect response sequence for on_subscribe')
                    test.assertEqual(
                        len(unsubscribes), 0,
                        'incorrect response sequence for on_unsubscribe')
                    test.assertEqual(
                        len(deletes), 0,
                        'incorrect response sequence for on_delete')

                reactor.callLater(0, all_done)

        session = TestSession(types.ComponentConfig('realm1'))
        self.session_factory.add(session, self.router, authrole='trusted')

    def test_subscribe_detach(self):
        """
        Make sure the wamp.* event sequence delivered as part of a subscribe/detach sequence
        is correct and contains the correct session id and subscription id values.
        """
        router = self.router
        test = self

        class Session(mock.MagicMock):
            """
            Mock the session object, this is key to capturing all the replies
            and publications. We get all replies in _private and publications
            in events, so we can issue the request we need to test, then
            check at the end _events contains the list of pub's we are
            expecting.
            """
            # NOTE(review): class-level mutable lists -- shared across
            # instances, OK here since only one Session is created per test
            _private = []
            _events = []
            _authrole = 'trusted'
            _session_id = 0

            def send(self, *args, **argv):
                self._private.append(args[0])

            def publish(self, *args, **argv):
                self._events.append([args, argv])

        class TestSession(ApplicationSession):
            def __init__(self, *args, **kw):
                super().__init__(*args, **kw)
                self._service_session = Session()
                self._router = router

            def onJoin(self, details):
                self._service_session._session_id = details.session
                router.attach(self._service_session)
                # install our mock as the realm's service session so the
                # broker publishes wamp.* meta-events through it
                router._broker._router._realm.session = self._service_session
                subscription = message.Subscribe(self._service_session._session_id, 'com.example.test1')
                router._broker.processSubscribe(self._service_session, subscription)
                subscription = message.Subscribe(self._service_session._session_id, 'com.example.test2')
                router._broker.processSubscribe(self._service_session, subscription)
                subscription = message.Subscribe(self._service_session._session_id, 'com.example.test3')
                router._broker.processSubscribe(self._service_session, subscription)
                subscriptions = []
                for obj in list(self._service_session._private):
                    subscriptions.append(obj.subscription)
                # detaching the session must implicitly unsubscribe (and
                # delete) all of its subscriptions
                router.detach(self._service_session)

                def all_done():
                    #
                    # These lists are initialised with the subscription id's we've generated
                    # with our subscribe sequence, the following routines should decrement each
                    # list to exactly zero.
                    #
                    created = list(subscriptions)
                    subscribes = list(subscriptions)
                    unsubscribes = list(subscriptions)
                    deletes = list(subscriptions)

                    for args, argv in self._service_session._events:
                        if args[0] == 'wamp.subscription.on_create':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_create: session id is incorrect!')
                            test.assertTrue(
                                args[2]['id'] in created,
                                'on_create: subscription id is incorrect!')
                            created.remove(args[2]['id'])
                        if args[0] == 'wamp.subscription.on_subscribe':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_subscribe: session id is incorrect!')
                            test.assertTrue(
                                args[2] in subscribes,
                                'on_subscribe: subscription id is incorrect!')
                            subscribes.remove(args[2])
                        if args[0] == 'wamp.subscription.on_unsubscribe':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_unsubscribe: session id is incorrect!')
                            test.assertTrue(
                                args[2] in unsubscribes,
                                'on_unsubscribe: subscription id is incorrect!'
                            )
                            unsubscribes.remove(args[2])
                        if args[0] == 'wamp.subscription.on_delete':
                            test.assertEqual(
                                args[1], self._service_session._session_id,
                                'on_delete: session id is incorrect!')
                            test.assertTrue(
                                args[2] in deletes,
                                'on_delete: subscription id is incorrect!')
                            deletes.remove(args[2])

                    test.assertEqual(
                        len(created), 0,
                        'incorrect response sequence for on_create')
                    test.assertEqual(
                        len(subscribes), 0,
                        'incorrect response sequence for on_subscribe')
                    test.assertEqual(
                        len(unsubscribes), 0,
                        'incorrect response sequence for on_unsubscribe')
                    test.assertEqual(
                        len(deletes), 0,
                        'incorrect response sequence for on_delete')

                reactor.callLater(0, all_done)

        session = TestSession(types.ComponentConfig('realm1'))
        self.session_factory.add(session, self.router, authrole='trusted')
class TestEmbeddedSessions(unittest.TestCase):
    """
    Test cases for application session running embedded in router.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """
        # create a router factory
        self.router_factory = RouterFactory(u'mynode')

        # start a realm
        self.router_factory.start_realm(RouterRealm(None, {u'name': u'realm1'}))

        # allow everything
        # NOTE: the call/register/publish/subscribe flags must be nested under
        # u'allow' - this matches the permission schema used by the other
        # RouterFactory(u'mynode')-based setUp in this file. Previously the
        # flags were (incorrectly) placed at the top level of the dict.
        default_permissions = {
            u'uri': u'',
            u'match': u'prefix',
            u'allow': {
                u'call': True,
                u'register': True,
                u'publish': True,
                u'subscribe': True
            }
        }
        router = self.router_factory.get(u'realm1')
        router.add_role(RouterRoleStaticAuth(router, None, default_permissions=default_permissions))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d

    def test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        _RouterApplicationSession's send() method (from the Hello msg)
        """
        # setup
        the_exception = RuntimeError("sadness")
        errors = []

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

            def onUserError(self, *args, **kw):
                errors.append((args, kw))

        session = TestSession(types.ComponentConfig(u'realm1'))

        # in this test, we are just looking for onUserError to get
        # called so we don't need to patch the logger. this should
        # call onJoin, triggering our error
        self.session_factory.add(session)

        # check we got the right log.failure() call
        self.assertTrue(len(errors) > 0, "expected onUserError call")
        fail = errors[0][0][0]
        self.assertTrue(fail.value == the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello(u'realm1', dict(caller=role.RoleCallerFeatures()))

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('failure' in call[2])
        self.assertEqual(call[2]['failure'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate(u'bogus signature')

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('failure' in call[2])
        self.assertEqual(call[2]['failure'].value, the_exception)

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some
        topic and add it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # noinspection PyUnusedLocal
                def on_event(*arg, **kwargs):
                    pass

                d2 = self.subscribe(on_event, u'com.example.topic1')

                def ok(_):
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d
def start(self, node_id=None):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    The node keys (``load_keys``) and configuration (``load_config``) has to be loaded
    before starting the node.

    This is the _third_ function being called after the Node has been instantiated.

    :param node_id: Optional node ID override; takes precedence over the
        configured controller ID and over the hostname fallback.
    :returns: dict with key ``shutdown_complete`` holding a Deferred that is
        fired when the node shuts down (via ``returnValue``).
    :raises Exception: if no node configuration has been loaded.
    """
    # NOTE(review): this body uses `yield` + `returnValue`, so the method is
    # presumably decorated with @inlineCallbacks (decorator not visible here).
    self.log.info('Starting {personality} node {method}',
                  personality=self.personality.NAME,
                  method=hltype(Node.start))

    # a configuration must have been loaded before
    if not self._config:
        raise Exception("No node configuration set")

    # a node can only be started once for now
    assert self._shutdown_complete is None
    assert self._node_id is None

    # get controller config/options
    controller_config = self._config.get('controller', {})
    controller_options = controller_config.get('options', {})

    # the node ID: CLI takes precedence over config over hostname
    if node_id:
        self._node_id = node_id
        _node_id_source = 'explicit run-time argument'
    elif 'id' in controller_config:
        self._node_id = controller_config['id']
        _node_id_source = 'explicit configuration'
    else:
        # fallback: lower-cased hostname of the machine we run on
        self._node_id = u'{}'.format(socket.gethostname()).lower()
        _node_id_source = 'hostname'
    self.log.info('Node ID {node_id} set from {node_id_source}',
                  node_id=hlid(self._node_id),
                  node_id_source=_node_id_source)

    # set controller process title
    try:
        import setproctitle
    except ImportError:
        # best-effort only: missing setproctitle is not fatal
        self.log.warn(
            "Warning, could not set process title (setproctitle not installed)"
        )
    else:
        setproctitle.setproctitle(
            controller_options.get('title', 'crossbar-controller'))

    # add the node controller singleton component
    self._controller = self.NODE_CONTROLLER(self)

    # local node management router: factory, session factory and the
    # controller realm the management API lives on
    self._router_factory = RouterFactory(self._node_id, None)
    self._router_session_factory = RouterSessionFactory(
        self._router_factory)
    rlm_config = {'name': self._realm}
    rlm = RouterRealm(self._controller, None, rlm_config)
    router = self._router_factory.start_realm(rlm)

    # setup global static roles
    self._add_global_roles()

    # always add a realm service session
    cfg = ComponentConfig(self._realm)
    rlm.session = (self.ROUTER_SERVICE)(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')
    self.log.debug('Router service session attached [{router_service}]',
                   router_service=qual(self.ROUTER_SERVICE))

    # attach the node controller itself to the management router
    self._router_session_factory.add(self._controller, authrole=u'trusted')
    self.log.debug('Node controller attached [{node_controller}]',
                   node_controller=qual(self.NODE_CONTROLLER))

    # add extra node controller components
    self._add_extra_controller_components(controller_options)

    # setup Node shutdown triggers
    self._set_shutdown_triggers(controller_options)

    # setup node shutdown Deferred
    self._shutdown_complete = Deferred()

    # startup the node personality ..
    yield self.personality.Node.boot(self)

    # notify systemd that we are fully up and running
    try:
        import sdnotify
    except ImportError:
        # do nothing on non-systemd platforms
        pass
    else:
        sdnotify.SystemdNotifier().notify("READY=1")

    # return a shutdown deferred which we will fire to notify the code that
    # called start() - which is the main crossbar boot code
    res = {'shutdown_complete': self._shutdown_complete}
    returnValue(res)
def onJoin(self, details):
    """
    Called when worker process has joined the node's management realm.

    Initializes Web templates and per-realm router state, then registers the
    router worker's management API procedures before signalling readiness.

    :param details: Session join details (forwarded to the base class).
    """
    # NOTE(review): body uses `yield`, so this is presumably decorated with
    # @inlineCallbacks (decorator not visible in this view).
    # publish_ready=False: we publish readiness ourselves at the end, after
    # all management procedures have been registered
    yield NativeWorkerSession.onJoin(self, details, publish_ready=False)

    # Jinja2 templates for Web (like WS status page et al)
    #
    templates_dir = os.path.abspath(pkg_resources.resource_filename("crossbar", "web/templates"))
    self.log.debug("Using Web templates from {templates_dir}",
                   templates_dir=templates_dir)
    self._templates = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_dir))

    # factory for producing (per-realm) routers
    self._router_factory = RouterFactory()

    # factory for producing router sessions
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    # map: realm ID -> RouterRealm
    self.realms = {}

    # map: realm URI -> realm ID
    self.realm_to_id = {}

    # map: transport ID -> RouterTransport
    self.transports = {}

    # map: link ID -> RouterLink
    self.links = {}

    # map: component ID -> RouterComponent
    self.components = {}

    # the procedures registered
    procs = [
        'get_router_realms',
        'start_router_realm',
        'stop_router_realm',
        'get_router_realm_roles',
        'start_router_realm_role',
        'stop_router_realm_role',
        'get_router_components',
        'start_router_component',
        'stop_router_component',
        'get_router_transports',
        'start_router_transport',
        'stop_router_transport',
        'get_router_links',
        'start_router_link',
        'stop_router_link'
    ]

    # register all management procedures under this worker's URI prefix;
    # registrations run concurrently and are collected via DeferredList
    dl = []
    for proc in procs:
        uri = '{}.{}'.format(self._uri_prefix, proc)
        self.log.debug("Registering management API procedure {proc}", proc=uri)
        dl.append(self.register(getattr(self, proc), uri,
                                options=RegisterOptions(details_arg='details')))

    regs = yield DeferredList(dl)

    self.log.debug("Registered {cnt} management API procedures", cnt=len(regs))

    # NativeWorkerSession.publish_ready()
    yield self.publish_ready()
class TestEmbeddedSessions(unittest.TestCase):
    """
    Test cases for application session running embedded in router.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """
        # create a router factory
        self.router_factory = RouterFactory()

        # start a realm
        self.router_factory.start_realm(RouterRealm(None, {u'name': u'realm1'}))

        # allow everything
        # positional flags: (uri, match_by_prefix, call, register, publish, subscribe)
        # -- presumably; verify against the RouterPermissions signature
        permissions = RouterPermissions('', True, True, True, True, True)
        router = self.router_factory.get(u'realm1')
        router.add_role(
            RouterRoleStaticAuth(router, None, default_permissions=permissions))

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # resolving the outer future proves the embedded session joined
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d

    def _test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        _RouterApplicationSession's send() method (from the Hello msg)
        """
        # NOTE: leading underscore disables this test (not collected by the
        # test runner) - intent unclear; presumably disabled pending a fix
        # setup
        the_exception = RuntimeError("sadness")

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

        session = TestSession(types.ComponentConfig(u'realm1'))
        from crossbar.router.session import _RouterApplicationSession

        # execute, first patching-out the logger so we can see that
        # log.failure() was called when our exception triggers.
        with mock.patch.object(_RouterApplicationSession, 'log') as logger:
            # this should call onJoin, triggering our error
            self.session_factory.add(session)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('log_failure' in call[2])
        self.assertEqual(call[2]['log_failure'].value, the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello(u'realm1', dict(caller=role.RoleCallerFeatures()))

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('log_failure' in call[2])
        self.assertEqual(call[2]['log_failure'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate(u'bogus signature')

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('log_failure' in call[2])
        self.assertEqual(call[2]['log_failure'].value, the_exception)

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some
        topic and add it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # noinspection PyUnusedLocal
                def on_event(*arg, **kwargs):
                    pass

                d2 = self.subscribe(on_event, u'com.example.topic1')

                def ok(_):
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d
class Node(object):
    """
    A Crossbar.io node is the running a controller process and one or multiple
    worker processes.

    A single Crossbar.io node runs exactly one instance of this class, hence
    this class can be considered a system singleton.
    """

    log = make_logger()

    def __init__(self, cbdir=None, reactor=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode

        :param reactor: Reactor to run on.
        :type reactor: obj or None
        """
        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # the node's management realm when running in managed mode (this comes from CDC!)
        self._management_realm = None

        # the node's ID when running in managed mode (this comes from CDC!)
        self._node_id = None

        # node extra when running in managed mode (this comes from CDC!)
        self._node_extra = None

        # the node controller realm
        self._realm = u'crossbar'

        # config of this node.
        self._config = None

        # node private key autobahn.wamp.cryptosign.SigningKey
        self._node_key = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # when running in managed mode, this will hold the bridge session
        # attached to the local management router
        self._bridge_session = None

        # when running in managed mode, this will hold the uplink session to CDC
        self._manager = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # map from router worker IDs to
        self._realm_templates = {}

        # for node elements started under specific IDs, and where
        # the node configuration does not specify an ID, use a generic
        # name numbered sequentially using the counters here
        self._worker_no = 1
        self._realm_no = 1
        self._role_no = 1
        self._uplink_no = 1  # FIX: was missing, but used in _startup() for realm uplinks
        self._connection_no = 1
        self._transport_no = 1
        self._component_no = 1

    def maybe_generate_key(self, cbdir, privkey_path=u'key.priv', pubkey_path=u'key.pub'):
        """
        Load the node key pair from the node directory, or generate a new one.

        Validates an existing private key file (and re-creates the public key
        file from it if missing), or generates a fresh Ed25519 key pair.
        Also enforces file permissions (0644 public, 0600 private).

        :param cbdir: Node directory holding the key files.
        :param privkey_path: Private key file name (relative to cbdir).
        :param pubkey_path: Public key file name (relative to cbdir).
        :returns: The node public key as a hex string.
        :raises Exception: on corrupt or inconsistent key files.
        """
        privkey_path = os.path.join(cbdir, privkey_path)
        pubkey_path = os.path.join(cbdir, pubkey_path)

        if os.path.exists(privkey_path):

            # node private key seems to exist already .. check!

            priv_tags = _parse_keyfile(privkey_path, private=True)
            for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519', u'private-key-ed25519']:
                if tag not in priv_tags:
                    raise Exception("Corrupt node private key file {} - {} tag not found".format(privkey_path, tag))

            privkey_hex = priv_tags[u'private-key-ed25519']
            privkey = SigningKey(privkey_hex, encoder=HexEncoder)
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            # the stored public key must match the one derived from the private key
            if priv_tags[u'public-key-ed25519'] != pubkey_hex:
                raise Exception(
                    ("Inconsistent node private key file {} - public-key-ed25519 doesn't"
                     " correspond to private-key-ed25519").format(pubkey_path)
                )

            if os.path.exists(pubkey_path):
                pub_tags = _parse_keyfile(pubkey_path, private=False)
                for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519']:
                    if tag not in pub_tags:
                        raise Exception("Corrupt node public key file {} - {} tag not found".format(pubkey_path, tag))

                if pub_tags[u'public-key-ed25519'] != pubkey_hex:
                    raise Exception(
                        ("Inconsistent node public key file {} - public-key-ed25519 doesn't"
                         " correspond to private-key-ed25519").format(pubkey_path)
                    )
            else:
                # public key file missing: re-create it from the private key file
                self.log.info(
                    "Node public key file {pub_path} not found - re-creating from node private key file {priv_path}",
                    pub_path=pubkey_path,
                    priv_path=privkey_path,
                )
                pub_tags = OrderedDict([
                    (u'creator', priv_tags[u'creator']),
                    (u'created-at', priv_tags[u'created-at']),
                    (u'machine-id', priv_tags[u'machine-id']),
                    (u'public-key-ed25519', pubkey_hex),
                ])
                msg = u'Crossbar.io node public key\n\n'
                _write_node_key(pubkey_path, pub_tags, msg)

            self.log.debug("Node key already exists (public key: {hex})", hex=pubkey_hex)

        else:
            # node private key does not yet exist: generate one

            privkey = SigningKey.generate()
            privkey_hex = privkey.encode(encoder=HexEncoder).decode('ascii')
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            # first, write the public file
            tags = OrderedDict([
                (u'creator', _creator()),
                (u'created-at', utcnow()),
                (u'machine-id', _machine_id()),
                (u'public-key-ed25519', pubkey_hex),
            ])
            msg = u'Crossbar.io node public key\n\n'
            _write_node_key(pubkey_path, tags, msg)

            # now, add the private key and write the private file
            tags[u'private-key-ed25519'] = privkey_hex
            msg = u'Crossbar.io node private key - KEEP THIS SAFE!\n\n'
            _write_node_key(privkey_path, tags, msg)

            self.log.info("New node key pair generated!")

        # fix file permissions on node public/private key files
        # note: we use decimals instead of octals as octal literals have changed between Py2/3
        #
        if os.stat(pubkey_path).st_mode & 511 != 420:  # 420 (decimal) == 0644 (octal)
            os.chmod(pubkey_path, 420)
            self.log.info("File permissions on node public key fixed!")

        if os.stat(privkey_path).st_mode & 511 != 384:  # 384 (decimal) == 0600 (octal)
            os.chmod(privkey_path, 384)
            self.log.info("File permissions on node private key fixed!")

        self._node_key = cryptosign.SigningKey(privkey)

        return pubkey_hex

    def load(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in CDC default config.

        :param configfile: Config file name relative to the node directory, or
            None to use the built-in (empty) default configuration.
        """
        if configfile:
            configpath = os.path.join(self._cbdir, configfile)

            self.log.debug("Loading node configuration from '{configpath}' ..",
                           configpath=configpath)

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = checkconfig.check_config_file(configpath)

            self.log.info("Node configuration loaded from '{configfile}'",
                          configfile=configfile)
        else:
            self._config = {
                u'version': 2,
                u'controller': {},
                u'workers': []
            }
            checkconfig.check_config(self._config)
            self.log.info("Node configuration loaded from built-in config.")

    @inlineCallbacks
    def start(self, cdc_mode=False):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        :param cdc_mode: If True, also connect this node to the CDC uplink.
        :raises Exception: if no node configuration has been loaded.
        """
        if not self._config:
            raise Exception("No node configuration loaded")

        if not cdc_mode and not self._config.get("controller", {}) and not self._config.get("workers", {}):
            self.log.warn(
                ("You seem to have no controller config or workers, nor are "
                 "starting up in CDC mode. Check your config exists, or pass "
                 "--cdc to `crossbar start`."))
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass
            return

        # get controller config/options
        #
        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # set controller process title
        #
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # router and factory that creates router sessions
        #
        self._router_factory = RouterFactory()
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        # create a new router for the realm
        #
        rlm_config = {
            'name': self._realm
        }
        rlm = RouterRealm(None, rlm_config)
        router = self._router_factory.start_realm(rlm)

        # always add a realm service session
        #
        cfg = ComponentConfig(self._realm)
        rlm.session = RouterServiceSession(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')

        # add a router bridge session when running in managed mode
        #
        if cdc_mode:
            self._bridge_session = NodeManagementBridgeSession(cfg)
            self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
        else:
            self._bridge_session = None

        # Node shutdown mode
        #
        if cdc_mode:
            # in managed mode, a node - by default - only shuts down when explicitly asked to,
            # or upon a fatal error in the node controller
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED]
        else:
            # in standalone mode, a node - by default - is immediately shutting down whenever
            # a worker exits (successfully or with error)
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # allow to override node shutdown triggers
        #
        if 'shutdown' in controller_options:
            self.log.info("Overriding default node shutdown triggers with {triggers} from node config",
                          triggers=controller_options['shutdown'])
            self._node_shutdown_triggers = controller_options['shutdown']
        else:
            self.log.info("Using default node shutdown triggers {triggers}",
                          triggers=self._node_shutdown_triggers)

        # add the node controller singleton session
        #
        self._controller = NodeControllerSession(self)
        self._router_session_factory.add(self._controller, authrole=u'trusted')

        # detect WAMPlets (FIXME: remove this!)
        #
        wamplets = self._controller._get_wamplets()
        if len(wamplets) > 0:
            self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
            for wpl in wamplets:
                self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
        else:
            self.log.debug("No WAMPlets detected in enviroment.")

        panic = False

        try:
            # startup the node from local node configuration
            #
            yield self._startup(self._config)

            # connect to CDC when running in managed mode
            #
            if cdc_mode:
                cdc_config = controller_config.get('cdc', {

                    # CDC connecting transport
                    #
                    u'transport': {
                        u'type': u'websocket',
                        u'url': u'wss://cdc.crossbario.com/ws',
                        u'endpoint': {
                            u'type': u'tcp',
                            u'host': u'cdc.crossbario.com',
                            u'port': 443,
                            u'timeout': 5,
                            u'tls': {
                                u'hostname': u'cdc.crossbario.com'
                            }
                        }
                    }
                })

                transport = cdc_config[u'transport']
                hostname = None
                if u'tls' in transport[u'endpoint']:
                    # FIX: the hostname was previously computed but never assigned
                    # (bare expression), so TLS options were never applied below
                    hostname = transport[u'endpoint'][u'tls'][u'hostname']

                runner = ApplicationRunner(
                    url=transport['url'], realm=None, extra=None,
                    ssl=optionsForClientTLS(hostname) if hostname else None,
                )

                def make(config):
                    # extra info forwarded to CDC client session
                    extra = {
                        'node': self,
                        'on_ready': Deferred(),
                        'on_exit': Deferred(),
                        'node_key': self._node_key,
                    }

                    @inlineCallbacks
                    def on_ready(res):
                        self._manager, self._management_realm, self._node_id, self._node_extra = res

                        if self._bridge_session:
                            try:
                                yield self._bridge_session.attach_manager(self._manager, self._management_realm, self._node_id)
                                status = yield self._manager.call(u'cdc.remote.status@1')
                            except:
                                # deliberate best-effort: log and carry on (note: bare
                                # except also swallows BaseException - review if narrowing
                                # to `except Exception` is acceptable)
                                self.log.failure()
                            else:
                                self.log.info('Connected to CDC for management realm "{realm}" (current time is {now})',
                                              realm=self._management_realm, now=status[u'now'])
                        else:
                            self.log.warn('Uplink CDC session established, but no bridge session setup!')

                    @inlineCallbacks
                    def on_exit(res):
                        if self._bridge_session:
                            try:
                                yield self._bridge_session.detach_manager()
                            except:
                                self.log.failure()
                            else:
                                self.log.info('Disconnected from CDC for management realm "{realm}"',
                                              realm=self._management_realm)
                        else:
                            self.log.warn('Uplink CDC session lost, but no bridge session setup!')

                        self._manager, self._management_realm, self._node_id, self._node_extra = None, None, None, None

                    extra['on_ready'].addCallback(on_ready)
                    extra['on_exit'].addCallback(on_exit)

                    config = ComponentConfig(extra=extra)
                    session = NodeManagementSession(config)
                    return session

                self.log.info("Connecting to CDC at '{url}' ..", url=transport[u'url'])
                yield runner.run(make, start_reactor=False, auto_reconnect=True)

            # Notify systemd that crossbar is fully up and running
            # (this has no effect on non-systemd platforms)
            try:
                import sdnotify
                sdnotify.SystemdNotifier().notify("READY=1")
            except:
                # best-effort: sdnotify missing or notification failed
                pass

        except ApplicationError as e:
            panic = True
            self.log.error("{msg}", msg=e.error_message())

        except Exception:
            panic = True
            traceback.print_exc()

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    @inlineCallbacks
    def _startup(self, config):
        """
        Startup elements in the node as specified in the provided node configuration.

        :param config: The parsed node configuration (dict with 'controller'
            and 'workers' sections).
        """
        self.log.info('Configuring node from local configuration ...')

        # call options we use to call into the local node management API
        call_options = CallOptions()

        # fake call details we use to call into the local node management API
        call_details = CallDetails(caller=0)

        # get contoller configuration subpart
        controller = config.get('controller', {})

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.start_manhole(controller['manhole'], details=call_details)

        # startup all workers
        workers = config.get('workers', [])
        if len(workers):
            self.log.info('Starting {nworkers} workers ...', nworkers=len(workers))
        else:
            self.log.info('No workers configured!')

        for worker in workers:

            # worker ID (config ID takes precedence, otherwise auto-numbered)
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                worker_id = 'worker-{:03d}'.format(self._worker_no)
                self._worker_no += 1

            # worker type - a type of working process from the following fixed list
            worker_type = worker['type']
            assert(worker_type in ['router', 'container', 'guest', 'websocket-testee'])

            # set logname depending on worker type
            if worker_type == 'router':
                worker_logname = "Router '{}'".format(worker_id)
            elif worker_type == 'container':
                worker_logname = "Container '{}'".format(worker_id)
            elif worker_type == 'websocket-testee':
                worker_logname = "WebSocketTestee '{}'".format(worker_id)
            elif worker_type == 'guest':
                worker_logname = "Guest '{}'".format(worker_id)
            else:
                raise Exception("logic error")

            # any worker specific options
            worker_options = worker.get('options', {})

            # native worker processes: router, container, websocket-testee
            if worker_type in ['router', 'container', 'websocket-testee']:

                # start a new native worker process ..
                if worker_type == 'router':
                    yield self._controller.start_router(worker_id, worker_options, details=call_details)

                elif worker_type == 'container':
                    yield self._controller.start_container(worker_id, worker_options, details=call_details)

                elif worker_type == 'websocket-testee':
                    yield self._controller.start_websocket_testee(worker_id, worker_options, details=call_details)

                else:
                    raise Exception("logic error")

                # setup native worker generic stuff
                if 'pythonpath' in worker_options:
                    added_paths = yield self._controller.call('crossbar.worker.{}.add_pythonpath'.format(worker_id),
                                                              worker_options['pythonpath'], options=call_options)
                    self.log.debug("{worker}: PYTHONPATH extended for {paths}",
                                   worker=worker_logname, paths=added_paths)

                if 'cpu_affinity' in worker_options:
                    new_affinity = yield self._controller.call('crossbar.worker.{}.set_cpu_affinity'.format(worker_id),
                                                               worker_options['cpu_affinity'], options=call_options)
                    self.log.debug("{worker}: CPU affinity set to {affinity}",
                                   worker=worker_logname, affinity=new_affinity)

                if 'manhole' in worker:
                    yield self._controller.call('crossbar.worker.{}.start_manhole'.format(worker_id),
                                                worker['manhole'], options=call_options)
                    self.log.debug("{worker}: manhole started", worker=worker_logname)

                # setup router worker
                if worker_type == 'router':

                    # start realms on router
                    for realm in worker.get('realms', []):

                        # start realm
                        if 'id' in realm:
                            realm_id = realm.pop('id')
                        else:
                            realm_id = 'realm-{:03d}'.format(self._realm_no)
                            self._realm_no += 1

                        yield self._controller.call('crossbar.worker.{}.start_router_realm'.format(worker_id),
                                                    realm_id, realm, options=call_options)
                        self.log.info("{worker}: realm '{realm_id}' (named '{realm_name}') started",
                                      worker=worker_logname, realm_id=realm_id, realm_name=realm['name'])

                        # add roles to realm
                        for role in realm.get('roles', []):
                            if 'id' in role:
                                role_id = role.pop('id')
                            else:
                                role_id = 'role-{:03d}'.format(self._role_no)
                                self._role_no += 1

                            yield self._controller.call('crossbar.worker.{}.start_router_realm_role'.format(worker_id),
                                                        realm_id, role_id, role, options=call_options)
                            self.log.info(
                                "{logname}: role '{role}' (named '{role_name}') started on realm '{realm}'",
                                logname=worker_logname,
                                role=role_id,
                                role_name=role['name'],
                                realm=realm_id,
                            )

                        # start uplinks for realm
                        for uplink in realm.get('uplinks', []):
                            if 'id' in uplink:
                                uplink_id = uplink.pop('id')
                            else:
                                # relies on self._uplink_no which is now initialized
                                # in __init__ (previously missing -> AttributeError)
                                uplink_id = 'uplink-{:03d}'.format(self._uplink_no)
                                self._uplink_no += 1

                            yield self._controller.call('crossbar.worker.{}.start_router_realm_uplink'.format(worker_id),
                                                        realm_id, uplink_id, uplink, options=call_options)
                            self.log.info(
                                "{logname}: uplink '{uplink}' started on realm '{realm}'",
                                logname=worker_logname,
                                uplink=uplink_id,
                                realm=realm_id,
                            )

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the router
                    for connection in worker.get('connections', []):

                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection-{:03d}'.format(self._connection_no)
                            self._connection_no += 1

                        yield self._controller.call('crossbar.worker.{}.start_connection'.format(worker_id),
                                                    connection_id, connection, options=call_options)
                        self.log.info(
                            "{logname}: connection '{connection}' started",
                            logname=worker_logname,
                            connection=connection_id,
                        )

                    # start components to run embedded in the router
                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component-{:03d}'.format(self._component_no)
                            self._component_no += 1

                        yield self._controller.call('crossbar.worker.{}.start_router_component'.format(worker_id),
                                                    component_id, component, options=call_options)
                        self.log.info(
                            "{logname}: component '{component}' started",
                            logname=worker_logname,
                            component=component_id,
                        )

                    # start transports on router
                    for transport in worker['transports']:

                        if 'id' in transport:
                            transport_id = transport.pop('id')
                        else:
                            transport_id = 'transport-{:03d}'.format(self._transport_no)
                            self._transport_no += 1

                        yield self._controller.call('crossbar.worker.{}.start_router_transport'.format(worker_id),
                                                    transport_id, transport, options=call_options)
                        self.log.info(
                            "{logname}: transport '{tid}' started",
                            logname=worker_logname,
                            tid=transport_id,
                        )

                # setup container worker
                elif worker_type == 'container':

                    # if components exit "very soon after" we try to
                    # start them, we consider that a failure and shut
                    # our node down. We remove this subscription 2
                    # seconds after we're done starting everything
                    # (see below). This is necessary as
                    # start_container_component returns as soon as
                    # we've established a connection to the component
                    def component_exited(info):
                        component_id = info.get("id")
                        self.log.critical("Component '{component_id}' failed to start; shutting down node.",
                                          component_id=component_id)
                        try:
                            self._reactor.stop()
                        except twisted.internet.error.ReactorNotRunning:
                            pass
                    topic = 'crossbar.worker.{}.container.on_component_stop'.format(worker_id)
                    component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the container
                    #
                    for connection in worker.get('connections', []):

                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection-{:03d}'.format(self._connection_no)
                            self._connection_no += 1

                        yield self._controller.call('crossbar.worker.{}.start_connection'.format(worker_id),
                                                    connection_id, connection, options=call_options)
                        self.log.info(
                            "{logname}: connection '{connection}' started",
                            logname=worker_logname,
                            connection=connection_id,
                        )

                    # start components to run embedded in the container
                    #
                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component-{:03d}'.format(self._component_no)
                            self._component_no += 1

                        yield self._controller.call('crossbar.worker.{}.start_container_component'.format(worker_id),
                                                    component_id, component, options=call_options)
                        self.log.info("{worker}: component '{component_id}' started",
                                      worker=worker_logname, component_id=component_id)

                    # after 2 seconds, consider all the application components running
                    self._reactor.callLater(2, component_stop_sub.unsubscribe)

                # setup websocket-testee worker
                elif worker_type == 'websocket-testee':

                    # start transport on websocket-testee
                    transport = worker['transport']
                    transport_id = 'transport-{:03d}'.format(self._transport_no)
                    # FIX: was `self._transport_no = 1`, which reset the counter
                    # and could produce duplicate transport IDs
                    self._transport_no += 1

                    yield self._controller.call('crossbar.worker.{}.start_websocket_testee_transport'.format(worker_id),
                                                transport_id, transport, options=call_options)
                    self.log.info(
                        "{logname}: transport '{tid}' started",
                        logname=worker_logname,
                        tid=transport_id,
                    )

                else:
                    raise Exception("logic error")

            elif worker_type == 'guest':

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")

        self.log.info('Local node configuration applied.')
class TestEmbeddedSessions(unittest.TestCase):
    """
    Test cases for application session running embedded in router.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """
        # a fresh router factory per test
        self.router_factory = RouterFactory()

        # bring up the test realm on the factory
        self.router_factory.start_realm(RouterRealm(None, {u'name': u'realm1'}))

        # grant blanket permissions so sessions under test can do anything
        perms = RouterPermissions('', True, True, True, True, True)
        realm_router = self.router_factory.get(u'realm1')
        realm_router.add_role(RouterRoleStaticAuth(realm_router, None, default_permissions=perms))

        # session factory the tests attach their sessions through
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        joined = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # reaching the realm resolves the test future
                txaio.resolve(joined, None)

        self.session_factory.add(TestSession(types.ComponentConfig(u'realm1')))
        return joined

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some
        topic and add it to a router to run embedded.
        """
        subscribed = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # noinspection PyUnusedLocal
                def on_event(*arg, **kwargs):
                    pass

                sub_d = self.subscribe(on_event, u'com.example.topic1')

                def ok(_):
                    txaio.resolve(subscribed, None)

                def error(err):
                    txaio.reject(subscribed, err)

                txaio.add_callbacks(sub_d, ok, error)

        self.session_factory.add(TestSession(types.ComponentConfig(u'realm1')))
        return subscribed
class Node(object):
    """
    Crossbar.io Standalone node personality.

    A Crossbar.io node is running a controller process and one or multiple
    worker processes. A single Crossbar.io node runs exactly one instance of
    this class, hence this class can be considered a system singleton.
    """
    NODE_CONTROLLER = NodeController
    ROUTER_SERVICE = RouterServiceAgent

    # where the active node configuration came from
    CONFIG_SOURCE_DEFAULT = 1
    CONFIG_SOURCE_EMPTY = 2
    CONFIG_SOURCE_LOCALFILE = 3
    CONFIG_SOURCE_XBRNETWORK = 4
    CONFIG_SOURCE_TO_STR = {
        1: 'default',
        2: 'empty',
        3: 'localfile',
        4: 'xbrnetwork',
    }

    log = make_logger()

    def __init__(self, personality, cbdir=None, reactor=None, native_workers=None, options=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode

        :param reactor: Reactor to run on.
        :type reactor: :class:`twisted.internet.reactor` or None
        """
        self.personality = personality
        self.options = options or NodeOptions()
        self._native_workers = personality.native_workers

        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # allow overriding to add (or remove) native-worker types
        if native_workers is not None:
            self._native_workers = native_workers

        # local node management router
        self._router_factory = None

        # session factory for node management router
        self._router_session_factory = None

        # the node controller realm
        self._realm = u'crossbar'

        # config of this node.
        self._config = None

        # node private key :class:`autobahn.wamp.cryptosign.SigningKey`
        self._node_key = None

        # when running in managed mode, this will hold the session to CFC
        self._manager = None

        # the node's management realm when running in managed mode (this comes from CFC!)
        self._management_realm = None

        # the node's ID when running in managed mode (this comes from CFC!)
        self._node_id = None

        # node extra when running in managed mode (this comes from CFC!)
        self._node_extra = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [NODE_SHUTDOWN_ON_WORKER_EXIT]

        # will be filled with a Deferred in start(). the Deferred will fire when
        # the node has shut down, and the result signals if shutdown was clean
        self._shutdown_complete = None

        # for node elements started under specific IDs, and where
        # the node configuration does not specify an ID, use a generic
        # name numbered sequentially using the counters here
        self._worker_no = 1
        self._realm_no = 1
        self._role_no = 1
        self._connection_no = 1
        self._transport_no = 1
        self._webservice_no = 1
        self._component_no = 1

    @property
    def realm(self):
        return self._realm

    @property
    def key(self):
        """
        Returns the node (private signing) key pair.

        :return: The node key.
        :rtype: :class:`autobahn.wamp.cryptosign.SigningKey`
        """
        return self._node_key

    def load_keys(self, cbdir):
        """
        Load node public-private key pair from key files, possibly generating
        a new key pair if none exists.

        This is the _first_ function being called after the Node has been instantiated.

        IMPORTANT: this function is run _before_ start of Twisted reactor!
        """
        was_new, self._node_key = _maybe_generate_key(cbdir)
        return was_new

    def load_config(self, configfile=None, default=None):
        """
        Check and load the node configuration from:

        * from ``.crossbar/config.json`` or
        * from built-in (empty) default configuration

        This is the _second_ function being called after the Node has been instantiated.

        IMPORTANT: this function is run _before_ start of Twisted reactor!

        :returns: tuple of (config source constant, config path or None)
        """
        if configfile:
            config_path = os.path.abspath(os.path.join(self._cbdir, configfile))

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = self.personality.check_config_file(self.personality, config_path)

            config_source = Node.CONFIG_SOURCE_LOCALFILE
        else:
            config_path = None
            if default:
                self._config = default
                config_source = Node.CONFIG_SOURCE_DEFAULT
            else:
                self._config = {
                    u'version': 2,
                    u'controller': {},
                    u'workers': []
                }
                config_source = Node.CONFIG_SOURCE_EMPTY
            self.personality.check_config(self.personality, self._config)

        return config_source, config_path

    def _add_global_roles(self):
        # standalone personality adds no extra roles on the management router
        self.log.info('No extra node router roles')

    def _add_worker_role(self, worker_auth_role, options):
        """
        Add a static role for a newly started native worker on the node
        management router, so the worker can call/publish what it needs.
        """
        worker_role_config = {
            u"name": worker_auth_role,
            u"permissions": [
                # the worker requires these permissions to work:
                {
                    # worker_auth_role: "crossbar.worker.worker-001"
                    u"uri": worker_auth_role,
                    u"match": u"prefix",
                    u"allow": {
                        u"call": False,
                        u"register": True,
                        u"publish": True,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                },
                {
                    u"uri": u"crossbar.get_status",
                    u"match": u"exact",
                    u"allow": {
                        u"call": True,
                        u"register": False,
                        u"publish": False,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                }
            ]
        }
        self._router_factory.add_role(self._realm, worker_role_config)

    def _drop_worker_role(self, worker_auth_role):
        # remove the role again when the worker has gone away
        self._router_factory.drop_role(self._realm, worker_auth_role)

    def _extend_worker_args(self, args, options):
        # hook for personalities to add extra command line args for workers
        pass

    def _add_extra_controller_components(self, controller_options):
        # hook for personalities to attach additional controller sessions
        pass

    def _set_shutdown_triggers(self, controller_options):
        # allow to override node shutdown triggers
        #
        if 'shutdown' in controller_options:
            self._node_shutdown_triggers = controller_options['shutdown']
            self.log.info("Using node shutdown triggers {triggers} from configuration",
                          triggers=self._node_shutdown_triggers)
        else:
            self._node_shutdown_triggers = [NODE_SHUTDOWN_ON_WORKER_EXIT]
            self.log.info("Using default node shutdown triggers {triggers}",
                          triggers=self._node_shutdown_triggers)

    def stop(self):
        """
        Stop this node: mark the shutdown as clean and ask the node
        controller to shut down.
        """
        self._controller._shutdown_was_clean = True
        return self._controller.shutdown()

    @inlineCallbacks
    def start(self, node_id=None):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        The node keys (``load_keys``) and configuration (``load_config``) has to be loaded
        before starting the node.

        This is the _third_ function being called after the Node has been instantiated.
        """
        self.log.info('Starting {personality} node {method}',
                      personality=self.personality.NAME,
                      method=hltype(Node.start))

        # a configuration must have been loaded before
        if not self._config:
            raise Exception("No node configuration set")

        # a node can only be started once for now
        assert self._shutdown_complete is None
        assert self._node_id is None

        # get controller config/options
        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # the node ID: CLI takes precedence over config over hostname
        if node_id:
            self._node_id = node_id
            _node_id_source = 'explicit run-time argument'
        elif 'id' in controller_config:
            self._node_id = controller_config['id']
            _node_id_source = 'explicit configuration'
        else:
            self._node_id = u'{}'.format(socket.gethostname()).lower()
            _node_id_source = 'hostname'

        self.log.info('Node ID {node_id} set from {node_id_source}',
                      node_id=hlid(self._node_id),
                      node_id_source=_node_id_source)

        # set controller process title
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # add the node controller singleton component
        self._controller = self.NODE_CONTROLLER(self)

        # local node management router
        self._router_factory = RouterFactory(self._node_id, None)
        self._router_session_factory = RouterSessionFactory(self._router_factory)
        rlm_config = {'name': self._realm}
        rlm = RouterRealm(self._controller, None, rlm_config)
        router = self._router_factory.start_realm(rlm)

        # setup global static roles
        self._add_global_roles()

        # always add a realm service session
        cfg = ComponentConfig(self._realm)
        rlm.session = (self.ROUTER_SERVICE)(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')
        self.log.debug('Router service session attached [{router_service}]',
                       router_service=qual(self.ROUTER_SERVICE))

        self._router_session_factory.add(self._controller, authrole=u'trusted')
        self.log.debug('Node controller attached [{node_controller}]',
                       node_controller=qual(self.NODE_CONTROLLER))

        # add extra node controller components
        self._add_extra_controller_components(controller_options)

        # setup Node shutdown triggers
        self._set_shutdown_triggers(controller_options)

        # setup node shutdown Deferred
        self._shutdown_complete = Deferred()

        # startup the node personality ..
        yield self.personality.Node.boot(self)

        # notify systemd that we are fully up and running
        try:
            import sdnotify
        except ImportError:
            # do nothing on non-systemd platforms
            pass
        else:
            sdnotify.SystemdNotifier().notify("READY=1")

        # return a shutdown deferred which we will fire to notify the code that
        # called start() - which is the main crossbar boot code
        res = {'shutdown_complete': self._shutdown_complete}
        returnValue(res)
        # returnValue(self._shutdown_complete)

    def boot(self):
        self.log.info('Booting node {method}', method=hltype(Node.boot))
        return self.boot_from_config(self._config)

    @inlineCallbacks
    def boot_from_config(self, config):
        """
        Startup elements in the node as specified in the provided node configuration.
        """
        # get controller configuration subpart
        controller = config.get('controller', {})
        parallel_worker_start = controller.get('options', {}).get('enable_parallel_worker_start', False)

        self.log.info('{bootmsg} {method}',
                      bootmsg=hl('Booting node from local configuration [parallel_worker_start={}] ..'.format(parallel_worker_start),
                                 color='green', bold=True),
                      method=hltype(Node.boot_from_config))

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.call(u'crossbar.start_manhole', controller['manhole'],
                                        options=CallOptions())
            self.log.debug("controller: manhole started")

        # startup all workers
        workers = config.get('workers', [])
        if len(workers):
            self.log.info(hl('Will start {} worker{} ..'.format(len(workers), 's' if len(workers) > 1 else ''),
                             color='green', bold=True))
        else:
            self.log.info(hl('No workers configured, nothing to do', color='green', bold=True))

        dl = []
        for worker in workers:

            # worker ID
            if 'id' in worker:
                worker_id = worker['id']
            else:
                worker_id = u'worker{:03d}'.format(self._worker_no)
                worker['id'] = worker_id
                self._worker_no += 1

            # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
            worker_type = worker['type']

            # native worker processes setup
            if worker_type in self._native_workers:

                # set logname depending on native worker type
                worker_logname = '{} {}'.format(self._native_workers[worker_type]['logname'],
                                                hlid(worker_id))

                # any worker specific options
                worker_options = worker.get('options', {})

                # start the (native) worker
                self.log.info("Order node to start {worker_logname}",
                              worker_logname=worker_logname)

                d = self._controller.call(u'crossbar.start_worker', worker_id, worker_type,
                                          worker_options, options=CallOptions())

                @inlineCallbacks
                def configure_worker(res, worker_logname, worker_type, worker_id, worker):
                    self.log.info("Ok, node has started {worker_logname}",
                                  worker_logname=worker_logname)

                    # now configure the worker
                    self.log.info("Configuring {worker_logname} ..",
                                  worker_logname=worker_logname)
                    method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_'))
                    try:
                        config_fn = getattr(self, method_name)
                    except AttributeError:
                        raise ValueError("A native worker of type '{}' is configured but "
                                         "there is no method '{}' on {}".format(worker_type, method_name, type(self)))
                    yield config_fn(worker_logname, worker_id, worker)
                    self.log.info("Ok, {worker_logname} configured",
                                  worker_logname=worker_logname)

                d.addCallback(configure_worker, worker_logname, worker_type, worker_id, worker)

            # guest worker processes setup
            elif worker_type == u'guest':

                # now actually start the (guest) worker ..

                # FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers
                # only take the options (which is part of the whole config item for the worker)
                d = self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker,
                                          options=CallOptions())

            else:
                raise Exception('logic error: unexpected worker_type="{}"'.format(worker_type))

            if parallel_worker_start:
                dl.append(d)
            else:
                yield d

        yield gatherResults(dl)

        self.log.info(hl('Ok, local node configuration booted successfully!', color='green', bold=True))

    @inlineCallbacks
    def _configure_native_worker_common(self, worker_logname, worker_id, worker):
        # expanding PYTHONPATH of the newly started worker is now done
        # directly in NodeController._start_native_worker
        worker_options = worker.get('options', {})
        if False:
            if 'pythonpath' in worker_options:
                added_paths = yield self._controller.call(u'crossbar.worker.{}.add_pythonpath'.format(worker_id),
                                                          worker_options['pythonpath'], options=CallOptions())
                self.log.warn("{worker}: PYTHONPATH extended for {paths}",
                              worker=worker_logname, paths=added_paths)

        # FIXME: as the CPU affinity is in the worker options, this _also_ (see above fix)
        # should be done directly in NodeController._start_native_worker
        if True:
            if 'cpu_affinity' in worker_options:
                new_affinity = yield self._controller.call(u'crossbar.worker.{}.set_cpu_affinity'.format(worker_id),
                                                           worker_options['cpu_affinity'], options=CallOptions())
                self.log.debug("{worker}: CPU affinity set to {affinity}",
                               worker=worker_logname, affinity=new_affinity)

        # this is fine to start after the worker has been started, as manhole is
        # CB developer/support feature anyways (like a vendor diagnostics port)
        if 'manhole' in worker:
            yield self._controller.call(u'crossbar.worker.{}.start_manhole'.format(worker_id),
                                        worker['manhole'], options=CallOptions())
            self.log.debug("{worker}: manhole started", worker=worker_logname)

    @inlineCallbacks
    def _configure_native_worker_router(self, worker_logname, worker_id, worker):
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # start realms on router
        for realm in worker.get('realms', []):

            # start realm
            if 'id' in realm:
                realm_id = realm['id']
            else:
                realm_id = 'realm{:03d}'.format(self._realm_no)
                realm['id'] = realm_id
                self._realm_no += 1

            self.log.info("Order {worker_logname} to start Realm {realm_id}",
                          worker_logname=worker_logname,
                          realm_id=hlid(realm_id))

            yield self._controller.call(u'crossbar.worker.{}.start_router_realm'.format(worker_id),
                                        realm_id, realm, options=CallOptions())

            self.log.info("Ok, {worker_logname} has started Realm {realm_id}",
                          worker_logname=worker_logname,
                          realm_id=hlid(realm_id))

            # add roles to realm
            for role in realm.get('roles', []):
                if 'id' in role:
                    role_id = role['id']
                else:
                    role_id = 'role{:03d}'.format(self._role_no)
                    role['id'] = role_id
                    self._role_no += 1

                self.log.info("Order Realm {realm_id} to start Role {role_id}",
                              realm_id=hlid(realm_id),
                              role_id=hlid(role_id))

                yield self._controller.call(u'crossbar.worker.{}.start_router_realm_role'.format(worker_id),
                                            realm_id, role_id, role, options=CallOptions())

                self.log.info("Ok, Realm {realm_id} has started Role {role_id}",
                              realm_id=hlid(realm_id),
                              role_id=hlid(role_id))

        # start components to run embedded in the router
        for component in worker.get('components', []):
            if 'id' in component:
                component_id = component['id']
            else:
                component_id = 'component{:03d}'.format(self._component_no)
                component['id'] = component_id
                self._component_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_router_component'.format(worker_id),
                                        component_id, component, options=CallOptions())
            self.log.info("{logname}: component '{component}' started",
                          logname=worker_logname,
                          component=component_id)

        # start transports on router
        for transport in worker.get('transports', []):
            if 'id' in transport:
                transport_id = transport['id']
            else:
                transport_id = 'transport{:03d}'.format(self._transport_no)
                transport['id'] = transport_id
                self._transport_no += 1

            add_paths_on_transport_create = False

            self.log.info("Order {worker_logname} to start Transport {transport_id}",
                          worker_logname=worker_logname,
                          transport_id=hlid(transport_id))

            yield self._controller.call(u'crossbar.worker.{}.start_router_transport'.format(worker_id),
                                        transport_id, transport,
                                        create_paths=add_paths_on_transport_create,
                                        options=CallOptions())
            self.log.info("Ok, {worker_logname} has started Transport {transport_id}",
                          worker_logname=worker_logname,
                          transport_id=hlid(transport_id))

            if not add_paths_on_transport_create:
                if transport['type'] == 'web':
                    paths = transport.get('paths', {})
                elif transport['type'] == 'universal':
                    paths = transport.get('web', {}).get('paths', {})
                else:
                    paths = None

                # Web service paths
                if paths:
                    for path in sorted(paths):
                        if path != '/':
                            webservice = paths[path]
                            if 'id' in webservice:
                                webservice_id = webservice['id']
                            else:
                                webservice_id = 'webservice{:03d}'.format(self._webservice_no)
                                webservice['id'] = webservice_id
                                self._webservice_no += 1

                            self.log.info("Order Transport {transport_id} to start Web Service {webservice_id}",
                                          transport_id=hlid(transport_id),
                                          webservice_id=hlid(webservice_id),
                                          path=hluserid(path))

                            yield self._controller.call(u'crossbar.worker.{}.start_web_transport_service'.format(worker_id),
                                                        transport_id, path, webservice,
                                                        options=CallOptions())
                            self.log.info("Ok, Transport {transport_id} has started Web Service {webservice_id}",
                                          transport_id=hlid(transport_id),
                                          webservice_id=hlid(webservice_id),
                                          path=hluserid(path))

    @inlineCallbacks
    def _configure_native_worker_container(self, worker_logname, worker_id, worker):
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # if components exit "very soon after" we try to start them,
        # we consider that a failure and shut our node down. We remove
        # this subscription 2 seconds after we're done starting
        # everything (see below). This is necessary as start_component
        # returns as soon as we've established a connection to the
        # component
        def component_exited(info):
            component_id = info.get("id")
            self.log.critical("Component '{component_id}' failed to start; shutting down node.",
                              component_id=component_id)
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass
        topic = u'crossbar.worker.{}.container.on_component_stop'.format(worker_id)
        component_stop_sub = yield self._controller.subscribe(component_exited, topic)

        # start components to run embedded in the container
        #
        for component in worker.get('components', []):
            if 'id' in component:
                component_id = component['id']
            else:
                component_id = 'component{:03d}'.format(self._component_no)
                component['id'] = component_id
                self._component_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_component'.format(worker_id),
                                        component_id, component, options=CallOptions())
            self.log.info("{worker}: component '{component_id}' started",
                          worker=worker_logname,
                          component_id=component_id)

        # after 2 seconds, consider all the application components running
        self._reactor.callLater(2, component_stop_sub.unsubscribe)

    @inlineCallbacks
    def _configure_native_worker_websocket_testee(self, worker_logname, worker_id, worker):
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # start transport on websocket-testee
        transport = worker['transport']
        transport_id = 'transport{:03d}'.format(self._transport_no)
        transport['id'] = transport_id
        # FIX: was "self._transport_no = 1", which reset the counter instead of
        # advancing it and could hand out duplicate auto-generated transport IDs
        self._transport_no += 1
        yield self._controller.call(u'crossbar.worker.{}.start_websocket_testee_transport'.format(worker_id),
                                    transport_id, transport, options=CallOptions())
        self.log.info("{logname}: transport '{tid}' started",
                      logname=worker_logname,
                      tid=transport_id)
def start(self):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.
    """
    # a configuration must be present; check_config() presumably loads/validates
    # one if none is set yet -- TODO confirm check_config() side effects
    if not self._config:
        self.check_config()

    controller_config = self._config.get('controller', {})

    controller_options = controller_config.get('options', {})

    controller_title = controller_options.get('title', 'crossbar-controller')

    # set controller process title (best-effort; setproctitle is optional)
    try:
        import setproctitle
    except ImportError:
        self.log.warn("Warning, could not set process title (setproctitle not installed)")
    else:
        setproctitle.setproctitle(controller_title)

    # the node's name (must be unique within the management realm)
    if 'id' in controller_config:
        self._node_id = controller_config['id']
    else:
        self._node_id = socket.gethostname()

    if 'manager' in controller_config:
        extra = {
            'onready': Deferred(),

            # authentication information for connecting to uplinkg CDC router
            # using WAMP-CRA authentication
            #
            'authid': self._node_id,
            'authkey': controller_config['manager']['key']
        }
        realm = controller_config['manager']['realm']
        transport = controller_config['manager']['transport']
        # NOTE(review): debug_wamp was removed in later autobahn releases --
        # confirm the pinned autobahn version still accepts this kwarg
        runner = ApplicationRunner(url=transport['url'], realm=realm, extra=extra, debug_wamp=False)
        runner.run(NodeManagementSession, start_reactor=False)

        # wait until we have attached to the uplink CDC
        self._management_session = yield extra['onready']

        self.log.info("Node is connected to Crossbar.io DevOps Center (CDC)")
    else:
        self._management_session = None

    # the node's management realm
    self._realm = controller_config.get('realm', 'crossbar')

    # router and factory that creates router sessions
    #
    self._router_factory = RouterFactory()
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    rlm_config = {
        'name': self._realm
    }
    rlm = RouterRealm(None, rlm_config)

    # create a new router for the realm
    router = self._router_factory.start_realm(rlm)

    # add a router/realm service session
    cfg = ComponentConfig(self._realm)

    rlm.session = RouterServiceSession(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')

    # when attached to CDC, bridge the local management router to the uplink
    if self._management_session:
        self._bridge_session = NodeManagementBridgeSession(cfg, self._management_session)
        self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
    else:
        self._bridge_session = None

    # the node controller singleton WAMP application session
    #
    self._controller = NodeControllerSession(self)

    # add the node controller singleton session to the router
    #
    self._router_session_factory.add(self._controller, authrole=u'trusted')

    # Detect WAMPlets
    #
    wamplets = self._controller._get_wamplets()
    if len(wamplets) > 0:
        self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
        for wpl in wamplets:
            self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
    else:
        self.log.info("No WAMPlets detected in enviroment.")

    panic = False

    # boot workers etc. from the node configuration; any failure here
    # is considered fatal and stops the reactor below
    try:
        yield self._startup(self._config)
    except ApplicationError as e:
        panic = True
        for line in e.args[0].strip().splitlines():
            self.log.error(line)
    except Exception:
        panic = True
        traceback.print_exc()

    if panic:
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
def start(self, cdc_mode=False):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    :param cdc_mode: Whether to connect this node to the uplink CDC service.
    :type cdc_mode: bool
    """
    # a configuration must have been loaded before
    if not self._config:
        raise Exception("No node configuration loaded")

    # nothing configured and not managed: bail out early with a hint
    if not cdc_mode and not self._config.get("controller", {}) and not self._config.get("workers", {}):
        self.log.warn(
            ("You seem to have no controller config or workers, nor are "
             "starting up in CDC mode. Check your config exists, or pass "
             "--cdc to `crossbar start`."))
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
        return

    # get controller config/options
    #
    controller_config = self._config.get('controller', {})
    controller_options = controller_config.get('options', {})

    # set controller process title (best-effort; setproctitle is optional)
    #
    try:
        import setproctitle
    except ImportError:
        self.log.warn("Warning, could not set process title (setproctitle not installed)")
    else:
        setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

    # router and factory that creates router sessions
    #
    self._router_factory = RouterFactory()
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    # create a new router for the realm
    #
    rlm_config = {
        'name': self._realm
    }
    rlm = RouterRealm(None, rlm_config)
    router = self._router_factory.start_realm(rlm)

    # always add a realm service session
    #
    cfg = ComponentConfig(self._realm)
    rlm.session = RouterServiceSession(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')

    # add a router bridge session when running in managed mode
    #
    if cdc_mode:
        self._bridge_session = NodeManagementBridgeSession(cfg)
        self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
    else:
        self._bridge_session = None

    # Node shutdown mode
    #
    if cdc_mode:
        # in managed mode, a node - by default - only shuts down when explicitly asked to,
        # or upon a fatal error in the node controller
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED]
    else:
        # in standalone mode, a node - by default - is immediately shutting down whenever
        # a worker exits (successfully or with error)
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

    # allow to override node shutdown triggers
    #
    if 'shutdown' in controller_options:
        self.log.info("Overriding default node shutdown triggers with {triggers} from node config",
                      triggers=controller_options['shutdown'])
        self._node_shutdown_triggers = controller_options['shutdown']
    else:
        self.log.info("Using default node shutdown triggers {triggers}",
                      triggers=self._node_shutdown_triggers)

    # add the node controller singleton session
    #
    self._controller = NodeControllerSession(self)
    self._router_session_factory.add(self._controller, authrole=u'trusted')

    # detect WAMPlets (FIXME: remove this!)
    #
    wamplets = self._controller._get_wamplets()
    if len(wamplets) > 0:
        self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
        for wpl in wamplets:
            self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
    else:
        self.log.debug("No WAMPlets detected in enviroment.")

    panic = False
    try:
        # startup the node from local node configuration
        #
        yield self._startup(self._config)

        # connect to CDC when running in managed mode
        #
        if cdc_mode:
            cdc_config = controller_config.get('cdc', {

                # CDC connecting transport
                u'transport': {
                    u'type': u'websocket',
                    u'url': u'wss://cdc.crossbario.com/ws',
                    u'endpoint': {
                        u'type': u'tcp',
                        u'host': u'cdc.crossbario.com',
                        u'port': 443,
                        u'timeout': 5,
                        u'tls': {
                            u'hostname': u'cdc.crossbario.com'
                        }
                    }
                }
            })

            transport = cdc_config[u'transport']
            hostname = None
            if u'tls' in transport[u'endpoint']:
                # FIX: the original evaluated this lookup without assigning it,
                # so hostname stayed None and TLS was never enabled for the
                # CDC connection below
                hostname = transport[u'endpoint'][u'tls'][u'hostname']

            runner = ApplicationRunner(
                url=transport['url'],
                realm=None,
                extra=None,
                ssl=optionsForClientTLS(hostname) if hostname else None,
            )

            def make(config):
                # extra info forwarded to CDC client session
                extra = {
                    'node': self,
                    'on_ready': Deferred(),
                    'on_exit': Deferred(),
                    'node_key': self._node_key,
                }

                @inlineCallbacks
                def on_ready(res):
                    self._manager, self._management_realm, self._node_id, self._node_extra = res
                    if self._bridge_session:
                        try:
                            yield self._bridge_session.attach_manager(
                                self._manager, self._management_realm, self._node_id)
                            status = yield self._manager.call(u'cdc.remote.status@1')
                        except:
                            self.log.failure()
                        else:
                            self.log.info(
                                'Connected to CDC for management realm "{realm}" (current time is {now})',
                                realm=self._management_realm,
                                now=status[u'now'])
                    else:
                        self.log.warn('Uplink CDC session established, but no bridge session setup!')

                @inlineCallbacks
                def on_exit(res):
                    if self._bridge_session:
                        try:
                            yield self._bridge_session.detach_manager()
                        except:
                            self.log.failure()
                        else:
                            self.log.info(
                                'Disconnected from CDC for management realm "{realm}"',
                                realm=self._management_realm)
                    else:
                        self.log.warn('Uplink CDC session lost, but no bridge session setup!')

                    self._manager, self._management_realm, self._node_id, self._node_extra = None, None, None, None

                extra['on_ready'].addCallback(on_ready)
                extra['on_exit'].addCallback(on_exit)

                config = ComponentConfig(extra=extra)
                session = NodeManagementSession(config)
                return session

            self.log.info("Connecting to CDC at '{url}' ..", url=transport[u'url'])
            yield runner.run(make, start_reactor=False, auto_reconnect=True)

        # Notify systemd that crossbar is fully up and running
        # (this has no effect on non-systemd platforms)
        try:
            import sdnotify
            sdnotify.SystemdNotifier().notify("READY=1")
        except:
            pass

    except ApplicationError as e:
        panic = True
        self.log.error("{msg}", msg=e.error_message())

    except Exception:
        panic = True
        traceback.print_exc()

    if panic:
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
def start(self, cdc_mode=False):
    """
    Starts this node. This will start a node controller and then spawn new
    worker processes as needed.

    NOTE(review): this method uses ``yield`` and is presumably decorated with
    ``@inlineCallbacks`` at its definition site — confirm the decorator is
    present upstream.

    :param cdc_mode: Whether to run the node in managed ("CDC") mode, which
        establishes an uplink management session and changes the default
        shutdown triggers.
    :type cdc_mode: bool

    :raises Exception: if no node configuration has been loaded.
    """
    if not self._config:
        raise Exception("No node configuration loaded")

    # sanity check: with neither a controller config nor workers, and not in
    # CDC mode, there is nothing to do - warn and stop the reactor
    if not cdc_mode and not self._config.get("controller", {}) and not self._config.get("workers", {}):
        self.log.warn(
            ("You seem to have no controller config or workers, nor are "
             "starting up in CDC mode. Check your config exists, or pass "
             "--cdc to `crossbar start`."))
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
        return

    # get controller config/options
    #
    controller_config = self._config.get('controller', {})
    controller_options = controller_config.get('options', {})

    # set controller process title
    #
    try:
        import setproctitle
    except ImportError:
        self.log.warn("Warning, could not set process title (setproctitle not installed)")
    else:
        setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

    # router and factory that creates router sessions
    #
    self._router_factory = RouterFactory()
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    # create a new router for the realm
    #
    rlm_config = {
        'name': self._realm
    }
    rlm = RouterRealm(None, rlm_config)
    router = self._router_factory.start_realm(rlm)

    # always add a realm service session
    #
    cfg = ComponentConfig(self._realm)
    rlm.session = RouterServiceSession(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')

    # add a router bridge session when running in managed mode
    #
    if cdc_mode:
        self._bridge_session = NodeManagementBridgeSession(cfg)
        self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
    else:
        self._bridge_session = None

    # Node shutdown mode
    #
    if cdc_mode:
        # in managed mode, a node - by default - only shuts down when explicitly asked to,
        # or upon a fatal error in the node controller
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED]
    else:
        # in standalone mode, a node - by default - is immediately shutting down whenever
        # a worker exits (successfully or with error)
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

    # allow to override node shutdown triggers
    #
    if 'shutdown' in controller_options:
        self.log.info("Overriding default node shutdown triggers with {triggers} from node config",
                      triggers=controller_options['shutdown'])
        self._node_shutdown_triggers = controller_options['shutdown']
    else:
        self.log.info("Using default node shutdown triggers {triggers}",
                      triggers=self._node_shutdown_triggers)

    # add the node controller singleton session
    #
    self._controller = NodeControllerSession(self)
    self._router_session_factory.add(self._controller, authrole=u'trusted')

    # detect WAMPlets (FIXME: remove this!)
    #
    wamplets = self._controller._get_wamplets()
    if len(wamplets) > 0:
        self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
        for wpl in wamplets:
            self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
    else:
        self.log.debug("No WAMPlets detected in enviroment.")

    panic = False
    try:
        # startup the node from local node configuration
        #
        yield self._startup(self._config)

        # connect to CDC when running in managed mode
        #
        if cdc_mode:
            cdc_config = controller_config.get('cdc', {
                # CDC connecting transport
                u'transport': {
                    u'type': u'websocket',
                    u'url': u'wss://cdc.crossbario.com/ws',
                    u'endpoint': {
                        u'type': u'tcp',
                        u'host': u'cdc.crossbario.com',
                        u'port': 443,
                        u'timeout': 5,
                        u'tls': {
                            u'hostname': u'cdc.crossbario.com'
                        }
                    }
                }
            })

            transport = cdc_config[u'transport']
            hostname = None
            if u'tls' in transport[u'endpoint']:
                # FIX: the original evaluated this expression without assigning
                # it, leaving hostname None and thereby silently disabling TLS
                # even when the endpoint config declared a TLS hostname
                hostname = transport[u'endpoint'][u'tls'][u'hostname']

            runner = ApplicationRunner(
                url=transport['url'],
                realm=None,
                extra=None,
                ssl=optionsForClientTLS(hostname) if hostname else None,
            )

            def make(config):
                # extra info forwarded to CDC client session
                extra = {
                    'node': self,
                    'on_ready': Deferred(),
                    'on_exit': Deferred(),
                    'node_key': self._node_key,
                }

                @inlineCallbacks
                def on_ready(res):
                    # fired when the uplink CDC session has joined its management realm
                    self._manager, self._management_realm, self._node_id, self._node_extra = res
                    if self._bridge_session:
                        try:
                            yield self._bridge_session.attach_manager(
                                self._manager, self._management_realm, self._node_id)
                            status = yield self._manager.call(u'cdc.remote.status@1')
                        except Exception:
                            self.log.failure()
                        else:
                            self.log.info(
                                'Connected to CDC for management realm "{realm}" (current time is {now})',
                                realm=self._management_realm, now=status[u'now'])
                    else:
                        self.log.warn('Uplink CDC session established, but no bridge session setup!')

                @inlineCallbacks
                def on_exit(res):
                    # fired when the uplink CDC session is gone - detach and reset state
                    if self._bridge_session:
                        try:
                            yield self._bridge_session.detach_manager()
                        except Exception:
                            self.log.failure()
                        else:
                            self.log.info(
                                'Disconnected from CDC for management realm "{realm}"',
                                realm=self._management_realm)
                    else:
                        self.log.warn('Uplink CDC session lost, but no bridge session setup!')
                    self._manager, self._management_realm, self._node_id, self._node_extra = None, None, None, None

                extra['on_ready'].addCallback(on_ready)
                extra['on_exit'].addCallback(on_exit)

                config = ComponentConfig(extra=extra)
                session = NodeManagementSession(config)
                return session

            self.log.info("Connecting to CDC at '{url}' ..", url=transport[u'url'])
            yield runner.run(make, start_reactor=False, auto_reconnect=True)

        # Notify systemd that crossbar is fully up and running
        # (this has no effect on non-systemd platforms)
        try:
            import sdnotify
            sdnotify.SystemdNotifier().notify("READY=1")
        except Exception:
            # best-effort: sdnotify missing or notification failed is non-fatal
            pass

    except ApplicationError as e:
        panic = True
        self.log.error("{msg}", msg=e.error_message())
    except Exception:
        panic = True
        traceback.print_exc()

    if panic:
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
def start(self):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    NOTE(review): this method uses ``yield`` and presumably carries an
    ``@inlineCallbacks`` decorator at its definition site - confirm upstream.

    :raises Exception: propagated from config checking / uplink connection.
    """
    # for now, a node is always started from a local configuration
    #
    configfile = os.path.join(self.options.cbdir, self.options.config)
    self.log.info("Starting from node configuration file '{configfile}'",
                  configfile=configfile)
    # parse + validate the node config (raises on invalid config)
    self._config = check_config_file(configfile, silence=True)

    controller_config = self._config.get('controller', {})
    controller_options = controller_config.get('options', {})
    controller_title = controller_options.get('title', 'crossbar-controller')

    # set the OS-level process title (best effort - optional dependency)
    try:
        import setproctitle
    except ImportError:
        self.log.warn("Warning, could not set process title (setproctitle not installed)")
    else:
        setproctitle.setproctitle(controller_title)

    # the node's name (must be unique within the management realm):
    # precedence is manager config > controller 'id' > hostname
    if 'manager' in self._config:
        self._node_id = self._config['manager']['id']
    else:
        if 'id' in controller_config:
            self._node_id = controller_config['id']
        else:
            self._node_id = socket.gethostname()

    if 'manager' in self._config:
        # managed mode: connect the uplink management session
        # NOTE(review): URL/realm are hard-coded dev values here - confirm
        extra = {
            'onready': Deferred()
        }
        runner = ApplicationRunner(url=u"ws://localhost:9000", realm=u"cdc-oberstet-1", extra=extra)
        runner.run(NodeManagementSession, start_reactor=False)

        # wait until we have attached to the uplink CDC
        self._management_session = yield extra['onready']

        self.log.info("Connected to Crossbar.io Management Cloud: {management_session}",
                      management_session=self._management_session)
    else:
        self._management_session = None

    # the node's management realm
    self._realm = controller_config.get('realm', 'crossbar')

    # router and factory that creates router sessions
    #
    self._router_factory = RouterFactory()
    self._router_session_factory = RouterSessionFactory(self._router_factory)

    rlm = RouterRealm(None, {'name': self._realm})

    # create a new router for the realm
    router = self._router_factory.start_realm(rlm)

    # add a router/realm service session
    cfg = ComponentConfig(self._realm)

    rlm.session = RouterServiceSession(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')

    # bridge local node router events/calls to the uplink (managed mode only)
    if self._management_session:
        self._bridge_session = NodeManagementBridgeSession(cfg, self._management_session)
        self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
    else:
        self._bridge_session = None

    # the node controller singleton WAMP application session
    #
    # session_config = ComponentConfig(realm = options.realm, extra = options)
    self._controller = NodeControllerSession(self)

    # add the node controller singleton session to the router
    #
    self._router_session_factory.add(self._controller, authrole=u'trusted')

    # Detect WAMPlets
    #
    wamplets = self._controller._get_wamplets()
    if len(wamplets) > 0:
        self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
        for wpl in wamplets:
            self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
    else:
        self.log.info("No WAMPlets detected in enviroment.")

    # bring up the node from config; any failure stops the reactor
    try:
        if 'manager' in self._config:
            yield self._startup_managed(self._config)
        else:
            yield self._startup_standalone(self._config)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        traceback.print_exc()
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
class Node(object):
    """
    A Crossbar.io node is the running a controller process and one or multiple
    worker processes. A single Crossbar.io node runs exactly one instance of
    this class, hence this class can be considered a system singleton.
    """

    log = make_logger()

    def __init__(self, cbdir=None, reactor=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode
        :param reactor: Reactor to run on.
        :type reactor: obj or None
        """
        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on (defaults to the global Twisted reactor)
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # the node's name (must be unique within the management realm)
        self._node_id = None

        # the node's management realm
        self._realm = None

        # config of this node.
        self._config = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # when run in "managed mode", this will hold the uplink WAMP session
        # from the node controller to the mananagement application
        self._manager = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

    def load(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in CDC default config.

        :param configfile: Path of the config file to load, relative to the
            node directory, or None to use the built-in CDC default config.
        :type configfile: unicode or None
        """
        if configfile:
            configpath = os.path.join(self._cbdir, configfile)

            self.log.debug("Loading node configuration from '{configpath}' ..",
                           configpath=configpath)

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = checkconfig.check_config_file(configpath, silence=True)

            self.log.info("Node configuration loaded from '{configfile}'",
                          configfile=configfile)
        else:
            self._config = {u"controller": {u"cdc": {u"enabled": True}}}
            checkconfig.check_config(self._config)
            self.log.info("Node configuration loaded from built-in CDC config.")

    def _prepare_node_keys(self):
        """
        Load (or create) the node's CDC identity and Ed25519 signing key from
        the CBDIR/.cdc directory, falling back to the CDC_NODE_ID environment
        variable for the identity.

        :returns: tuple of (management realm, node ID, signing key)
        :raises Exception: if .cdc / node.id are of the wrong filesystem type,
            the node principal is malformed, the key length is invalid, or no
            node ID can be determined at all.
        """
        from nacl.signing import SigningKey
        from nacl.encoding import HexEncoder

        # make sure CBDIR/.cdc exists
        #
        cdc_dir = os.path.join(self._cbdir, '.cdc')
        if os.path.isdir(cdc_dir):
            pass
        elif os.path.exists(cdc_dir):
            raise Exception(".cdc exists, but isn't a directory")
        else:
            os.mkdir(cdc_dir)
            self.log.info("CDC directory created")

        # load node ID, either from .cdc/node.id or from CDC_NODE_ID
        #
        def split_nid(nid_s):
            # split a "<node id>@<management realm>" principal string
            nid_c = nid_s.strip().split('@')
            if len(nid_c) != 2:
                raise Exception("illegal node principal '{}' - must follow the form <node id>@<management realm>".format(nid_s))
            node_id, realm = nid_c
            # FIXME: regex check node_id and realm
            return node_id, realm

        nid_file = os.path.join(cdc_dir, 'node.id')
        node_id, realm = None, None
        if os.path.isfile(nid_file):
            with open(nid_file, 'r') as f:
                node_id, realm = split_nid(f.read())
        elif os.path.exists(nid_file):
            raise Exception("{} exists, but isn't a file".format(nid_file))
        else:
            if 'CDC_NODE_ID' in os.environ:
                node_id, realm = split_nid(os.environ['CDC_NODE_ID'])
            else:
                raise Exception("Neither node ID file {} exists nor CDC_NODE_ID environment variable set".format(nid_file))

        # Load the node key, either from .cdc/node.key or from CDC_NODE_KEY.
        # The node key is a Ed25519 key in either raw format (32 bytes) or in
        # hex-encoded form (64 characters).
        #
        # Actually, what's loaded is not the secret Ed25519 key, but the _seed_
        # for that key. Private keys are derived from this 32-byte (256-bit)
        # random seed value. It is thus the seed value which is sensitive and
        # must be protected.
        #
        skey_file = os.path.join(cdc_dir, 'node.key')
        skey = None
        if os.path.isfile(skey_file):
            # FIXME: check file permissions are 0600!

            # This value is read in here.
            # key format is inferred from file size: 32 = raw seed, 64 = hex seed
            # NOTE(review): file is opened in text mode ('r') even for the raw
            # 32-byte case - confirm this is intended on Py3
            skey_len = os.path.getsize(skey_file)
            if skey_len in (32, 64):
                with open(skey_file, 'r') as f:
                    skey_seed = f.read()
                    encoder = None
                    if skey_len == 64:
                        encoder = HexEncoder
                    skey = SigningKey(skey_seed, encoder=encoder)
                self.log.info("Existing CDC node key loaded from {skey_file}.",
                              skey_file=skey_file)
            else:
                # NOTE(review): message has a '{}' placeholder but no .format() arg
                raise Exception("invalid node key length {} (key must either be 32 raw bytes or hex encoded 32 bytes, hence 64 byte char length)")
        elif os.path.exists(skey_file):
            raise Exception("{} exists, but isn't a file".format(skey_file))
        else:
            # no key yet: generate a fresh one and persist the hex-encoded seed
            skey = SigningKey.generate()
            skey_seed = skey.encode(encoder=HexEncoder)
            with open(skey_file, 'w') as f:
                f.write(skey_seed)

            # set file mode to read only for owner
            # 384 (decimal) == 0600 (octal) - we use that for Py2/3 reasons
            os.chmod(skey_file, 384)
            self.log.info("New CDC node key {skey_file} generated.",
                          skey_file=skey_file)

        return realm, node_id, skey

    @inlineCallbacks
    def start(self, cdc_mode=False):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.

        :param cdc_mode: Whether to force managed ("CDC") mode.
            NOTE(review): in this revision the managed/standalone decision is
            driven by the 'cdc' controller config, not by this flag - confirm.
        :type cdc_mode: bool

        :raises Exception: if no node configuration is loaded, or the CDC
            config/credentials are incomplete, or the CDC uplink fails.
        """
        if not self._config:
            raise Exception("No node configuration loaded")

        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # set controller process title
        #
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # the node controller realm
        #
        self._realm = controller_config.get('realm', 'crossbar')

        # the node's name (must be unique within the management realm when running
        # in "managed mode"): config 'id' > CDC_ID env var > hostname
        #
        if 'id' in controller_config:
            self._node_id = controller_config['id']
            self.log.info("Node ID '{node_id}' set from config",
                          node_id=self._node_id)
        elif 'CDC_ID' in os.environ:
            self._node_id = u'{}'.format(os.environ['CDC_ID'])
            self.log.info("Node ID '{node_id}' set from environment variable CDC_ID",
                          node_id=self._node_id)
        else:
            self._node_id = u'{}'.format(socket.gethostname())
            self.log.info("Node ID '{node_id}' set from hostname",
                          node_id=self._node_id)

        # standalone vs managed mode
        #
        if 'cdc' in controller_config and controller_config['cdc'].get('enabled', False):

            self._prepare_node_keys()

            cdc_config = controller_config['cdc']

            # CDC connecting transport
            #
            if 'transport' in cdc_config:
                transport = cdc_config['transport']
                if 'tls' in transport['endpoint']:
                    hostname = transport['endpoint']['tls']['hostname']
                else:
                    raise Exception("TLS activated on CDC connection, but 'hostname' not provided")
                self.log.warn("CDC transport configuration overridden from node config!")
            else:
                # built-in default transport to the hosted CDC service
                transport = {
                    "type": u"websocket",
                    "url": u"wss://devops.crossbario.com/ws",
                    "endpoint": {
                        "type": u"tcp",
                        "host": u"devops.crossbario.com",
                        "port": 443,
                        "timeout": 5,
                        "tls": {
                            "hostname": u"devops.crossbario.com"
                        }
                    }
                }
                hostname = u'devops.crossbario.com'

            # CDC management realm: config 'realm' > CDC_REALM env var > error
            #
            if 'realm' in cdc_config:
                realm = cdc_config['realm']
                self.log.info("CDC management realm '{realm}' set from config", realm=realm)
            elif 'CDC_REALM' in os.environ:
                realm = u"{}".format(os.environ['CDC_REALM']).strip()
                self.log.info("CDC management realm '{realm}' set from enviroment variable CDC_REALM", realm=realm)
            else:
                raise Exception("CDC management realm not set - either 'realm' must be set in node configuration, or in CDC_REALM enviroment variable")

            # CDC authentication credentials (for WAMP-CRA):
            # config 'secret' > CDC_SECRET env var > error
            #
            authid = self._node_id
            if 'secret' in cdc_config:
                authkey = cdc_config['secret']
                self.log.info("CDC authentication secret loaded from config")
            elif 'CDC_SECRET' in os.environ:
                authkey = u"{}".format(os.environ['CDC_SECRET']).strip()
                self.log.info("CDC authentication secret loaded from environment variable CDC_SECRET")
            else:
                raise Exception("CDC authentication secret not set - either 'secret' must be set in node configuration, or in CDC_SECRET enviroment variable")

            # extra info forwarded to CDC client session
            #
            extra = {
                'node': self,
                'onready': Deferred(),
                'onexit': Deferred(),
                'authid': authid,
                'authkey': authkey
            }

            runner = ApplicationRunner(
                url=transport['url'], realm=realm, extra=extra,
                ssl=optionsForClientTLS(hostname),
                debug=False, debug_wamp=False,
            )

            try:
                self.log.info("Connecting to CDC at '{url}' ..", url=transport['url'])
                yield runner.run(NodeManagementSession, start_reactor=False)

                # wait until we have attached to the uplink CDC
                self._manager = yield extra['onready']
            except Exception as e:
                raise Exception("Could not connect to CDC - {}".format(e))

            # in managed mode, a node - by default - only shuts down when explicitly asked to,
            # or upon a fatal error in the node controller
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED]

            self.log.info("Connected to Crossbar.io DevOps Center (CDC)! Your node runs in managed mode.")
        else:
            self._manager = None

            # in standalone mode, a node - by default - is immediately shutting down whenever
            # a worker exits (successfully or with error)
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # allow to override node shutdown triggers
        #
        if 'shutdown' in controller_options:
            self.log.info("Overriding default node shutdown triggers with {} from node config".format(controller_options['shutdown']))
            self._node_shutdown_triggers = controller_options['shutdown']
        else:
            self.log.info("Using default node shutdown triggers {}".format(self._node_shutdown_triggers))

        # router and factory that creates router sessions
        #
        self._router_factory = RouterFactory(self._node_id)
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        rlm_config = {'name': self._realm}
        rlm = RouterRealm(None, rlm_config)

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        cfg = ComponentConfig(self._realm)

        rlm.session = RouterServiceSession(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')

        # bridge the local node router to the CDC uplink (managed mode only)
        if self._manager:
            self._bridge_session = NodeManagementBridgeSession(cfg, self, self._manager)
            self._router_session_factory.add(self._bridge_session, authrole=u'trusted')
        else:
            self._bridge_session = None

        # the node controller singleton WAMP application session
        #
        self._controller = NodeControllerSession(self)

        # add the node controller singleton session to the router
        #
        self._router_session_factory.add(self._controller, authrole=u'trusted')

        # Detect WAMPlets
        #
        wamplets = self._controller._get_wamplets()
        if len(wamplets) > 0:
            self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
            for wpl in wamplets:
                self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
        else:
            self.log.debug("No WAMPlets detected in enviroment.")

        panic = False

        # bring up the node from config; any failure stops the reactor
        try:
            yield self._startup(self._config)
        except ApplicationError as e:
            panic = True
            self.log.error("{msg}", msg=e.error_message())
        except Exception:
            panic = True
            traceback.print_exc()

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    @inlineCallbacks
    def _startup(self, config):
        """
        Start the node from the given (checked) node configuration: start
        Manhole in the controller if configured, then start each configured
        worker (router / container / websocket-testee / guest) along with its
        realms, roles, uplinks, connections, components and transports.

        :param config: The parsed and checked node configuration dict.
        :type config: dict

        :raises Exception: on an unknown worker type ("logic error") or any
            failure propagated from a controller call.
        """
        # fake call details information when calling into
        # remoted procedure locally
        #
        call_details = CallDetails(caller=0)

        controller = config.get('controller', {})

        # start Manhole in node controller
        #
        if 'manhole' in controller:
            yield self._controller.start_manhole(controller['manhole'], details=call_details)

        # startup all workers
        #
        worker_no = 1

        call_options = CallOptions(disclose_me=True)

        for worker in config.get('workers', []):
            # worker ID, type and logname
            #
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                # auto-assign sequential worker IDs when not configured
                worker_id = 'worker{}'.format(worker_no)
                worker_no += 1

            worker_type = worker['type']
            worker_options = worker.get('options', {})

            if worker_type == 'router':
                worker_logname = "Router '{}'".format(worker_id)
            elif worker_type == 'container':
                worker_logname = "Container '{}'".format(worker_id)
            elif worker_type == 'websocket-testee':
                worker_logname = "WebSocketTestee '{}'".format(worker_id)
            elif worker_type == 'guest':
                worker_logname = "Guest '{}'".format(worker_id)
            else:
                raise Exception("logic error")

            # router/container
            #
            if worker_type in ['router', 'container', 'websocket-testee']:

                # start a new native worker process ..
                #
                if worker_type == 'router':
                    yield self._controller.start_router(worker_id, worker_options, details=call_details)
                elif worker_type == 'container':
                    yield self._controller.start_container(worker_id, worker_options, details=call_details)
                elif worker_type == 'websocket-testee':
                    yield self._controller.start_websocket_testee(worker_id, worker_options, details=call_details)
                else:
                    raise Exception("logic error")

                # setup native worker generic stuff
                #
                if 'pythonpath' in worker_options:
                    added_paths = yield self._controller.call('crossbar.node.{}.worker.{}.add_pythonpath'.format(self._node_id, worker_id), worker_options['pythonpath'], options=call_options)
                    self.log.debug("{worker}: PYTHONPATH extended for {paths}",
                                   worker=worker_logname, paths=added_paths)

                if 'cpu_affinity' in worker_options:
                    new_affinity = yield self._controller.call('crossbar.node.{}.worker.{}.set_cpu_affinity'.format(self._node_id, worker_id), worker_options['cpu_affinity'], options=call_options)
                    self.log.debug("{worker}: CPU affinity set to {affinity}",
                                   worker=worker_logname, affinity=new_affinity)

                if 'manhole' in worker:
                    yield self._controller.call('crossbar.node.{}.worker.{}.start_manhole'.format(self._node_id, worker_id), worker['manhole'], options=call_options)
                    self.log.debug("{worker}: manhole started",
                                   worker=worker_logname)

                # setup router worker
                #
                if worker_type == 'router':

                    # start realms on router
                    #
                    realm_no = 1

                    for realm in worker.get('realms', []):

                        if 'id' in realm:
                            realm_id = realm.pop('id')
                        else:
                            realm_id = 'realm{}'.format(realm_no)
                            realm_no += 1

                        # extract schema information from WAMP-flavored Markdown
                        #
                        schemas = None
                        if 'schemas' in realm:
                            schemas = {}
                            # schema declarations are fenced ```javascript blocks
                            schema_pat = re.compile(r"```javascript(.*?)```", re.DOTALL)
                            cnt_files = 0
                            cnt_decls = 0
                            for schema_file in realm.pop('schemas'):
                                schema_file = os.path.join(self._cbdir, schema_file)
                                self.log.info("{worker}: processing WAMP-flavored Markdown file {schema_file} for WAMP schema declarations",
                                              worker=worker_logname, schema_file=schema_file)
                                with open(schema_file, 'r') as f:
                                    cnt_files += 1
                                    for d in schema_pat.findall(f.read()):
                                        try:
                                            o = json.loads(d)
                                            # only accept dicts self-declared as WAMP schemas
                                            if isinstance(o, dict) and '$schema' in o and o['$schema'] == u'http://wamp.ws/schema#':
                                                uri = o['uri']
                                                if uri not in schemas:
                                                    schemas[uri] = {}
                                                schemas[uri].update(o)
                                                cnt_decls += 1
                                        except Exception:
                                            self.log.failure("{worker}: WARNING - failed to process declaration in {schema_file} - {log_failure.value}",
                                                             worker=worker_logname, schema_file=schema_file)
                            self.log.info("{worker}: processed {cnt_files} files extracting {cnt_decls} schema declarations and {len_schemas} URIs",
                                          worker=worker_logname, cnt_files=cnt_files,
                                          cnt_decls=cnt_decls, len_schemas=len(schemas))

                        enable_trace = realm.get('trace', False)
                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm'.format(self._node_id, worker_id), realm_id, realm, schemas, enable_trace=enable_trace, options=call_options)
                        self.log.info("{worker}: realm '{realm_id}' (named '{realm_name}') started",
                                      worker=worker_logname, realm_id=realm_id, realm_name=realm['name'], enable_trace=enable_trace)

                        # add roles to realm
                        #
                        role_no = 1
                        for role in realm.get('roles', []):
                            if 'id' in role:
                                role_id = role.pop('id')
                            else:
                                role_id = 'role{}'.format(role_no)
                                role_no += 1

                            yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm_role'.format(self._node_id, worker_id), realm_id, role_id, role, options=call_options)
                            self.log.info("{}: role '{}' (named '{}') started on realm '{}'".format(worker_logname, role_id, role['name'], realm_id))

                        # start uplinks for realm
                        #
                        uplink_no = 1
                        for uplink in realm.get('uplinks', []):
                            if 'id' in uplink:
                                uplink_id = uplink.pop('id')
                            else:
                                uplink_id = 'uplink{}'.format(uplink_no)
                                uplink_no += 1

                            yield self._controller.call('crossbar.node.{}.worker.{}.start_router_realm_uplink'.format(self._node_id, worker_id), realm_id, uplink_id, uplink, options=call_options)
                            self.log.info("{}: uplink '{}' started on realm '{}'".format(worker_logname, uplink_id, realm_id))

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the router
                    #
                    connection_no = 1

                    for connection in worker.get('connections', []):

                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection{}'.format(connection_no)
                            connection_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_connection'.format(self._node_id, worker_id), connection_id, connection, options=call_options)
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the router
                    #
                    component_no = 1

                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component{}'.format(component_no)
                            component_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_component'.format(self._node_id, worker_id), component_id, component, options=call_options)
                        self.log.info("{}: component '{}' started".format(worker_logname, component_id))

                    # start transports on router
                    #
                    transport_no = 1

                    for transport in worker['transports']:

                        if 'id' in transport:
                            transport_id = transport.pop('id')
                        else:
                            transport_id = 'transport{}'.format(transport_no)
                            transport_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_router_transport'.format(self._node_id, worker_id), transport_id, transport, options=call_options)
                        self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                # setup container worker
                #
                elif worker_type == 'container':

                    component_no = 1

                    # if components exit "very soon after" we try to
                    # start them, we consider that a failure and shut
                    # our node down. We remove this subscription 2
                    # seconds after we're done starting everything
                    # (see below). This is necessary as
                    # start_container_component returns as soon as
                    # we've established a connection to the component
                    def component_exited(info):
                        component_id = info.get("id")
                        self.log.critical("Component '{component_id}' failed to start; shutting down node.",
                                          component_id=component_id)
                        try:
                            self._reactor.stop()
                        except twisted.internet.error.ReactorNotRunning:
                            pass
                    topic = 'crossbar.node.{}.worker.{}.container.on_component_stop'.format(self._node_id, worker_id)
                    component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                    # start connections (such as PostgreSQL database connection pools)
                    # to run embedded in the container
                    #
                    connection_no = 1

                    for connection in worker.get('connections', []):

                        if 'id' in connection:
                            connection_id = connection.pop('id')
                        else:
                            connection_id = 'connection{}'.format(connection_no)
                            connection_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_connection'.format(self._node_id, worker_id), connection_id, connection, options=call_options)
                        self.log.info("{}: connection '{}' started".format(worker_logname, connection_id))

                    # start components to run embedded in the container
                    #
                    for component in worker.get('components', []):

                        if 'id' in component:
                            component_id = component.pop('id')
                        else:
                            component_id = 'component{}'.format(component_no)
                            component_no += 1

                        yield self._controller.call('crossbar.node.{}.worker.{}.start_container_component'.format(self._node_id, worker_id), component_id, component, options=call_options)
                        self.log.info("{worker}: component '{component_id}' started",
                                      worker=worker_logname, component_id=component_id)

                    # after 2 seconds, consider all the application components running
                    self._reactor.callLater(2, component_stop_sub.unsubscribe)

                # setup websocket-testee worker
                #
                elif worker_type == 'websocket-testee':

                    # start transports on router
                    #
                    transport = worker['transport']
                    transport_no = 1
                    transport_id = 'transport{}'.format(transport_no)

                    yield self._controller.call('crossbar.node.{}.worker.{}.start_websocket_testee_transport'.format(self._node_id, worker_id), transport_id, transport, options=call_options)
                    self.log.info("{}: transport '{}' started".format(worker_logname, transport_id))

                else:
                    raise Exception("logic error")

            elif worker_type == 'guest':

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")
def start(self, cdc_mode=False):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    Sequence: load controller config -> set process title -> determine node ID ->
    connect to CDC (managed mode) or stay standalone -> create the local router,
    service session and node controller session -> run node startup from config.

    :param cdc_mode: NOTE(review): this parameter is not referenced anywhere in
        this body (managed mode is decided from the 'cdc' config section below) —
        confirm whether it is vestigial or consumed by an overriding subclass.
    """
    if not self._config:
        raise Exception("No node configuration loaded")

    controller_config = self._config.get('controller', {})

    controller_options = controller_config.get('options', {})

    # set controller process title
    #
    try:
        import setproctitle
    except ImportError:
        self.log.warn(
            "Warning, could not set process title (setproctitle not installed)"
        )
    else:
        setproctitle.setproctitle(
            controller_options.get('title', 'crossbar-controller'))

    # the node controller realm
    #
    self._realm = controller_config.get('realm', 'crossbar')

    # the node's name (must be unique within the management realm when running
    # in "managed mode")
    #
    # precedence: explicit config 'id' > CDC_ID env var > hostname
    if 'id' in controller_config:
        self._node_id = controller_config['id']
        self.log.info("Node ID '{node_id}' set from config",
                      node_id=self._node_id)
    elif 'CDC_ID' in os.environ:
        self._node_id = u'{}'.format(os.environ['CDC_ID'])
        self.log.info(
            "Node ID '{node_id}' set from environment variable CDC_ID",
            node_id=self._node_id)
    else:
        self._node_id = u'{}'.format(socket.gethostname())
        self.log.info("Node ID '{node_id}' set from hostname",
                      node_id=self._node_id)

    # standalone vs managed mode
    #
    if 'cdc' in controller_config and controller_config['cdc'].get(
            'enabled', False):

        self._prepare_node_keys()

        cdc_config = controller_config['cdc']

        # CDC connecting transport
        #
        if 'transport' in cdc_config:
            transport = cdc_config['transport']
            if 'tls' in transport['endpoint']:
                hostname = transport['endpoint']['tls']['hostname']
            else:
                raise Exception(
                    "TLS activated on CDC connection, but 'hostname' not provided"
                )
            self.log.warn(
                "CDC transport configuration overridden from node config!")
        else:
            # default CDC endpoint: hosted DevOps Center over secure WebSocket
            transport = {
                "type": u"websocket",
                "url": u"wss://devops.crossbario.com/ws",
                "endpoint": {
                    "type": u"tcp",
                    "host": u"devops.crossbario.com",
                    "port": 443,
                    "timeout": 5,
                    "tls": {
                        "hostname": u"devops.crossbario.com"
                    }
                }
            }
            hostname = u'devops.crossbario.com'

        # CDC management realm
        #
        if 'realm' in cdc_config:
            realm = cdc_config['realm']
            self.log.info("CDC management realm '{realm}' set from config",
                          realm=realm)
        elif 'CDC_REALM' in os.environ:
            realm = u"{}".format(os.environ['CDC_REALM']).strip()
            self.log.info(
                "CDC management realm '{realm}' set from enviroment variable CDC_REALM",
                realm=realm)
        else:
            raise Exception(
                "CDC management realm not set - either 'realm' must be set in node configuration, or in CDC_REALM enviroment variable"
            )

        # CDC authentication credentials (for WAMP-CRA)
        #
        authid = self._node_id
        if 'secret' in cdc_config:
            authkey = cdc_config['secret']
            self.log.info("CDC authentication secret loaded from config")
        elif 'CDC_SECRET' in os.environ:
            authkey = u"{}".format(os.environ['CDC_SECRET']).strip()
            self.log.info(
                "CDC authentication secret loaded from environment variable CDC_SECRET"
            )
        else:
            raise Exception(
                "CDC authentication secret not set - either 'secret' must be set in node configuration, or in CDC_SECRET enviroment variable"
            )

        # extra info forwarded to CDC client session
        #
        extra = {
            'node': self,
            'onready': Deferred(),   # fired by the management session once attached
            'onexit': Deferred(),
            'authid': authid,
            'authkey': authkey
        }

        runner = ApplicationRunner(
            url=transport['url'],
            realm=realm,
            extra=extra,
            ssl=optionsForClientTLS(hostname),
            debug=False,
            debug_wamp=False,
        )

        try:
            self.log.info("Connecting to CDC at '{url}' ..",
                          url=transport['url'])
            yield runner.run(NodeManagementSession, start_reactor=False)

            # wait until we have attached to the uplink CDC
            self._manager = yield extra['onready']
        except Exception as e:
            raise Exception("Could not connect to CDC - {}".format(e))

        # in managed mode, a node - by default - only shuts down when explicitly asked to,
        # or upon a fatal error in the node controller
        self._node_shutdown_triggers = [
            checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED
        ]

        self.log.info(
            "Connected to Crossbar.io DevOps Center (CDC)! Your node runs in managed mode."
        )
    else:
        self._manager = None

        # in standalone mode, a node - by default - is immediately shutting down whenever
        # a worker exits (successfully or with error)
        self._node_shutdown_triggers = [
            checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT
        ]

    # allow to override node shutdown triggers
    #
    if 'shutdown' in controller_options:
        self.log.info(
            "Overriding default node shutdown triggers with {} from node config"
            .format(controller_options['shutdown']))
        self._node_shutdown_triggers = controller_options['shutdown']
    else:
        self.log.info("Using default node shutdown triggers {}".format(
            self._node_shutdown_triggers))

    # router and factory that creates router sessions
    #
    self._router_factory = RouterFactory(self._node_id)
    self._router_session_factory = RouterSessionFactory(
        self._router_factory)

    rlm_config = {'name': self._realm}
    rlm = RouterRealm(None, rlm_config)

    # create a new router for the realm
    router = self._router_factory.start_realm(rlm)

    # add a router/realm service session
    cfg = ComponentConfig(self._realm)

    rlm.session = RouterServiceSession(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')

    # in managed mode, also attach the bridge session that forwards
    # management traffic between the local router and the CDC uplink
    if self._manager:
        self._bridge_session = NodeManagementBridgeSession(
            cfg, self, self._manager)
        self._router_session_factory.add(self._bridge_session,
                                         authrole=u'trusted')
    else:
        self._bridge_session = None

    # the node controller singleton WAMP application session
    #
    self._controller = NodeControllerSession(self)

    # add the node controller singleton session to the router
    #
    self._router_session_factory.add(self._controller, authrole=u'trusted')

    # Detect WAMPlets
    #
    wamplets = self._controller._get_wamplets()
    if len(wamplets) > 0:
        self.log.info("Detected {wamplets} WAMPlets in environment:",
                      wamplets=len(wamplets))
        for wpl in wamplets:
            self.log.info("WAMPlet {dist}.{name}",
                          dist=wpl['dist'],
                          name=wpl['name'])
    else:
        self.log.debug("No WAMPlets detected in enviroment.")

    # run the node startup; any failure here is fatal and stops the reactor
    panic = False

    try:
        yield self._startup(self._config)
    except ApplicationError as e:
        panic = True
        self.log.error("{msg}", msg=e.error_message())
    except Exception:
        panic = True
        traceback.print_exc()

    if panic:
        try:
            self._reactor.stop()
        except twisted.internet.error.ReactorNotRunning:
            pass
class RouterWorkerSession(NativeWorkerSession):
    """
    A native Crossbar.io worker that runs a WAMP router which can manage
    multiple realms, run multiple transports and links, as well as host
    multiple (embedded) application components.
    """
    WORKER_TYPE = "router"

    @inlineCallbacks
    def onJoin(self, details):
        """
        Called when worker process has joined the node's management realm.

        Initializes the per-worker router/session factories and bookkeeping
        maps, then registers the management API procedures listed below.
        """
        yield NativeWorkerSession.onJoin(self, details, publish_ready=False)

        # factory for producing (per-realm) routers
        self._router_factory = RouterFactory()

        # factory for producing router sessions
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        # map: realm ID -> RouterRealm
        self.realms = {}

        # map: realm URI -> realm ID
        self.realm_to_id = {}

        # map: transport ID -> RouterTransport
        self.transports = {}

        # map: link ID -> RouterLink
        self.links = {}

        # map: component ID -> RouterComponent
        self.components = {}

        # the procedures registered
        procs = [
            "get_router_realms",
            "start_router_realm",
            "stop_router_realm",

            "get_router_realm_roles",
            "start_router_realm_role",
            "stop_router_realm_role",

            "get_router_components",
            "start_router_component",
            "stop_router_component",

            "get_router_transports",
            "start_router_transport",
            "stop_router_transport",

            "get_router_links",
            "start_router_link",
            "stop_router_link",
        ]

        dl = []
        for proc in procs:
            uri = "{}.{}".format(self._uri_prefix, proc)
            self.log.debug("Registering management API procedure {proc}", proc=uri)
            dl.append(self.register(getattr(self, proc), uri, options=RegisterOptions(details_arg="details")))

        regs = yield DeferredList(dl)

        self.log.debug("Registered {cnt} management API procedures", cnt=len(regs))

        # NativeWorkerSession.publish_ready()
        yield self.publish_ready()

    def get_router_realms(self, details=None):
        """
        List realms currently managed by this router.
        """
        self.log.debug("{}.get_router_realms".format(self.__class__.__name__))

        raise Exception("not implemented")

    def start_router_realm(self, id, config, schemas=None, details=None):
        """
        Starts a realm managed by this router.

        :param id: The ID of the realm to start.
        :type id: str
        :param config: The realm configuration.
        :type config: dict
        :param schemas: An (optional) initial schema dictionary to load.
        :type schemas: dict
        """
        self.log.debug("{}.start_router_realm".format(self.__class__.__name__),
                       id=id, config=config, schemas=schemas)

        # URI of the realm to start
        realm = config["name"]

        # track realm
        rlm = RouterRealm(id, config)
        self.realms[id] = rlm
        self.realm_to_id[realm] = id

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        cfg = ComponentConfig(realm)

        rlm.session = RouterServiceSession(cfg, router, schemas)
        self._router_session_factory.add(rlm.session, authrole=u"trusted")

    def stop_router_realm(self, id, close_sessions=False, details=None):
        """
        Stop a router realm.

        When a realm has stopped, no new session will be allowed to attach to the realm.
        Optionally, close all sessions currently attached to the realm.

        :param id: ID of the realm to stop.
        :type id: str
        :param close_sessions: If `True`, close all session currently attached.
        :type close_sessions: bool
        """
        self.log.debug("{}.stop_router_realm".format(self.__class__.__name__),
                       id=id, close_sessions=close_sessions)

        # FIXME
        raise NotImplementedError()

    def get_router_realm_roles(self, id, details=None):
        """
        :param id: The ID of the router realm to list roles for.
        :type id: str

        :returns: list -- A list of roles.
        """
        self.log.debug("{}.get_router_realm_roles".format(self.__class__.__name__), id=id)

        if id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        return self.realms[id].roles.values()

    def start_router_realm_role(self, id, role_id, config, details=None):
        """
        Adds a role to a realm.

        :param id: The ID of the realm the role should be added to.
        :type id: str
        :param role_id: The ID of the role to add.
        :type role_id: str
        :param config: The role configuration.
        :type config: dict
        """
        self.log.debug(
            "{}.add_router_realm_role".format(self.__class__.__name__),
            id=id,
            role_id=role_id,
            config=config
        )

        if id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        if role_id in self.realms[id].roles:
            raise ApplicationError(
                u"crossbar.error.already_exists",
                "A role with ID '{}' already exists in realm with ID '{}'".format(role_id, id),
            )

        self.realms[id].roles[role_id] = RouterRealmRole(role_id, config)

        # activate the role on the underlying router (keyed by realm URI)
        realm = self.realms[id].config["name"]
        self._router_factory.add_role(realm, config)

    def stop_router_realm_role(self, id, role_id, details=None):
        """
        Drop a role from a realm.

        :param id: The ID of the realm to drop a role from.
        :type id: str
        :param role_id: The ID of the role within the realm to drop.
        :type role_id: str
        """
        self.log.debug("{}.drop_router_realm_role".format(self.__class__.__name__),
                       id=id, role_id=role_id)

        if id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        if role_id not in self.realms[id].roles:
            raise ApplicationError(
                u"crossbar.error.no_such_object",
                "No role with ID '{}' in realm with ID '{}'".format(role_id, id)
            )

        del self.realms[id].roles[role_id]

    def get_router_components(self, details=None):
        """
        List application components currently running (embedded) in this router.
        """
        self.log.debug("{}.get_router_components".format(self.__class__.__name__))

        res = []
        for component in sorted(self.components.values(), key=lambda c: c.created):
            res.append({"id": component.id, "created": utcstr(component.created), "config": component.config})
        return res

    def start_router_component(self, id, config, details=None):
        """
        Dynamically start an application component to run next to the router in "embedded mode".

        :param id: The ID of the component to start.
        :type id: str
        :param config: The component configuration.
        :type config: obj
        """
        self.log.debug("{}.start_router_component".format(self.__class__.__name__),
                       id=id, config=config)

        # prohibit starting a component twice
        #
        if id in self.components:
            # FIX: removed stray doubled apostrophe after '{}' in the message
            emsg = "Could not start component: a component with ID '{}' is already running (or starting)".format(id)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.already_running", emsg)

        # check configuration
        #
        try:
            checkconfig.check_router_component(config)
        except Exception as e:
            emsg = "Invalid router component configuration: {}".format(e)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
        else:
            self.log.debug("Starting {type}-component on router.", type=config["type"])

        # resolve references to other entities
        #
        references = {}
        for ref in config.get("references", []):
            ref_type, ref_id = ref.split(":")
            if ref_type == u"connection":
                if ref_id in self._connections:
                    references[ref] = self._connections[ref_id]
                else:
                    emsg = "cannot resolve reference '{}' - no '{}' with ID '{}'".format(ref, ref_type, ref_id)
                    self.log.error(emsg)
                    raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
            else:
                emsg = "cannot resolve reference '{}' - invalid reference type '{}'".format(ref, ref_type)
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

        # create component config
        #
        realm = config["realm"]
        extra = config.get("extra", None)
        component_config = ComponentConfig(realm=realm, extra=extra)
        create_component = _appsession_loader(config)

        # .. and create and add an WAMP application session to
        # run the component next to the router
        #
        try:
            session = create_component(component_config)

            # any exception spilling out from user code in onXXX handlers is fatal!
            def panic(fail, msg):
                self.log.error("Fatal error in component: {} - {}".format(msg, fail.value))
                session.disconnect()
            session._swallow_error = panic
        except Exception as e:
            msg = "{}".format(e).strip()
            self.log.error("Component instantiation failed:\n\n{err}", err=msg)
            raise

        self.components[id] = RouterComponent(id, config, session)
        self._router_session_factory.add(session, authrole=config.get("role", u"anonymous"))
        self.log.debug("Added component {id}", id=id)

    def stop_router_component(self, id, details=None):
        """
        Stop an application component running on this router.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.stop_router_component``

        **Errors:**

        The procedure may raise the following errors:

        * ``crossbar.error.no_such_object`` - no component with given ID is currently running in this router
        * ``crossbar.error.cannot_stop`` - failed to stop the component running in this router

        :param id: The ID of the component to stop.
        :type id: unicode
        """
        self.log.debug("{}.stop_router_component".format(self.__class__.__name__), id=id)

        if id in self.components:
            self.log.debug("Worker {}: stopping component {}".format(self.config.extra.worker, id))

            try:
                # self._components[id].disconnect()

                # FIX: the original referenced `self._session_factory`, an attribute
                # never set on this class (onJoin sets `_router_session_factory`),
                # and passed the RouterComponent wrapper instead of its session —
                # remove() must be given the session that was add()-ed above.
                self._router_session_factory.remove(self.components[id].session)
                del self.components[id]
            except Exception as e:
                raise ApplicationError(u"crossbar.error.cannot_stop", "Failed to stop component {}: {}".format(id, e))
        else:
            raise ApplicationError(u"crossbar.error.no_such_object", "No component {}".format(id))

    def get_router_transports(self, details=None):
        """
        List currently running transports.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.get_router_transports``

        :returns: List of transports currently running.
        :rtype: list of dict
        """
        self.log.debug("{}.get_router_transports".format(self.__class__.__name__))

        res = []
        for transport in sorted(self.transports.values(), key=lambda c: c.created):
            res.append({"id": transport.id, "created": utcstr(transport.created), "config": transport.config})
        return res

    def start_router_transport(self, id, config, details=None):
        """
        Start a transport on this router and return when the transport has started.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.start_router_transport``

        The procedure takes a WAMP transport configuration with a listening endpoint, e.g.

        .. code-block:: javascript

            {
                "type": "websocket",
                "endpoint": {
                    "type": "tcp",
                    "port": 8080
                }
            }

        **Errors:**

        The procedure may raise the following errors:

        * ``crossbar.error.invalid_configuration`` - the provided transport configuration is invalid
        * ``crossbar.error.already_running`` - a transport with the given ID is already running (or starting)
        * ``crossbar.error.cannot_listen`` - could not listen on the configured listening endpoint of the transport
        * ``crossbar.error.class_import_failed`` - a side-by-side component could not be instantiated

        **Events:**

        The procedure will publish an event when the transport **is starting** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_starting``

        and publish an event when the transport **has started** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_started``

        :param id: The ID of the transport to start.
        :type id: unicode
        :param config: The transport configuration.
        :type config: dict
        """
        self.log.debug("{}.start_router_transport".format(self.__class__.__name__),
                       id=id, config=config)

        # prohibit starting a transport twice
        #
        if id in self.transports:
            emsg = "Could not start transport: a transport with ID '{}' is already running (or starting)".format(id)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.already_running", emsg)

        # check configuration
        #
        try:
            checkconfig.check_router_transport(config)
        except Exception as e:
            emsg = "Invalid router transport configuration: {}".format(e)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
        else:
            self.log.debug("Starting {}-transport on router.".format(config["type"]))

        # standalone WAMP-RawSocket transport
        #
        if config["type"] == "rawsocket":
            transport_factory = WampRawSocketServerFactory(self._router_session_factory, config)
            transport_factory.noisy = False

        # standalone WAMP-WebSocket transport
        #
        elif config["type"] == "websocket":
            transport_factory = WampWebSocketServerFactory(
                self._router_session_factory, self.config.extra.cbdir, config, self._templates
            )
            transport_factory.noisy = False

        # Flash-policy file server pseudo transport
        #
        elif config["type"] == "flashpolicy":
            transport_factory = FlashPolicyFactory(
                config.get("allowed_domain", None), config.get("allowed_ports", None)
            )

        # WebSocket testee pseudo transport
        #
        elif config["type"] == "websocket.testee":
            transport_factory = WebSocketTesteeServerFactory(config, self._templates)

        # Stream testee pseudo transport
        #
        elif config["type"] == "stream.testee":
            transport_factory = StreamTesteeServerFactory()

        # Twisted Web based transport
        #
        elif config["type"] == "web":
            options = config.get("options", {})

            # create Twisted Web root resource
            #
            if "/" in config["paths"]:
                root_config = config["paths"]["/"]
                root = self.create_resource(root_config, nested=False)
            else:
                root = Resource404(self._templates, b"")

            # create Twisted Web resources on all non-root paths configured
            #
            self.add_paths(root, config.get("paths", {}))

            # create the actual transport factory
            #
            transport_factory = Site(root)
            transport_factory.noisy = False

            # Web access logging
            #
            if not options.get("access_log", False):
                transport_factory.log = lambda _: None

            # Traceback rendering
            #
            transport_factory.displayTracebacks = options.get("display_tracebacks", False)

            # HSTS
            #
            if options.get("hsts", False):
                if "tls" in config["endpoint"]:
                    hsts_max_age = int(options.get("hsts_max_age", 31536000))
                    transport_factory.requestFactory = createHSTSRequestFactory(
                        transport_factory.requestFactory, hsts_max_age
                    )
                else:
                    self.log.warn("Warning: HSTS requested, but running on non-TLS - skipping HSTS")

        # Unknown transport type
        #
        else:
            # should not arrive here, since we did check_transport() in the beginning
            raise Exception("logic error")

        # create transport endpoint / listening port from transport factory
        #
        d = create_listening_port_from_config(
            config["endpoint"], transport_factory, self.config.extra.cbdir, self._reactor
        )

        def ok(port):
            self.transports[id] = RouterTransport(id, config, transport_factory, port)
            # FIX: removed stray doubled apostrophe after '{}' in the log line
            self.log.debug("Router transport '{}' started and listening".format(id))
            return

        def fail(err):
            emsg = "Cannot listen on transport endpoint: {}".format(err.value)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.cannot_listen", emsg)

        d.addCallbacks(ok, fail)
        return d

    def add_paths(self, resource, paths):
        """
        Add all configured non-root paths under a resource.

        :param resource: The parent resource under which to add paths.
        :type resource: Resource
        :param paths: The path configurations.
        :type paths: dict
        """
        for path in sorted(paths):

            if isinstance(path, six.text_type):
                webPath = path.encode("utf8")
            else:
                webPath = path

            # FIX: compare the already-encoded bytes path; the original compared
            # the (possibly text) `path` against b"/", which on Python 3 never
            # matches, so the root path would wrongly be re-added as a child.
            if webPath != b"/":
                resource.putChild(webPath, self.create_resource(paths[path]))

    def create_resource(self, path_config, nested=True):
        """
        Creates child resource to be added to the parent.

        :param path_config: Configuration for the new child resource.
        :type path_config: dict

        :returns: Resource -- the new child resource
        """
        # WAMP-WebSocket resource
        #
        if path_config["type"] == "websocket":

            ws_factory = WampWebSocketServerFactory(
                self._router_session_factory, self.config.extra.cbdir, path_config, self._templates
            )

            # FIXME: Site.start/stopFactory should start/stop factories wrapped as Resources
            ws_factory.startFactory()

            return WebSocketResource(ws_factory)

        # Static file hierarchy resource
        #
        elif path_config["type"] == "static":

            static_options = path_config.get("options", {})

            if "directory" in path_config:

                static_dir = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config["directory"]))

            elif "package" in path_config:

                if "resource" not in path_config:
                    raise ApplicationError(u"crossbar.error.invalid_configuration", "missing resource")

                try:
                    mod = importlib.import_module(path_config["package"])
                except ImportError as e:
                    emsg = "Could not import resource {} from package {}: {}".format(
                        path_config["resource"], path_config["package"], e
                    )
                    self.log.error(emsg)
                    raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
                else:
                    try:
                        static_dir = os.path.abspath(
                            pkg_resources.resource_filename(path_config["package"], path_config["resource"])
                        )
                    except Exception as e:
                        emsg = "Could not import resource {} from package {}: {}".format(
                            path_config["resource"], path_config["package"], e
                        )
                        self.log.error(emsg)
                        raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            else:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing web spec")

            static_dir = static_dir.encode("ascii", "ignore")  # http://stackoverflow.com/a/20433918/884770

            # create resource for file system hierarchy
            #
            if static_options.get("enable_directory_listing", False):
                static_resource_class = StaticResource
            else:
                static_resource_class = StaticResourceNoListing

            cache_timeout = static_options.get("cache_timeout", DEFAULT_CACHE_TIMEOUT)

            static_resource = static_resource_class(static_dir, cache_timeout=cache_timeout)

            # set extra MIME types
            #
            static_resource.contentTypes.update(EXTRA_MIME_TYPES)
            if "mime_types" in static_options:
                static_resource.contentTypes.update(static_options["mime_types"])
            patchFileContentTypes(static_resource)

            # render 404 page on any concrete path not found
            #
            static_resource.childNotFound = Resource404(self._templates, static_dir)

            return static_resource

        # WSGI resource
        #
        elif path_config["type"] == "wsgi":

            if not _HAS_WSGI:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "WSGI unsupported")

            if "module" not in path_config:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app module")

            if "object" not in path_config:
                raise ApplicationError(u"crossbar.error.invalid_configuration", "missing WSGI app object")

            # import WSGI app module and object
            mod_name = path_config["module"]
            try:
                mod = importlib.import_module(mod_name)
            except ImportError as e:
                raise ApplicationError(
                    u"crossbar.error.invalid_configuration",
                    "WSGI app module '{}' import failed: {} - Python search path was {}".format(mod_name, e, sys.path),
                )
            else:
                obj_name = path_config["object"]
                if obj_name not in mod.__dict__:
                    raise ApplicationError(
                        u"crossbar.error.invalid_configuration",
                        "WSGI app object '{}' not in module '{}'".format(obj_name, mod_name),
                    )
                else:
                    app = getattr(mod, obj_name)

            # Create a threadpool for running the WSGI requests in
            pool = ThreadPool(
                maxthreads=path_config.get("maxthreads", 20),
                minthreads=path_config.get("minthreads", 0),
                name="crossbar_wsgi_threadpool",
            )
            self._reactor.addSystemEventTrigger("before", "shutdown", pool.stop)
            pool.start()

            # Create a Twisted Web WSGI resource from the user's WSGI application object
            try:
                wsgi_resource = WSGIResource(self._reactor, pool, app)
                if not nested:
                    wsgi_resource = WSGIRootResource(wsgi_resource, {})
            except Exception as e:
                raise ApplicationError(
                    u"crossbar.error.invalid_configuration",
                    "could not instantiate WSGI resource: {}".format(e)
                )
            else:
                return wsgi_resource

        # Redirecting resource
        #
        elif path_config["type"] == "redirect":
            redirect_url = path_config["url"].encode("ascii", "ignore")
            return RedirectResource(redirect_url)

        # JSON value resource
        #
        elif path_config["type"] == "json":
            value = path_config["value"]
            return JsonResource(value)

        # CGI script resource
        #
        elif path_config["type"] == "cgi":

            cgi_processor = path_config["processor"]
            cgi_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config["directory"]))
            cgi_directory = cgi_directory.encode("ascii", "ignore")  # http://stackoverflow.com/a/20433918/884770

            return CgiDirectory(cgi_directory, cgi_processor, Resource404(self._templates, cgi_directory))

        # WAMP-Longpoll transport resource
        #
        elif path_config["type"] == "longpoll":

            path_options = path_config.get("options", {})

            lp_resource = WampLongPollResource(
                self._router_session_factory,
                timeout=path_options.get("request_timeout", 10),
                killAfter=path_options.get("session_timeout", 30),
                queueLimitBytes=path_options.get("queue_limit_bytes", 128 * 1024),
                queueLimitMessages=path_options.get("queue_limit_messages", 100),
                debug=path_options.get("debug", False),
                debug_transport_id=path_options.get("debug_transport_id", None),
            )
            lp_resource._templates = self._templates

            return lp_resource

        # Publisher resource (part of REST-bridge)
        #
        elif path_config["type"] == "publisher":

            # create a vanilla session: the publisher will use this to inject events
            #
            publisher_session_config = ComponentConfig(realm=path_config["realm"], extra=None)
            publisher_session = ApplicationSession(publisher_session_config)

            # add the publisher session to the router
            #
            self._router_session_factory.add(publisher_session, authrole=path_config.get("role", "anonymous"))

            # now create the publisher Twisted Web resource
            #
            return PublisherResource(path_config.get("options", {}), publisher_session)

        # Webhook resource (part of REST-bridge)
        #
        elif path_config["type"] == "webhook":

            # create a vanilla session: the webhook will use this to inject events
            #
            webhook_session_config = ComponentConfig(realm=path_config["realm"], extra=None)
            webhook_session = ApplicationSession(webhook_session_config)

            # add the webhook session to the router
            #
            self._router_session_factory.add(webhook_session, authrole=path_config.get("role", "anonymous"))

            # now create the webhook Twisted Web resource
            #
            return WebhookResource(path_config.get("options", {}), webhook_session)

        # Caller resource (part of REST-bridge)
        #
        elif path_config["type"] == "caller":

            # create a vanilla session: the caller will use this to inject calls
            #
            caller_session_config = ComponentConfig(realm=path_config["realm"], extra=None)
            caller_session = ApplicationSession(caller_session_config)

            # add the calling session to the router
            #
            self._router_session_factory.add(caller_session, authrole=path_config.get("role", "anonymous"))

            # now create the caller Twisted Web resource
            #
            return CallerResource(path_config.get("options", {}), caller_session)

        # File Upload resource
        #
        elif path_config["type"] == "upload":

            upload_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config["directory"]))
            upload_directory = upload_directory.encode("ascii", "ignore")  # http://stackoverflow.com/a/20433918/884770
            if not os.path.isdir(upload_directory):
                emsg = "configured upload directory '{}' in file upload resource isn't a directory".format(
                    upload_directory
                )
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            if "temp_directory" in path_config:
                temp_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config["temp_directory"]))
                temp_directory = temp_directory.encode("ascii", "ignore")  # http://stackoverflow.com/a/20433918/884770
            else:
                temp_directory = os.path.abspath(tempfile.gettempdir())
                temp_directory = os.path.join(temp_directory, "crossbar-uploads")
                if not os.path.exists(temp_directory):
                    os.makedirs(temp_directory)

            if not os.path.isdir(temp_directory):
                emsg = "configured temp directory '{}' in file upload resource isn't a directory".format(temp_directory)
                self.log.error(emsg)
                raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

            # file upload progress and finish events are published via this session
            #
            upload_session_config = ComponentConfig(realm=path_config["realm"], extra=None)
            upload_session = ApplicationSession(upload_session_config)

            self._router_session_factory.add(upload_session, authrole=path_config.get("role", "anonymous"))

            self.log.info(
                "File upload resource started. Uploads to {upl} using temp folder {tmp}.",
                upl=upload_directory,
                tmp=temp_directory,
            )

            return FileUploadResource(
                upload_directory,
                temp_directory,
                path_config["form_fields"],
                upload_session,
                path_config.get("options", {}),
            )

        # Generic Twisted Web resource
        #
        elif path_config["type"] == "resource":

            try:
                klassname = path_config["classname"]

                self.log.debug("Starting class '{}'".format(klassname))

                c = klassname.split(".")
                module_name, klass_name = ".".join(c[:-1]), c[-1]
                module = importlib.import_module(module_name)
                make = getattr(module, klass_name)

                return make(path_config.get("extra", {}))
            except Exception as e:
                emsg = "Failed to import class '{}' - {}".format(klassname, e)
                self.log.error(emsg)
                self.log.error("PYTHONPATH: {pythonpath}", pythonpath=sys.path)
                raise ApplicationError(u"crossbar.error.class_import_failed", emsg)

        # Schema Docs resource
        #
        elif path_config["type"] == "schemadoc":

            realm = path_config["realm"]

            if realm not in self.realm_to_id:
                raise ApplicationError(
                    u"crossbar.error.no_such_object", "No realm with URI '{}' configured".format(realm)
                )

            realm_id = self.realm_to_id[realm]

            realm_schemas = self.realms[realm_id].session._schemas

            return SchemaDocResource(self._templates, realm, realm_schemas)

        # Nested subpath resource
        #
        elif path_config["type"] == "path":

            nested_paths = path_config.get("paths", {})

            if "/" in nested_paths:
                nested_resource = self.create_resource(nested_paths["/"])
            else:
                nested_resource = Resource()

            # nest subpaths under the current entry
            #
            self.add_paths(nested_resource, nested_paths)

            return nested_resource

        else:
            raise ApplicationError(
                u"crossbar.error.invalid_configuration",
                "invalid Web path type '{}' in {} config".format(path_config["type"], "nested" if nested else "root"),
            )

    def stop_router_transport(self, id, details=None):
        """
        Stop a transport currently running in this router and return when
        the transport has stopped.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.stop_router_transport``

        **Errors:**

        The procedure may raise the following errors:

        * ``crossbar.error.not_running`` - no transport with given ID is currently running on this router (or the transport is already stopping)
        * ``crossbar.error.cannot_stop`` - could not stop listening on the transport listening endpoint

        **Events:**

        The procedure will publish an event when the transport **is stopping** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_stopping``

        and publish an event when the transport **has stopped** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_router_transport_stopped``

        :param id: The ID of the transport to stop.
        :type id: unicode
        """
        self.log.debug("{}.stop_router_transport".format(self.__class__.__name__), id=id)

        # FIXME
        if id not in self.transports:
            # if not id in self.transports or self.transports[id].status != 'started':
            emsg = "Cannot stop transport: no transport with ID '{}' or transport is already stopping".format(id)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.not_running", emsg)

        self.log.debug("Stopping transport with ID '{}'".format(id))

        d = self.transports[id].port.stopListening()

        def ok(_):
            del self.transports[id]

        def fail(err):
            raise ApplicationError(u"crossbar.error.cannot_stop", "Failed to stop transport: {}".format(str(err.value)))

        d.addCallbacks(ok, fail)
        return d

    def get_router_links(self, details=None):
        """
        List currently running router links.
        """
        self.log.debug("{}.get_router_links".format(self.__class__.__name__))

        raise NotImplementedError()

    def start_router_link(self, id, config, details=None):
        """
        Start a link on this router.

        :param id: The ID of the link to start.
        :type id: str
        :param config: The link configuration.
        :type config: dict
        """
        self.log.debug("{}.start_router_link".format(self.__class__.__name__),
                       id=id, config=config)

        raise NotImplementedError()

    def stop_router_link(self, id, details=None):
        """
        Stop a link on this router.

        :param id: The ID of the link to stop.
        :type id: str
        """
        self.log.debug("{}.stop_router_link".format(self.__class__.__name__), id=id)

        raise NotImplementedError()
class TestBrokerPublish(unittest.TestCase):
    """
    Tests for crossbar.router.broker.Broker
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """

        # create a router factory
        self.router_factory = RouterFactory(None, None)

        # start a realm
        self.realm = RouterRealm(None, {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything (prefix-match on 'com.example.')
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(
                self.router,
                u'test_role',
                default_permissions={
                    u'uri': u'com.example.',
                    u'match': u'prefix',
                    u'allow': {
                        u'call': True,
                        u'register': True,
                        u'publish': True,
                        u'subscribe': True,
                    }
                }
            )
        )

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        pass

    def test_add(self):
        """
        Create an application session and add it to a router to
        run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                # resolving the future proves the session joined the realm
                txaio.resolve(d, None)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session)

        return d

    def test_application_session_internal_error(self):
        """
        simulate an internal error triggering the 'onJoin' error-case from
        RouterApplicationSession's send() method (from the Hello msg)
        """
        # setup
        the_exception = RuntimeError("sadness")
        errors = []

        class TestSession(ApplicationSession):
            def onJoin(self, *args, **kw):
                raise the_exception

            def onUserError(self, fail, msg):
                errors.append((fail, msg))

        session = TestSession(types.ComponentConfig(u'realm1'))
        from crossbar.router.session import RouterApplicationSession

        # Note to self: original code was logging directly in
        # RouterApplicationSession -- which *may* actually be better?
        # or not...
        with mock.patch.object(RouterApplicationSession, 'log') as logger:
            # this should call onJoin, triggering our error
            self.session_factory.add(session)

        if True:
            self.assertEqual(1, len(errors), "Didn't see our error")
            self.assertEqual(the_exception, errors[0][0].value)

        else:
            # check we got the right log.failure() call
            self.assertTrue(len(logger.method_calls) > 0)
            call = logger.method_calls[0]
            # for a MagicMock call-object, 0th thing is the method-name, 1st
            # thing is the arg-tuple, 2nd thing is the kwargs.
            self.assertEqual(call[0], 'failure')
            self.assertEqual(call[1][0].value, the_exception)

    def test_router_session_internal_error_onHello(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onHello
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onHello = boom
        session.onOpen(transport)
        msg = message.Hello(u'realm1', dict(caller=role.RoleCallerFeatures()))

        # XXX think: why isn't this using _RouterSession.log?
        from crossbar.router.session import RouterSession
        with mock.patch.object(RouterSession, 'log') as logger:
            # do the test; should call onHello which is now "boom", above
            session.onMessage(msg)

        # check we got the right log.failure() call
        self.assertTrue(len(logger.method_calls) > 0)
        call = logger.method_calls[0]
        # for a MagicMock call-object, 0th thing is the method-name, 1st
        # thing is the arg-tuple, 2nd thing is the kwargs.
        self.assertEqual(call[0], 'failure')
        self.assertTrue('failure' in call[2])
        self.assertEqual(call[2]['failure'].value, the_exception)

    def test_router_session_internal_error_onAuthenticate(self):
        """
        similar to above, but during _RouterSession's onMessage handling,
        where it calls self.onAuthenticate)
        """
        # setup
        transport = mock.MagicMock()
        transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        the_exception = RuntimeError("kerblam")

        def boom(*args, **kw):
            raise the_exception

        session = self.session_factory()  # __call__ on the _RouterSessionFactory
        session.onAuthenticate = boom
        session.onOpen(transport)
        msg = message.Authenticate(u'bogus signature')

        # do the test; should call onHello which is now "boom", above
        session.onMessage(msg)

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors), "Expected just one error: {}".format(errors))
        self.assertTrue(the_exception in [fail.value for fail in errors])

    def test_router_session_goodbye_custom_message(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = mock.Mock()
        session._realm = u'realm'
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        session.reset_mock()
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        leaves = [call for call in session.mock_calls if call[0] == 'onLeave']
        self.assertEqual(1, len(leaves))
        details = leaves[0][1][0]
        self.assertEqual(u'wamp.reason.logout', details.reason)
        self.assertEqual(u'some custom message', details.message)

    def test_router_session_goodbye_onLeave_error(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = mock.Mock()
        the_exception = RuntimeError("onLeave fails")

        def boom(*args, **kw):
            raise the_exception

        session.onLeave = mock.Mock(side_effect=boom)
        session._realm = u'realm'
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        session.reset_mock()
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors))
        self.assertEqual(the_exception, errors[0].value)

    def test_router_session_goodbye_fire_disconnect_error(self):
        """
        Reason should be propagated properly from Goodbye message
        """
        from crossbar.router.session import RouterApplicationSession
        session = mock.Mock()
        the_exception = RuntimeError("sad times at ridgemont high")

        def boom(*args, **kw):
            # only the 'disconnect' lifecycle event fails
            if args[0] == 'disconnect':
                return defer.fail(the_exception)
            return defer.succeed(None)

        session.fire = mock.Mock(side_effect=boom)
        session._realm = u'realm'
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        session.reset_mock()
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        errors = self.flushLoggedErrors()
        self.assertEqual(1, len(errors))
        self.assertEqual(the_exception, errors[0].value)

    def test_router_session_lifecycle(self):
        """
        We see all 'lifecycle' notifications.
        """
        from crossbar.router.session import RouterApplicationSession

        def mock_fire(name, *args, **kw):
            fired.append(name)
            return defer.succeed(None)

        fired = []
        session = mock.Mock()
        session._realm = u'realm'
        session.fire = mock.Mock(side_effect=mock_fire)
        router_factory = mock.Mock()
        rap = RouterApplicationSession(session, router_factory)

        # we never fake out the 'Welcome' message, so there will be no
        # 'ready' notification...
        rap.send(message.Hello(u'realm', {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Goodbye(u'wamp.reason.logout', u'some custom message'))

        self.assertTrue('connect' in fired)
        self.assertTrue('join' in fired)
        self.assertTrue('ready' in fired)
        self.assertTrue('leave' in fired)
        self.assertTrue('disconnect' in fired)

    def test_add_and_subscribe(self):
        """
        Create an application session that subscribes to some
        topic and add it to a router to run embedded.
        """
        d = txaio.create_future()

        class TestSession(ApplicationSession):

            def onJoin(self, details):
                d2 = self.subscribe(lambda: None, u'com.example.topic1')

                def ok(_):
                    txaio.resolve(d, None)

                def error(err):
                    txaio.reject(d, err)

                txaio.add_callbacks(d2, ok, error)

        session = TestSession(types.ComponentConfig(u'realm1'))

        self.session_factory.add(session, authrole=u'test_role')

        return d

    def test_publish_closed_session(self):
        """
        ensure a session doesn't get Events if it's closed
        (see also issue #431)
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass
        session0 = TestSession()
        session1 = TestSession()
        router = mock.MagicMock()
        router.new_correlation_id = lambda: u'fake correlation id'
        broker = Broker(router, reactor)

        # let's just "cheat" our way a little to the right state by
        # injecting our subscription "directly" (e.g. instead of
        # faking out an entire Subscribe etc. flow
        # ...so we need _subscriptions_map to have at least one
        # subscription (our test one) for the topic we'll publish to
        broker._subscription_map.add_observer(session0, u'test.topic')

        # simulate the session state we want, which is that a
        # transport is connected (._transport != None) but there
        # _session_id *is* None (not joined yet, or left already)
        self.assertIs(None, session0._session_id)
        session0._transport = mock.MagicMock()
        session0._transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        session1._session_id = 1234  # "from" session should look connected + joined
        session1._transport = mock.MagicMock()
        session1._transport.channel_id = b'aaaabeef'

        # here's the main "cheat"; we're faking out the
        # router.authorize because we need it to callback immediately
        router.authorize = mock.MagicMock(return_value=txaio.create_future_success(dict(allow=True, cache=False, disclose=True)))

        # now we scan call "processPublish" such that we get to the
        # condition we're interested in (this "comes from" session1
        # beacuse by default publishes don't go to the same session)
        pubmsg = message.Publish(123, u'test.topic')
        broker.processPublish(session1, pubmsg)

        # neither session should have sent anything on its transport
        self.assertEquals(session0._transport.method_calls, [])
        self.assertEquals(session1._transport.method_calls, [])

    def test_publish_traced_events(self):
        """
        with two subscribers and message tracing the last event should
        have a magic flag
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass
        session0 = TestSession()
        session1 = TestSession()
        session2 = TestSession()
        router = mock.MagicMock()
        router.send = mock.Mock()
        router.new_correlation_id = lambda: u'fake correlation id'
        router.is_traced = True
        broker = Broker(router, reactor)

        # let's just "cheat" our way a little to the right state by
        # injecting our subscription "directly" (e.g. instead of
        # faking out an entire Subscribe etc. flow
        # ...so we need _subscriptions_map to have at least one
        # subscription (our test one) for the topic we'll publish to
        broker._subscription_map.add_observer(session0, u'test.topic')
        broker._subscription_map.add_observer(session1, u'test.topic')

        session0._session_id = 1000
        session0._transport = mock.MagicMock()
        session0._transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        session1._session_id = 1001
        session1._transport = mock.MagicMock()
        session1._transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')
        session2._session_id = 1002
        session2._transport = mock.MagicMock()
        session2._transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')

        # here's the main "cheat"; we're faking out the
        # router.authorize because we need it to callback immediately
        router.authorize = mock.MagicMock(return_value=txaio.create_future_success(dict(allow=True, cache=False, disclose=True)))

        # now we scan call "processPublish" such that we get to the
        # condition we're interested in (this "comes from" session1
        # beacuse by default publishes don't go to the same session)
        pubmsg = message.Publish(123, u'test.topic')
        broker.processPublish(session2, pubmsg)

        # extract all the event calls
        events = [
            call[1][1]
            for call in router.send.mock_calls
            if call[1][0] in [session0, session1, session2]
        ]

        self.assertEqual(2, len(events))
        self.assertFalse(events[0].correlation_is_last)
        self.assertTrue(events[1].correlation_is_last)

    def test_publish_traced_events_batched(self):
        """
        with two subscribers and message tracing the last event should
        have a magic flag
        """
        # we want to trigger a deeply-nested condition in
        # processPublish in class Broker -- lets try w/o refactoring
        # anything first...

        class TestSession(ApplicationSession):
            pass
        session0 = TestSession()
        session1 = TestSession()
        session2 = TestSession()
        session3 = TestSession()
        session4 = TestSession()
        # NOTE! We ensure that "session0" (the publishing session) is
        # *last* in the observation-list to trigger a (now fixed)
        # edge-case)
        sessions = [session1, session2, session3, session4, session0]
        router = mock.MagicMock()
        router.send = mock.Mock()
        router.new_correlation_id = lambda: u'fake correlation id'
        router.is_traced = True
        clock = Clock()
        with replace_loop(clock):
            broker = Broker(router, clock)
            broker._options.event_dispatching_chunk_size = 2

            # to ensure we get "session0" last, we turn on ordering in
            # the observations
            broker._subscription_map._ordered = 1

            # let's just "cheat" our way a little to the right state by
            # injecting our subscription "directly" (e.g. instead of
            # faking out an entire Subscribe etc. flow
            # ...so we need _subscriptions_map to have at least one
            # subscription (our test one) for the topic we'll publish to
            for session in sessions:
                broker._subscription_map.add_observer(session, u'test.topic')

            for i, sess in enumerate(sessions):
                sess._session_id = 1000 + i
                sess._transport = mock.MagicMock()
                sess._transport.get_channel_id = mock.MagicMock(return_value=b'deadbeef')

            # here's the main "cheat"; we're faking out the
            # router.authorize because we need it to callback immediately
            router.authorize = mock.MagicMock(return_value=txaio.create_future_success(dict(allow=True, cache=False, disclose=True)))

            # now we scan call "processPublish" such that we get to the
            # condition we're interested in; should go to all sessions
            # except session0
            pubmsg = message.Publish(123, u'test.topic')
            broker.processPublish(session0, pubmsg)

            clock.advance(1)
            clock.advance(1)

            # extract all the event calls
            events = [
                call[1][1]
                for call in router.send.mock_calls
                if call[1][0] in [session0, session1, session2, session3, session4]
            ]

            # all except session0 should have gotten an event, and
            # session4's should have the "last" flag set
            self.assertEqual(4, len(events))
            self.assertFalse(events[0].correlation_is_last)
            self.assertFalse(events[1].correlation_is_last)
            self.assertFalse(events[2].correlation_is_last)
            self.assertTrue(events[3].correlation_is_last)
class Node(object):
    """
    Crossbar.io Community node personality.

    A Crossbar.io node runs a controller process and one or multiple
    worker processes. A single Crossbar.io node runs exactly one instance
    of this class, hence this class can be considered a system singleton.
    """

    # http://patorjk.com/software/taag/#p=display&h=1&f=Stick%20Letters&t=Crossbar.io
    BANNER = r"""     __  __  __  __  __  __      __     __
    /  `|__)/  \/__`/__`|__) /\ |__)  |/  \
    \__,|  \\__/.__/.__/|__)/~~\|  \. |\__/
    """

    PERSONALITY = "Crossbar.io COMMUNITY"

    NODE_CONTROLLER = NodeControllerSession

    ROUTER_SERVICE = RouterServiceSession

    _native_workers = default_native_workers()

    log = make_logger()

    def __init__(self, cbdir=None, reactor=None, native_workers=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode

        :param reactor: Reactor to run on.
        :type reactor: obj or None
        """
        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # allow overriding to add (or remove) native-worker types
        if native_workers is not None:
            self._native_workers = native_workers

        # the node controller realm
        self._realm = u'crossbar'

        # config of this node.
        self._config = None

        # node private key autobahn.wamp.cryptosign.SigningKey
        self._node_key = None

        # when running in managed mode, this will hold the uplink session to CFC
        self._manager = None

        # the node's management realm when running in managed mode (this comes from CFC!)
        self._management_realm = None

        # the node's ID when running in managed mode (this comes from CFC!)
        self._node_id = None

        # node extra when running in managed mode (this comes from CFC!)
        self._node_extra = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # for node elements started under specific IDs, and where
        # the node configuration does not specify an ID, use a generic
        # name numbered sequentially using the counters here
        self._worker_no = 1
        self._realm_no = 1
        self._role_no = 1
        self._connection_no = 1
        self._transport_no = 1
        self._component_no = 1
        # FIX: _configure_native_worker_router consumes self._uplink_no when
        # auto-naming realm uplinks, but this counter was never initialized
        # here, leading to an AttributeError for uplinks without explicit IDs
        self._uplink_no = 1

    def maybe_generate_key(self, cbdir, privkey_path=u'key.priv', pubkey_path=u'key.pub'):
        """
        Make sure a node key pair exists in the node directory: verify an
        existing pair (re-creating a missing public key file from the private
        one), or generate a fresh Ed25519 pair. Also enforces sane file
        permissions on both key files.

        :returns: The node public key, in HEX encoding.
        """
        privkey_path = os.path.join(cbdir, privkey_path)
        pubkey_path = os.path.join(cbdir, pubkey_path)

        if os.path.exists(privkey_path):

            # node private key seems to exist already .. check!

            priv_tags = _parse_keyfile(privkey_path, private=True)
            for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519', u'private-key-ed25519']:
                if tag not in priv_tags:
                    raise Exception("Corrupt node private key file {} - {} tag not found".format(privkey_path, tag))

            privkey_hex = priv_tags[u'private-key-ed25519']
            privkey = SigningKey(privkey_hex, encoder=HexEncoder)
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            # the stored public key must match the one derived from the private key
            if priv_tags[u'public-key-ed25519'] != pubkey_hex:
                raise Exception(
                    ("Inconsistent node private key file {} - public-key-ed25519 doesn't"
                     " correspond to private-key-ed25519").format(pubkey_path)
                )

            if os.path.exists(pubkey_path):
                pub_tags = _parse_keyfile(pubkey_path, private=False)
                for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519']:
                    if tag not in pub_tags:
                        raise Exception("Corrupt node public key file {} - {} tag not found".format(pubkey_path, tag))

                if pub_tags[u'public-key-ed25519'] != pubkey_hex:
                    raise Exception(
                        ("Inconsistent node public key file {} - public-key-ed25519 doesn't"
                         " correspond to private-key-ed25519").format(pubkey_path)
                    )
            else:
                # public key file got lost: recreate it from the private key file
                self.log.info(
                    "Node public key file {pub_path} not found - re-creating from node private key file {priv_path}",
                    pub_path=pubkey_path,
                    priv_path=privkey_path,
                )
                pub_tags = OrderedDict([
                    (u'creator', priv_tags[u'creator']),
                    (u'created-at', priv_tags[u'created-at']),
                    (u'machine-id', priv_tags[u'machine-id']),
                    (u'public-key-ed25519', pubkey_hex),
                ])
                msg = u'Crossbar.io node public key\n\n'
                _write_node_key(pubkey_path, pub_tags, msg)

            self.log.debug("Node key already exists (public key: {hex})", hex=pubkey_hex)

        else:
            # node private key does not yet exist: generate one

            privkey = SigningKey.generate()
            privkey_hex = privkey.encode(encoder=HexEncoder).decode('ascii')
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            # first, write the public file
            tags = OrderedDict([
                (u'creator', _creator()),
                (u'created-at', utcnow()),
                (u'machine-id', _machine_id()),
                (u'public-key-ed25519', pubkey_hex),
            ])
            msg = u'Crossbar.io node public key\n\n'
            _write_node_key(pubkey_path, tags, msg)

            # now, add the private key and write the private file
            tags[u'private-key-ed25519'] = privkey_hex
            msg = u'Crossbar.io node private key - KEEP THIS SAFE!\n\n'
            _write_node_key(privkey_path, tags, msg)

            self.log.info("New node key pair generated!")

        # fix file permissions on node public/private key files
        # note: we use decimals instead of octals as octal literals have changed between Py2/3
        #
        if os.stat(pubkey_path).st_mode & 511 != 420:  # 420 (decimal) == 0644 (octal)
            os.chmod(pubkey_path, 420)
            self.log.info("File permissions on node public key fixed!")

        if os.stat(privkey_path).st_mode & 511 != 384:  # 384 (decimal) == 0600 (octal)
            os.chmod(privkey_path, 384)
            self.log.info("File permissions on node private key fixed!")

        self._node_key = cryptosign.SigningKey(privkey)

        return pubkey_hex

    def load(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in empty config.
        """
        if configfile:
            configpath = os.path.abspath(os.path.join(self._cbdir, configfile))

            self.log.debug('Loading node configuration from "{configpath}" ..',
                           configpath=configpath)

            # the following will read the config, check the config and replace
            # environment variable references in configuration values ("${MYVAR}") and
            # finally return the parsed configuration object
            self._config = checkconfig.check_config_file(configpath, self._native_workers)

            self.log.info('Node configuration loaded from "{configpath}"',
                          configpath=configpath)
        else:
            self._config = {
                u'version': 2,
                u'controller': {},
                u'workers': []
            }
            checkconfig.check_config(self._config, self._native_workers)
            self.log.info('Node configuration loaded from built-in config.')

    def _add_global_roles(self):
        # hook for personalities that install extra roles on the node router
        self.log.info('No extra node router roles')

    def _add_worker_role(self, worker_auth_role, options):
        """
        Install the static-auth role under which a newly started worker
        attaches to the node management router.
        """
        worker_role_config = {
            u"name": worker_auth_role,
            u"permissions": [
                # the worker requires these permissions to work:
                {
                    # worker_auth_role: "crossbar.worker.worker-001"
                    u"uri": worker_auth_role,
                    u"match": u"prefix",
                    u"allow": {
                        u"call": False,
                        u"register": True,
                        u"publish": True,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                },
                {
                    u"uri": u"crossbar.get_status",
                    u"match": u"exact",
                    u"allow": {
                        u"call": True,
                        u"register": False,
                        u"publish": False,
                        u"subscribe": False
                    },
                    u"disclose": {
                        u"caller": False,
                        u"publisher": False
                    },
                    u"cache": True
                }
            ]
        }
        self._router_factory.add_role(self._realm, worker_role_config)

    def _drop_worker_role(self, worker_auth_role):
        # remove a worker's role when the worker goes away
        self._router_factory.drop_role(self._realm, worker_auth_role)

    def _extend_worker_args(self, args, options):
        # hook for personalities to add extra worker command line arguments
        pass

    def _add_extra_controller_components(self, controller_options):
        # hook for personalities to attach extra controller-side components
        pass

    def _set_shutdown_triggers(self, controller_options):
        # allow to override node shutdown triggers
        #
        if 'shutdown' in controller_options:
            self._node_shutdown_triggers = controller_options['shutdown']
            self.log.info("Using node shutdown triggers {triggers} from configuration", triggers=self._node_shutdown_triggers)
        else:
            self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]
            self.log.info("Using default node shutdown triggers {triggers}", triggers=self._node_shutdown_triggers)

    @inlineCallbacks
    def start(self):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.
        """
        if not self._config:
            raise Exception("No node configuration set")

        # get controller config/options
        #
        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # set controller process title
        #
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # local node management router
        #
        self._router_factory = RouterFactory(self._node_id, None)
        self._router_session_factory = RouterSessionFactory(self._router_factory)
        rlm_config = {
            'name': self._realm
        }
        rlm = RouterRealm(None, rlm_config)
        router = self._router_factory.start_realm(rlm)

        # setup global static roles
        #
        self._add_global_roles()

        # always add a realm service session
        #
        cfg = ComponentConfig(self._realm)
        rlm.session = (self.ROUTER_SERVICE)(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')
        self.log.debug('Router service session attached [{router_service}]', router_service=qual(self.ROUTER_SERVICE))

        # add the node controller singleton component
        #
        self._controller = self.NODE_CONTROLLER(self)

        self._router_session_factory.add(self._controller, authrole=u'trusted')
        self.log.debug('Node controller attached [{node_controller}]', node_controller=qual(self.NODE_CONTROLLER))

        # add extra node controller components
        #
        self._add_extra_controller_components(controller_options)

        # setup Node shutdown triggers
        #
        self._set_shutdown_triggers(controller_options)

        panic = False
        try:
            # startup the node personality ..
            yield self._startup()

            # .. and notify systemd that we are fully up and running
            try:
                import sdnotify
                sdnotify.SystemdNotifier().notify("READY=1")
            except Exception:
                # deliberate best-effort: do nothing on non-systemd platforms
                # (narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are not swallowed here)
                pass
        except ApplicationError as e:
            panic = True
            self.log.error("{msg}", msg=e.error_message())
        except Exception:
            panic = True
            self.log.failure()
            self.log.error('fatal: could not startup node')

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    def _startup(self):
        # the community personality starts up purely from the local config
        return self._configure_node_from_config(self._config)

    @inlineCallbacks
    def _configure_node_from_config(self, config):
        """
        Startup elements in the node as specified in the provided node configuration.
        """
        self.log.info('Configuring node from local configuration ...')

        # get controller configuration subpart
        controller = config.get('controller', {})

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.call(u'crossbar.start_manhole', controller['manhole'], options=CallOptions())
            self.log.debug("controller: manhole started")

        # startup all workers
        workers = config.get('workers', [])
        if len(workers):
            self.log.info('Starting {nworkers} workers ...', nworkers=len(workers))
        else:
            self.log.info('No workers configured!')

        for worker in workers:

            # worker ID
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                worker_id = u'worker-{:03d}'.format(self._worker_no)
                self._worker_no += 1

            # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
            worker_type = worker['type']

            # native worker processes setup
            if worker_type in self._native_workers:

                # set logname depending on native worker type
                worker_logname = '{} "{}"'.format(self._native_workers[worker_type]['logname'], worker_id)

                # any worker specific options
                worker_options = worker.get('options', {})

                # now actually start the (native) worker ..
                yield self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker_options, options=CallOptions())

                # setup native worker generic stuff
                method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_'))
                try:
                    config_fn = getattr(self, method_name)
                except AttributeError:
                    raise ValueError(
                        "A native worker of type '{}' is configured but "
                        "there is no method '{}' on {}".format(worker_type, method_name, type(self))
                    )
                yield config_fn(worker_logname, worker_id, worker)

            # guest worker processes setup
            elif worker_type == u'guest':

                # now actually start the (guest) worker ..

                # FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers
                # only take the options (which is part of the whole config item for the worker)
                yield self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker, options=CallOptions())

            else:
                raise Exception('logic error: unexpected worker_type="{}"'.format(worker_type))

        self.log.info('Local node configuration applied successfully!')

    @inlineCallbacks
    def _configure_native_worker_common(self, worker_logname, worker_id, worker):
        """
        Configuration steps shared by all native worker types: CPU affinity
        and (optionally) Manhole.
        """
        # expanding PYTHONPATH of the newly started worker is now done
        # directly in NodeControllerSession._start_native_worker
        worker_options = worker.get('options', {})
        if False:
            if 'pythonpath' in worker_options:
                added_paths = yield self._controller.call(u'crossbar.worker.{}.add_pythonpath'.format(worker_id), worker_options['pythonpath'], options=CallOptions())
                self.log.warn("{worker}: PYTHONPATH extended for {paths}",
                              worker=worker_logname, paths=added_paths)

        # FIXME: as the CPU affinity is in the worker options, this _also_ (see above fix)
        # should be done directly in NodeControllerSession._start_native_worker
        if True:
            if 'cpu_affinity' in worker_options:
                new_affinity = yield self._controller.call(u'crossbar.worker.{}.set_cpu_affinity'.format(worker_id), worker_options['cpu_affinity'], options=CallOptions())
                self.log.debug("{worker}: CPU affinity set to {affinity}",
                               worker=worker_logname, affinity=new_affinity)

        # this is fine to start after the worker has been started, as manhole is
        # CB developer/support feature anyways (like a vendor diagnostics port)
        if 'manhole' in worker:
            yield self._controller.call(u'crossbar.worker.{}.start_manhole'.format(worker_id), worker['manhole'], options=CallOptions())
            self.log.debug("{worker}: manhole started",
                           worker=worker_logname)

    @inlineCallbacks
    def _configure_native_worker_router(self, worker_logname, worker_id, worker):
        """
        Configure a 'router' native worker: start realms (with roles and
        uplinks), connections, embedded components and transports.
        """
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # start realms on router
        for realm in worker.get('realms', []):

            # start realm
            if 'id' in realm:
                realm_id = realm.pop('id')
            else:
                realm_id = 'realm-{:03d}'.format(self._realm_no)
                self._realm_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_router_realm'.format(worker_id), realm_id, realm, options=CallOptions())
            self.log.info("{worker}: realm '{realm_id}' (named '{realm_name}') started",
                          worker=worker_logname, realm_id=realm_id, realm_name=realm['name'])

            # add roles to realm
            for role in realm.get('roles', []):
                if 'id' in role:
                    role_id = role.pop('id')
                else:
                    role_id = 'role-{:03d}'.format(self._role_no)
                    self._role_no += 1

                yield self._controller.call(u'crossbar.worker.{}.start_router_realm_role'.format(worker_id), realm_id, role_id, role, options=CallOptions())
                self.log.info(
                    "{logname}: role '{role}' (named '{role_name}') started on realm '{realm}'",
                    logname=worker_logname,
                    role=role_id,
                    role_name=role['name'],
                    realm=realm_id,
                )

            # start uplinks for realm
            for uplink in realm.get('uplinks', []):
                if 'id' in uplink:
                    uplink_id = uplink.pop('id')
                else:
                    # requires self._uplink_no, initialized in __init__ (see FIX there)
                    uplink_id = 'uplink-{:03d}'.format(self._uplink_no)
                    self._uplink_no += 1

                yield self._controller.call(u'crossbar.worker.{}.start_router_realm_uplink'.format(worker_id), realm_id, uplink_id, uplink, options=CallOptions())
                self.log.info(
                    "{logname}: uplink '{uplink}' started on realm '{realm}'",
                    logname=worker_logname,
                    uplink=uplink_id,
                    realm=realm_id,
                )

        # start connections (such as PostgreSQL database connection pools)
        # to run embedded in the router
        for connection in worker.get('connections', []):

            if 'id' in connection:
                connection_id = connection.pop('id')
            else:
                connection_id = 'connection-{:03d}'.format(self._connection_no)
                self._connection_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_connection'.format(worker_id), connection_id, connection, options=CallOptions())
            self.log.info(
                "{logname}: connection '{connection}' started",
                logname=worker_logname,
                connection=connection_id,
            )

        # start components to run embedded in the router
        for component in worker.get('components', []):

            if 'id' in component:
                component_id = component.pop('id')
            else:
                component_id = 'component-{:03d}'.format(self._component_no)
                self._component_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_router_component'.format(worker_id), component_id, component, options=CallOptions())
            self.log.info(
                "{logname}: component '{component}' started",
                logname=worker_logname,
                component=component_id,
            )

        # start transports on router
        for transport in worker.get('transports', []):

            if 'id' in transport:
                transport_id = transport.pop('id')
            else:
                transport_id = 'transport-{:03d}'.format(self._transport_no)
                self._transport_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_router_transport'.format(worker_id), transport_id, transport, options=CallOptions())
            self.log.info(
                "{logname}: transport '{tid}' started",
                logname=worker_logname,
                tid=transport_id,
            )

    @inlineCallbacks
    def _configure_native_worker_container(self, worker_logname, worker_id, worker):
        """
        Configure a 'container' native worker: start connections and
        application components, shutting the node down if a component dies
        right after startup.
        """
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # if components exit "very soon after" we try to start them,
        # we consider that a failure and shut our node down. We remove
        # this subscription 2 seconds after we're done starting
        # everything (see below). This is necessary as start_component
        # returns as soon as we've established a connection to the
        # component
        def component_exited(info):
            component_id = info.get("id")
            self.log.critical("Component '{component_id}' failed to start; shutting down node.", component_id=component_id)
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass
        topic = u'crossbar.worker.{}.container.on_component_stop'.format(worker_id)
        component_stop_sub = yield self._controller.subscribe(component_exited, topic)

        # start connections (such as PostgreSQL database connection pools)
        # to run embedded in the container
        #
        for connection in worker.get('connections', []):

            if 'id' in connection:
                connection_id = connection.pop('id')
            else:
                connection_id = 'connection-{:03d}'.format(self._connection_no)
                self._connection_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_connection'.format(worker_id), connection_id, connection, options=CallOptions())
            self.log.info(
                "{logname}: connection '{connection}' started",
                logname=worker_logname,
                connection=connection_id,
            )

        # start components to run embedded in the container
        #
        for component in worker.get('components', []):

            if 'id' in component:
                component_id = component.pop('id')
            else:
                component_id = 'component-{:03d}'.format(self._component_no)
                self._component_no += 1

            yield self._controller.call(u'crossbar.worker.{}.start_component'.format(worker_id), component_id, component, options=CallOptions())
            self.log.info("{worker}: component '{component_id}' started",
                          worker=worker_logname, component_id=component_id)

        # after 2 seconds, consider all the application components running
        self._reactor.callLater(2, component_stop_sub.unsubscribe)

    @inlineCallbacks
    def _configure_native_worker_websocket_testee(self, worker_logname, worker_id, worker):
        """
        Configure a 'websocket-testee' native worker: start its single
        listening transport.
        """
        yield self._configure_native_worker_common(worker_logname, worker_id, worker)

        # start transport on websocket-testee
        transport = worker['transport']
        transport_id = 'transport-{:03d}'.format(self._transport_no)
        # FIX: was `self._transport_no = 1`, which reset the auto-ID counter
        # and made every subsequent auto-named transport collide on
        # 'transport-001'
        self._transport_no += 1

        yield self._controller.call(u'crossbar.worker.{}.start_websocket_testee_transport'.format(worker_id), transport_id, transport, options=CallOptions())
        self.log.info(
            "{logname}: transport '{tid}' started",
            logname=worker_logname,
            tid=transport_id,
        )
class RouterWorkerSession(NativeWorkerSession):
    """
    A native Crossbar.io worker that runs a WAMP router which can manage
    multiple realms, run multiple transports and links, as well as host
    multiple (embedded) application components.
    """
    WORKER_TYPE = 'router'

    @inlineCallbacks
    def onJoin(self, details):
        """
        Called when worker process has joined the node's management realm.

        Sets up the Jinja2 template environment, the router/session factories,
        the bookkeeping maps for realms/transports/links/components, and
        registers this worker's management procedures.
        """
        yield NativeWorkerSession.onJoin(self, details, publish_ready=False)

        # Jinja2 templates for Web (like WS status page et al)
        #
        templates_dir = os.path.abspath(pkg_resources.resource_filename("crossbar", "web/templates"))
        if self.debug:
            log.msg("Using Web templates from {}".format(templates_dir))
        self._templates = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_dir))

        # factory for producing (per-realm) routers
        self._router_factory = RouterFactory()

        # factory for producing router sessions
        self._router_session_factory = RouterSessionFactory(self._router_factory)

        # map: realm ID -> RouterRealm
        self.realms = {}

        # map: realm URI -> realm ID
        self.realm_to_id = {}

        # map: transport ID -> RouterTransport
        self.transports = {}

        # map: link ID -> RouterLink
        self.links = {}

        # map: component ID -> RouterComponent
        self.components = {}

        # the procedures registered
        procs = [
            'get_router_realms',
            'start_router_realm',
            'stop_router_realm',
            'get_router_realm_roles',
            'start_router_realm_role',
            'stop_router_realm_role',
            'get_router_components',
            'start_router_component',
            'stop_router_component',
            'get_router_transports',
            'start_router_transport',
            'stop_router_transport',
            'get_router_links',
            'start_router_link',
            'stop_router_link'
        ]

        dl = []
        for proc in procs:
            uri = '{}.{}'.format(self._uri_prefix, proc)
            if self.debug:
                log.msg("Registering procedure '{}'".format(uri))
            dl.append(self.register(getattr(self, proc), uri, options=RegisterOptions(details_arg='details')))

        regs = yield DeferredList(dl)

        if self.debug:
            log.msg("RouterWorker registered {} procedures".format(len(regs)))

        # NativeWorkerSession.publish_ready()
        yield self.publish_ready()

    def get_router_realms(self, details=None):
        """
        List realms currently managed by this router.
        """
        if self.debug:
            log.msg("{}.get_router_realms".format(self.__class__.__name__))

        raise Exception("not implemented")

    def start_router_realm(self, id, config, schemas=None, details=None):
        """
        Starts a realm managed by this router.

        :param id: The ID of the realm to start.
        :type id: str
        :param config: The realm configuration.
        :type config: dict
        :param schemas: An (optional) initial schema dictionary to load.
        :type schemas: dict
        """
        if self.debug:
            log.msg("{}.start_router_realm".format(self.__class__.__name__), id, config, schemas)

        # URI of the realm to start
        realm = config['name']

        # track realm
        rlm = RouterRealm(id, config)
        self.realms[id] = rlm
        self.realm_to_id[realm] = id

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        cfg = ComponentConfig(realm)
        rlm.session = RouterServiceSession(cfg, router, schemas)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')

    def stop_router_realm(self, id, close_sessions=False, details=None):
        """
        Stop a router realm.

        When a realm has stopped, no new session will be allowed to attach to the realm.
        Optionally, close all sessions currently attached to the realm.

        :param id: ID of the realm to stop.
        :type id: str
        :param close_sessions: If `True`, close all session currently attached.
        :type close_sessions: bool
        """
        if self.debug:
            log.msg("{}.stop_router_realm".format(self.__class__.__name__), id, close_sessions)

        # FIXME
        raise NotImplementedError()

    def get_router_realm_roles(self, id, details=None):
        """
        List the roles of a realm started on this router.

        :param id: The ID of the router realm to list roles for.
        :type id: str

        :returns: list -- A list of roles.
        """
        if self.debug:
            log.msg("{}.get_router_realm_roles".format(self.__class__.__name__), id)

        if id not in self.realms:
            raise ApplicationError("crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        return self.realms[id].roles.values()

    def start_router_realm_role(self, id, role_id, config, details=None):
        """
        Adds a role to a realm.

        :param id: The ID of the realm the role should be added to.
        :type id: str
        :param role_id: The ID of the role to add.
        :type role_id: str
        :param config: The role configuration.
        :type config: dict
        """
        if self.debug:
            log.msg("{}.add_router_realm_role".format(self.__class__.__name__), id, role_id, config)

        if id not in self.realms:
            raise ApplicationError("crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        if role_id in self.realms[id].roles:
            raise ApplicationError("crossbar.error.already_exists", "A role with ID '{}' already exists in realm with ID '{}'".format(role_id, id))

        self.realms[id].roles[role_id] = RouterRealmRole(role_id, config)

        realm = self.realms[id].config['name']
        self._router_factory.add_role(realm, config)

    def stop_router_realm_role(self, id, role_id, details=None):
        """
        Drop a role from a realm.

        :param id: The ID of the realm to drop a role from.
        :type id: str
        :param role_id: The ID of the role within the realm to drop.
        :type role_id: str
        """
        if self.debug:
            log.msg("{}.drop_router_realm_role".format(self.__class__.__name__), id, role_id)

        if id not in self.realms:
            raise ApplicationError("crossbar.error.no_such_object", "No realm with ID '{}'".format(id))

        if role_id not in self.realms[id].roles:
            raise ApplicationError("crossbar.error.no_such_object", "No role with ID '{}' in realm with ID '{}'".format(role_id, id))

        del self.realms[id].roles[role_id]

    def get_router_components(self, details=None):
        """
        List application components currently running (embedded) in this router.
        """
        if self.debug:
            log.msg("{}.get_router_components".format(self.__class__.__name__))

        res = []
        # FIX: components are tracked in self.components (see onJoin and
        # start_router_component) -- self._components is never defined on
        # this class and raised AttributeError here.
        for component in sorted(self.components.values(), key=lambda c: c.created):
            res.append({
                'id': component.id,
                'created': utcstr(component.created),
                'config': component.config,
            })
        return res

    def start_router_component(self, id, config, details=None):
        """
        Dynamically start an application component to run next to the router in "embedded mode".

        :param id: The ID of the component to start.
        :type id: str
        :param config: The component configuration.
        :type config: obj
        """
        if self.debug:
            log.msg("{}.start_router_component".format(self.__class__.__name__), id, config)

        # prohibit starting a component twice
        #
        if id in self.components:
            emsg = "ERROR: could not start component - a component with ID '{}' is already running (or starting)".format(id)
            log.msg(emsg)
            raise ApplicationError('crossbar.error.already_running', emsg)

        # check configuration
        #
        try:
            checkconfig.check_router_component(config)
        except Exception as e:
            emsg = "ERROR: invalid router component configuration ({})".format(e)
            log.msg(emsg)
            raise ApplicationError("crossbar.error.invalid_configuration", emsg)
        else:
            if self.debug:
                log.msg("Starting {}-component on router.".format(config['type']))

        realm = config['realm']
        cfg = ComponentConfig(realm=realm, extra=config.get('extra', None))

        if config['type'] == 'class':

            try:
                klassname = config['classname']

                if self.debug:
                    log.msg("Starting class '{}'".format(klassname))

                c = klassname.split('.')
                module_name, klass_name = '.'.join(c[:-1]), c[-1]
                module = importlib.import_module(module_name)
                make = getattr(module, klass_name)

            except Exception as e:
                emsg = "Failed to import class '{}' - {}".format(klassname, e)
                log.msg(emsg)
                log.msg("PYTHONPATH: {}".format(sys.path))
                raise ApplicationError("crossbar.error.class_import_failed", emsg)

        elif config['type'] == 'wamplet':

            try:
                dist = config['package']
                name = config['entrypoint']

                if self.debug:
                    log.msg("Starting WAMPlet '{}/{}'".format(dist, name))

                # make is supposed to make instances of ApplicationSession
                make = pkg_resources.load_entry_point(dist, 'autobahn.twisted.wamplet', name)

            except Exception as e:
                emsg = "Failed to import wamplet '{}/{}' - {}".format(dist, name, e)
                log.msg(emsg)
                raise ApplicationError("crossbar.error.class_import_failed", emsg)

        else:
            raise ApplicationError("crossbar.error.invalid_configuration", "invalid component type '{}'".format(config['type']))

        # .. and create and add an WAMP application session to
        # run the component next to the router
        #
        try:
            session = make(cfg)
        except Exception as e:
            raise ApplicationError("crossbar.error.class_import_failed", str(e))

        if not isinstance(session, ApplicationSession):
            raise ApplicationError("crossbar.error.class_import_failed", "session not derived of ApplicationSession")

        self.components[id] = RouterComponent(id, config, session)
        self._router_session_factory.add(session, authrole=config.get('role', u'anonymous'))

    def stop_router_component(self, id, details=None):
        """
        Stop an application component running on this router.

        :param id: The ID of the component to stop.
        :type id: str
        """
        if self.debug:
            log.msg("{}.stop_router_component".format(self.__class__.__name__), id)

        # FIX: use self.components / self._router_session_factory -- the
        # previously referenced self._components and self._session_factory
        # are never defined on this class.
        if id in self.components:
            if self.debug:
                log.msg("Worker {}: stopping component {}".format(self.config.extra.worker, id))

            try:
                # self._components[id].disconnect()
                # NOTE(review): RouterComponent wraps the session -- confirm
                # whether .session should be passed to remove() instead.
                self._router_session_factory.remove(self.components[id])
                del self.components[id]
            except Exception as e:
                raise ApplicationError("crossbar.error.component.cannot_stop", "Failed to stop component {}: {}".format(id, e))
        else:
            raise ApplicationError("crossbar.error.no_such_component", "No component {}".format(id))

    def get_router_transports(self, details=None):
        """
        List currently running transports.
        """
        if self.debug:
            log.msg("{}.get_router_transports".format(self.__class__.__name__))

        res = []
        for transport in sorted(self.transports.values(), key=lambda c: c.created):
            res.append({
                'id': transport.id,
                'created': utcstr(transport.created),
                'config': transport.config,
            })
        return res

    def start_router_transport(self, id, config, details=None):
        """
        Start a transport on this router.

        :param id: The ID of the transport to start.
        :type id: str
        :param config: The transport configuration.
        :type config: dict
        """
        if self.debug:
            log.msg("{}.start_router_transport".format(self.__class__.__name__), id, config)

        # prohibit starting a transport twice
        #
        if id in self.transports:
            emsg = "ERROR: could not start transport - a transport with ID '{}' is already running (or starting)".format(id)
            log.msg(emsg)
            raise ApplicationError('crossbar.error.already_running', emsg)

        # check configuration
        #
        try:
            checkconfig.check_router_transport(config)
        except Exception as e:
            emsg = "ERROR: invalid router transport configuration ({})".format(e)
            log.msg(emsg)
            raise ApplicationError("crossbar.error.invalid_configuration", emsg)
        else:
            if self.debug:
                log.msg("Starting {}-transport on router.".format(config['type']))

        # standalone WAMP-RawSocket transport
        #
        if config['type'] == 'rawsocket':
            transport_factory = WampRawSocketServerFactory(self._router_session_factory, config)
            transport_factory.noisy = False

        # standalone WAMP-WebSocket transport
        #
        elif config['type'] == 'websocket':
            transport_factory = WampWebSocketServerFactory(self._router_session_factory, self.config.extra.cbdir, config, self._templates)
            transport_factory.noisy = False

        # Flash-policy file server pseudo transport
        #
        elif config['type'] == 'flashpolicy':
            transport_factory = FlashPolicyFactory(config.get('allowed_domain', None), config.get('allowed_ports', None))

        # WebSocket testee pseudo transport
        #
        elif config['type'] == 'websocket.testee':
            transport_factory = WebSocketTesteeServerFactory(config, self._templates)

        # Stream testee pseudo transport
        #
        elif config['type'] == 'stream.testee':
            transport_factory = StreamTesteeServerFactory()

        # Twisted Web based transport
        #
        elif config['type'] == 'web':

            options = config.get('options', {})

            # create Twisted Web root resource
            #
            root_config = config['paths']['/']
            root_type = root_config['type']
            root_options = root_config.get('options', {})

            # Static file hierarchy root resource
            #
            if root_type == 'static':

                if 'directory' in root_config:

                    root_dir = os.path.abspath(os.path.join(self.config.extra.cbdir, root_config['directory']))

                elif 'package' in root_config:

                    if 'resource' not in root_config:
                        raise ApplicationError("crossbar.error.invalid_configuration", "missing resource")

                    try:
                        mod = importlib.import_module(root_config['package'])
                    except ImportError as e:
                        emsg = "ERROR: could not import resource '{}' from package '{}' - {}".format(root_config['resource'], root_config['package'], e)
                        log.msg(emsg)
                        raise ApplicationError("crossbar.error.invalid_configuration", emsg)
                    else:
                        try:
                            root_dir = os.path.abspath(pkg_resources.resource_filename(root_config['package'], root_config['resource']))
                        except Exception as e:
                            emsg = "ERROR: could not import resource '{}' from package '{}' - {}".format(root_config['resource'], root_config['package'], e)
                            log.msg(emsg)
                            raise ApplicationError("crossbar.error.invalid_configuration", emsg)
                        else:
                            mod_version = getattr(mod, '__version__', '?.?.?')
                            log.msg("Loaded static Web resource '{}' from package '{} {}' (filesystem path {})".format(root_config['resource'], root_config['package'], mod_version, root_dir))

                else:
                    raise ApplicationError("crossbar.error.invalid_configuration", "missing web spec")

                root_dir = root_dir.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
                if self.debug:
                    log.msg("Starting Web service at root directory {}".format(root_dir))

                # create resource for file system hierarchy
                #
                if root_options.get('enable_directory_listing', False):
                    static_resource_class = StaticResource
                else:
                    static_resource_class = StaticResourceNoListing

                cache_timeout = root_options.get('cache_timeout', DEFAULT_CACHE_TIMEOUT)

                root = static_resource_class(root_dir, cache_timeout=cache_timeout)

                # set extra MIME types
                #
                root.contentTypes.update(EXTRA_MIME_TYPES)
                if 'mime_types' in root_options:
                    root.contentTypes.update(root_options['mime_types'])
                patchFileContentTypes(root)

                # render 404 page on any concrete path not found
                #
                root.childNotFound = Resource404(self._templates, root_dir)

            # WSGI root resource
            #
            elif root_type == 'wsgi':

                if not _HAS_WSGI:
                    raise ApplicationError("crossbar.error.invalid_configuration", "WSGI unsupported")

                # wsgi_options = root_config.get('options', {})

                if 'module' not in root_config:
                    raise ApplicationError("crossbar.error.invalid_configuration", "missing WSGI app module")

                if 'object' not in root_config:
                    raise ApplicationError("crossbar.error.invalid_configuration", "missing WSGI app object")

                # import WSGI app module and object
                mod_name = root_config['module']
                try:
                    mod = importlib.import_module(mod_name)
                except ImportError as e:
                    raise ApplicationError("crossbar.error.invalid_configuration", "WSGI app module '{}' import failed: {} - Python search path was {}".format(mod_name, e, sys.path))
                else:
                    obj_name = root_config['object']
                    if obj_name not in mod.__dict__:
                        raise ApplicationError("crossbar.error.invalid_configuration", "WSGI app object '{}' not in module '{}'".format(obj_name, mod_name))
                    else:
                        app = getattr(mod, obj_name)

                # create a Twisted Web WSGI resource from the user's WSGI application object
                try:
                    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), app)
                except Exception as e:
                    raise ApplicationError("crossbar.error.invalid_configuration", "could not instantiate WSGI resource: {}".format(e))
                else:
                    # create a root resource serving everything via WSGI
                    root = WSGIRootResource(wsgi_resource, {})

            # Redirecting root resource
            #
            elif root_type == 'redirect':
                redirect_url = root_config['url'].encode('ascii', 'ignore')
                root = RedirectResource(redirect_url)

            # Publisher resource (part of REST-bridge)
            #
            elif root_type == 'publisher':

                # create a vanilla session: the publisher will use this to inject events
                #
                publisher_session_config = ComponentConfig(realm=root_config['realm'], extra=None)
                publisher_session = ApplicationSession(publisher_session_config)

                # add the publishing session to the router
                #
                self._router_session_factory.add(publisher_session, authrole=root_config.get('role', 'anonymous'))

                # now create the publisher Twisted Web resource and add it to resource tree
                #
                root = PublisherResource(root_config.get('options', {}), publisher_session)

            # Caller resource (part of REST-bridge)
            #
            elif root_type == 'caller':

                # create a vanilla session: the caller will use this to inject calls
                #
                caller_session_config = ComponentConfig(realm=root_config['realm'], extra=None)
                caller_session = ApplicationSession(caller_session_config)

                # add the calling session to the router
                #
                self._router_session_factory.add(caller_session, authrole=root_config.get('role', 'anonymous'))

                # now create the caller Twisted Web resource and add it to resource tree
                #
                root = CallerResource(root_config.get('options', {}), caller_session)

            # Generic Twisted Web resource
            #
            elif root_type == 'resource':

                try:
                    klassname = root_config['classname']

                    if self.debug:
                        log.msg("Starting class '{}'".format(klassname))

                    c = klassname.split('.')
                    module_name, klass_name = '.'.join(c[:-1]), c[-1]
                    module = importlib.import_module(module_name)
                    make = getattr(module, klass_name)
                    root = make(root_config.get('extra', {}))

                except Exception as e:
                    emsg = "Failed to import class '{}' - {}".format(klassname, e)
                    log.msg(emsg)
                    log.msg("PYTHONPATH: {}".format(sys.path))
                    raise ApplicationError("crossbar.error.class_import_failed", emsg)

            # Invalid root resource
            #
            else:
                raise ApplicationError("crossbar.error.invalid_configuration", "invalid Web root path type '{}'".format(root_type))

            # create Twisted Web resources on all non-root paths configured
            #
            self.add_paths(root, config.get('paths', {}))

            # create the actual transport factory
            #
            transport_factory = Site(root)
            transport_factory.noisy = False

            # Web access logging
            #
            if not options.get('access_log', False):
                transport_factory.log = lambda _: None

            # Traceback rendering
            #
            transport_factory.displayTracebacks = options.get('display_tracebacks', False)

            # HSTS
            #
            if options.get('hsts', False):
                if 'tls' in config['endpoint']:
                    hsts_max_age = int(options.get('hsts_max_age', 31536000))
                    transport_factory.requestFactory = createHSTSRequestFactory(transport_factory.requestFactory, hsts_max_age)
                else:
                    log.msg("Warning: HSTS requested, but running on non-TLS - skipping HSTS")

            # enable Hixie-76 on Twisted Web
            #
            if options.get('hixie76_aware', False):
                transport_factory.protocol = HTTPChannelHixie76Aware  # needed if Hixie76 is to be supported

        # Unknown transport type
        #
        else:
            # should not arrive here, since we did check_transport() in the beginning
            raise Exception("logic error")

        # create transport endpoint / listening port from transport factory
        #
        d = create_listening_port_from_config(config['endpoint'], transport_factory, self.config.extra.cbdir, reactor)

        def ok(port):
            self.transports[id] = RouterTransport(id, config, transport_factory, port)
            if self.debug:
                log.msg("Router transport '{}' started and listening".format(id))
            return

        def fail(err):
            emsg = "ERROR: cannot listen on transport endpoint ({})".format(err.value)
            log.msg(emsg)
            raise ApplicationError("crossbar.error.cannot_listen", emsg)

        d.addCallbacks(ok, fail)
        return d

    def add_paths(self, resource, paths):
        """
        Add all configured non-root paths under a resource.

        :param resource: The parent resource under which to add paths.
        :type resource: Resource
        :param paths: The path configurations.
        :type paths: dict
        """
        for path in sorted(paths):

            if isinstance(path, six.text_type):
                webPath = path.encode('utf8')
            else:
                webPath = path

            # FIX: compare the normalized (bytes) path -- comparing the raw
            # text `path` against b"/" never matches on Python 3, so the
            # root path would wrongly be re-added as a child resource.
            if webPath != b"/":
                resource.putChild(webPath, self.create_resource(paths[path]))

    def create_resource(self, path_config):
        """
        Creates child resource to be added to the parent.

        :param path_config: Configuration for the new child resource.
        :type path_config: dict

        :returns: Resource -- the new child resource
        """
        # WAMP-WebSocket resource
        #
        if path_config['type'] == 'websocket':

            ws_factory = WampWebSocketServerFactory(self._router_session_factory, self.config.extra.cbdir, path_config, self._templates)

            # FIXME: Site.start/stopFactory should start/stop factories wrapped as Resources
            ws_factory.startFactory()

            return WebSocketResource(ws_factory)

        # Static file hierarchy resource
        #
        elif path_config['type'] == 'static':

            static_options = path_config.get('options', {})

            if 'directory' in path_config:

                static_dir = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))

            elif 'package' in path_config:

                if 'resource' not in path_config:
                    raise ApplicationError("crossbar.error.invalid_configuration", "missing resource")

                try:
                    mod = importlib.import_module(path_config['package'])
                except ImportError as e:
                    emsg = "ERROR: could not import resource '{}' from package '{}' - {}".format(path_config['resource'], path_config['package'], e)
                    log.msg(emsg)
                    raise ApplicationError("crossbar.error.invalid_configuration", emsg)
                else:
                    try:
                        static_dir = os.path.abspath(pkg_resources.resource_filename(path_config['package'], path_config['resource']))
                    except Exception as e:
                        emsg = "ERROR: could not import resource '{}' from package '{}' - {}".format(path_config['resource'], path_config['package'], e)
                        log.msg(emsg)
                        raise ApplicationError("crossbar.error.invalid_configuration", emsg)

            else:
                raise ApplicationError("crossbar.error.invalid_configuration", "missing web spec")

            static_dir = static_dir.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770

            # create resource for file system hierarchy
            #
            if static_options.get('enable_directory_listing', False):
                static_resource_class = StaticResource
            else:
                static_resource_class = StaticResourceNoListing

            cache_timeout = static_options.get('cache_timeout', DEFAULT_CACHE_TIMEOUT)

            static_resource = static_resource_class(static_dir, cache_timeout=cache_timeout)

            # set extra MIME types
            #
            static_resource.contentTypes.update(EXTRA_MIME_TYPES)
            if 'mime_types' in static_options:
                static_resource.contentTypes.update(static_options['mime_types'])
            patchFileContentTypes(static_resource)

            # render 404 page on any concrete path not found
            #
            static_resource.childNotFound = Resource404(self._templates, static_dir)

            return static_resource

        # WSGI resource
        #
        elif path_config['type'] == 'wsgi':

            if not _HAS_WSGI:
                raise ApplicationError("crossbar.error.invalid_configuration", "WSGI unsupported")

            # wsgi_options = path_config.get('options', {})

            if 'module' not in path_config:
                raise ApplicationError("crossbar.error.invalid_configuration", "missing WSGI app module")

            if 'object' not in path_config:
                raise ApplicationError("crossbar.error.invalid_configuration", "missing WSGI app object")

            # import WSGI app module and object
            mod_name = path_config['module']
            try:
                mod = importlib.import_module(mod_name)
            except ImportError as e:
                raise ApplicationError("crossbar.error.invalid_configuration", "WSGI app module '{}' import failed: {} - Python search path was {}".format(mod_name, e, sys.path))
            else:
                obj_name = path_config['object']
                if obj_name not in mod.__dict__:
                    raise ApplicationError("crossbar.error.invalid_configuration", "WSGI app object '{}' not in module '{}'".format(obj_name, mod_name))
                else:
                    app = getattr(mod, obj_name)

            # create a Twisted Web WSGI resource from the user's WSGI application object
            try:
                wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), app)
            except Exception as e:
                raise ApplicationError("crossbar.error.invalid_configuration", "could not instantiate WSGI resource: {}".format(e))
            else:
                return wsgi_resource

        # Redirecting resource
        #
        elif path_config['type'] == 'redirect':
            redirect_url = path_config['url'].encode('ascii', 'ignore')
            return RedirectResource(redirect_url)

        # JSON value resource
        #
        elif path_config['type'] == 'json':
            value = path_config['value']

            return JsonResource(value)

        # CGI script resource
        #
        elif path_config['type'] == 'cgi':

            cgi_processor = path_config['processor']
            cgi_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))
            cgi_directory = cgi_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770

            return CgiDirectory(cgi_directory, cgi_processor, Resource404(self._templates, cgi_directory))

        # WAMP-Longpoll transport resource
        #
        elif path_config['type'] == 'longpoll':

            path_options = path_config.get('options', {})

            lp_resource = WampLongPollResource(self._router_session_factory,
                                               timeout=path_options.get('request_timeout', 10),
                                               killAfter=path_options.get('session_timeout', 30),
                                               queueLimitBytes=path_options.get('queue_limit_bytes', 128 * 1024),
                                               queueLimitMessages=path_options.get('queue_limit_messages', 100),
                                               debug=path_options.get('debug', False),
                                               debug_transport_id=path_options.get('debug_transport_id', None)
                                               )
            lp_resource._templates = self._templates

            return lp_resource

        # Publisher resource (part of REST-bridge)
        #
        elif path_config['type'] == 'publisher':

            # create a vanilla session: the publisher will use this to inject events
            #
            publisher_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            publisher_session = ApplicationSession(publisher_session_config)

            # add the publisher session to the router
            #
            self._router_session_factory.add(publisher_session, authrole=path_config.get('role', 'anonymous'))

            # now create the publisher Twisted Web resource
            #
            return PublisherResource(path_config.get('options', {}), publisher_session)

        # Caller resource (part of REST-bridge)
        #
        elif path_config['type'] == 'caller':

            # create a vanilla session: the caller will use this to inject calls
            #
            caller_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            caller_session = ApplicationSession(caller_session_config)

            # add the calling session to the router
            #
            self._router_session_factory.add(caller_session, authrole=path_config.get('role', 'anonymous'))

            # now create the caller Twisted Web resource
            #
            return CallerResource(path_config.get('options', {}), caller_session)

        # File Upload resource
        #
        elif path_config['type'] == 'upload':

            upload_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['directory']))
            upload_directory = upload_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
            if not os.path.isdir(upload_directory):
                emsg = "configured upload directory '{}' in file upload resource isn't a directory".format(upload_directory)
                log.msg(emsg)
                raise ApplicationError("crossbar.error.invalid_configuration", emsg)

            if 'temp_directory' in path_config:
                temp_directory = os.path.abspath(os.path.join(self.config.extra.cbdir, path_config['temp_directory']))
                temp_directory = temp_directory.encode('ascii', 'ignore')  # http://stackoverflow.com/a/20433918/884770
            else:
                temp_directory = os.path.abspath(tempfile.gettempdir())
                temp_directory = os.path.join(temp_directory, 'crossbar-uploads')
                if not os.path.exists(temp_directory):
                    os.makedirs(temp_directory)

            if not os.path.isdir(temp_directory):
                emsg = "configured temp directory '{}' in file upload resource isn't a directory".format(temp_directory)
                log.msg(emsg)
                raise ApplicationError("crossbar.error.invalid_configuration", emsg)

            # file upload progress and finish events are published via this session
            #
            upload_session_config = ComponentConfig(realm=path_config['realm'], extra=None)
            upload_session = ApplicationSession(upload_session_config)

            self._router_session_factory.add(upload_session, authrole=path_config.get('role', 'anonymous'))

            return FileUploadResource(upload_directory, temp_directory, path_config['form_fields'], upload_session, path_config.get('options', {}))

        # Generic Twisted Web resource
        #
        elif path_config['type'] == 'resource':

            try:
                klassname = path_config['classname']

                if self.debug:
                    log.msg("Starting class '{}'".format(klassname))

                c = klassname.split('.')
                module_name, klass_name = '.'.join(c[:-1]), c[-1]
                module = importlib.import_module(module_name)
                make = getattr(module, klass_name)

                return make(path_config.get('extra', {}))

            except Exception as e:
                emsg = "Failed to import class '{}' - {}".format(klassname, e)
                log.msg(emsg)
                log.msg("PYTHONPATH: {}".format(sys.path))
                raise ApplicationError("crossbar.error.class_import_failed", emsg)

        # Schema Docs resource
        #
        elif path_config['type'] == 'schemadoc':

            realm = path_config['realm']

            if realm not in self.realm_to_id:
                raise ApplicationError("crossbar.error.no_such_object", "No realm with URI '{}' configured".format(realm))

            realm_id = self.realm_to_id[realm]

            realm_schemas = self.realms[realm_id].session._schemas

            return SchemaDocResource(self._templates, realm, realm_schemas)

        # Nested subpath resource
        #
        elif path_config['type'] == 'path':

            nested_paths = path_config.get('paths', {})

            if '/' in nested_paths:
                nested_resource = self.create_resource(nested_paths['/'])
            else:
                nested_resource = Resource()

            # nest subpaths under the current entry
            #
            self.add_paths(nested_resource, nested_paths)

            return nested_resource

        else:
            raise ApplicationError("crossbar.error.invalid_configuration", "invalid Web path type '{}'".format(path_config['type']))

    def stop_router_transport(self, id, details=None):
        """
        Stop a transport on this router.

        :param id: The ID of the transport to stop.
        :type id: str
        """
        if self.debug:
            log.msg("{}.stop_router_transport".format(self.__class__.__name__), id)

        # FIXME
        if id not in self.transports:
            # if not id in self.transports or self.transports[id].status != 'started':
            emsg = "ERROR: cannot stop transport - no transport with ID '{}' (or already stopping)".format(id)
            log.msg(emsg)
            raise ApplicationError('crossbar.error.not_running', emsg)

        if self.debug:
            log.msg("Stopping transport with ID '{}'".format(id))

        d = self.transports[id].port.stopListening()

        def ok(_):
            del self.transports[id]

        def fail(err):
            raise ApplicationError("crossbar.error.cannot_stop", "Failed to stop transport: {}".format(str(err.value)))

        d.addCallbacks(ok, fail)
        return d

    def get_router_links(self, details=None):
        """
        List currently running router links.
        """
        if self.debug:
            log.msg("{}.get_router_links".format(self.__class__.__name__))

        raise NotImplementedError()

    def start_router_link(self, id, config, details=None):
        """
        Start a link on this router.

        :param id: The ID of the link to start.
        :type id: str
        :param config: The link configuration.
        :type config: dict
        """
        if self.debug:
            log.msg("{}.start_router_link".format(self.__class__.__name__), id, config)

        raise NotImplementedError()

    def stop_router_link(self, id, details=None):
        """
        Stop a link on this router.

        :param id: The ID of the link to stop.
        :type id: str
        """
        if self.debug:
            log.msg("{}.stop_router_link".format(self.__class__.__name__), id)

        raise NotImplementedError()
class TestDealer(unittest.TestCase):
    """
    Unit test for dealer behavior: verifies that a pending (in-flight) call
    is canceled when the callee session goes away.
    """

    def setUp(self):
        """
        Setup router and router session factories.
        """
        # create a router factory
        self.router_factory = RouterFactory()

        # start a realm
        self.realm = RouterRealm(u'realm-001', {u'name': u'realm1'})
        self.router_factory.start_realm(self.realm)

        # allow everything: a single static-auth role with blanket
        # call/register/publish/subscribe permission on com.example.*
        self.router = self.router_factory.get(u'realm1')
        self.router.add_role(
            RouterRoleStaticAuth(
                self.router,
                u'test_role',
                default_permissions={
                    u'uri': u'com.example.',
                    u'match': u'prefix',
                    u'allow': {
                        u'call': True,
                        u'register': True,
                        u'publish': True,
                        u'subscribe': True,
                    }
                }
            )
        )

        # create a router session factory
        self.session_factory = RouterSessionFactory(self.router_factory)

    def tearDown(self):
        # no per-test teardown needed; factories are rebuilt in setUp
        pass

    @defer.inlineCallbacks
    def test_outstanding_invoke(self):
        """
        When a call is pending and the callee goes away, it cancels the
        in-flight call
        """
        # fake WAMP session; only its _realm attribute is read by the router
        session = mock.Mock()
        session._realm = u'realm1'
        # stub out authorization so the Hello/Register below are permitted
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )
        rap = RouterApplicationSession(session, self.router_factory)

        rap.send(message.Hello(u"realm1", {u'caller': role.RoleCallerFeatures()}))
        rap.send(message.Register(1, u'foo'))

        # we can retrieve the Registration via
        # session.mock_calls[-1][1][0] if req'd

        # re-set the authorize, as the Deferred from above is already
        # used-up and it gets called again to authorize the Call
        self.router.authorize = mock.Mock(
            return_value=defer.succeed({u'allow': True, u'disclose': True})
        )
        # issue a call (request ID 42) that will still be in-flight when
        # the session says Goodbye
        rap.send(message.Call(42, u'foo'))

        orig = rap.send
        d = defer.Deferred()

        rap.send(message.Goodbye())

        # intercept the next message the router sends back to the session,
        # so we can inspect the expected Error for the outstanding call
        def wrapper(*args, **kw):
            d.callback(args[0])
            return orig(*args, **kw)
        rap.send = wrapper

        # we can do this *after* the call to send() the Goodbye
        # (above) because it takes a reactor-turn to actually
        # process the cancel/errors etc -- hence the Deferred and
        # yield in this test...

        msg = yield d
        self.assertEqual(42, msg.request)
        self.assertEqual(u'wamp.error.canceled', msg.error)