def test_start_router_component_invalid_type(self):
    """
    Trying to start a component with an invalid type fails.
    """
    log_list = []

    r = router.RouterWorkerSession(config=self.config, reactor=reactor)
    r.log = make_logger(observer=log_list.append, log_level="debug")

    # Open the transport
    transport = FakeWAMPTransport(r)
    r.onOpen(transport)

    realm_config = {
        u"name": u"realm1",
        u'roles': []
    }

    r.start_router_realm(u"realm1", realm_config)

    component_config = {
        u"type": u"notathingcrossbarsupports",
        u"realm": u"realm1"
    }

    with self.assertRaises(ApplicationError) as e:
        r.start_router_component("newcomponent", component_config)

    self.assertEqual(e.exception.error, u"crossbar.error.invalid_configuration")
    self.assertEqual(len(r.get_router_components()), 0)
def _run_command_keys(options, reactor, personality):
    """
    Subcommand "crossbar keys".
    """
    log = make_logger()

    from crossbar.common.key import _read_node_key
    from crossbar.common.key import _read_release_key

    if options.generate:
        # Generate a new node key pair (2 files), load and check
        _maybe_generate_key(options.cbdir)
    else:
        # Print keys

        # Release (public) key
        release_pubkey = _read_release_key()

        # Node key
        node_key = _read_node_key(options.cbdir, private=options.private)

        if options.private:
            key_title = 'Crossbar.io Node PRIVATE Key'
        else:
            key_title = 'Crossbar.io Node PUBLIC Key'

        log.info('')
        log.info('{key_title}', key_title=hl('Crossbar Software Release Key', color='yellow', bold=True))
        log.info('base64: {release_pubkey}', release_pubkey=release_pubkey[u'base64'])
        log.info(release_pubkey[u'qrcode'].strip())
        log.info('')
        log.info('{key_title}', key_title=hl(key_title, color='yellow', bold=True))
        log.info('hex: {node_key}', node_key=node_key[u'hex'])
        log.info(node_key[u'qrcode'].strip())
        log.info('')
def __init__(self, options, session, auth_config=None):
    """
    Ctor.

    :param options: Options for path service from configuration.
    :type options: dict
    :param session: Instance of `ApplicationSession` to be used for forwarding events.
    :type session: obj
    """
    Resource.__init__(self)
    self._options = options
    self._session = session
    self.log = make_logger()

    self._key = None
    if 'key' in options:
        self._key = options['key'].encode('utf8')

    self._secret = None
    if 'secret' in options:
        self._secret = options['secret'].encode('utf8')

    self._post_body_limit = int(options.get('post_body_limit', 0))
    self._timestamp_delta_limit = int(options.get('timestamp_delta_limit', 300))

    self._require_ip = None
    if 'require_ip' in options:
        self._require_ip = [ip_network(net) for net in options['require_ip']]

    self._require_tls = options.get('require_tls', None)

    self._auth_config = auth_config or {}
    self._pending_auth = None
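# Illustrative options dict accepted by the constructor above. The keys
# mirror the lookups in the body; all values are made-up examples (a
# sketch, not a definitive configuration).
example_options = {
    'key': 'foobar',                   # shared key for signed requests
    'secret': 'secret123',             # shared secret for signed requests
    'post_body_limit': 4096,           # max POST body size in bytes (0 = unlimited)
    'timestamp_delta_limit': 300,      # max allowed clock skew in seconds
    'require_ip': ['192.168.1.0/24'],  # only accept requests from these networks
    'require_tls': True,               # reject non-TLS requests
}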
def _run_command_init(options, reactor, personality):
    """
    Subcommand "crossbar init".
    """
    log = make_logger()

    if options.appdir is None:
        options.appdir = '.'

    options.appdir = os.path.abspath(options.appdir)
    cbdir = os.path.join(options.appdir, '.crossbar')

    if os.path.exists(options.appdir):
        log.warn("Application directory '{appdir}' already exists!", appdir=options.appdir)
    else:
        try:
            os.mkdir(options.appdir)
        except Exception as e:
            raise Exception("could not create application directory '{}' ({})".format(options.appdir, e))
        else:
            log.info("Crossbar.io application directory '{appdir}' created", appdir=options.appdir)

    log.info("Initializing application directory '{options.appdir}' ..", options=options)

    get_started_hint = Templates.init(options.appdir, template='default')

    _maybe_generate_key(cbdir)

    log.info("Application directory initialized")

    if get_started_hint:
        log.info("\n{hint}\n", hint=get_started_hint)
    else:
        log.info("\nTo start your node, run 'crossbar start --cbdir {cbdir}'\n", cbdir=os.path.abspath(cbdir))
def run(components, log_level='info'):
    """
    High-level API to run a series of components.

    This will only return once all the components have stopped
    (including, possibly, after all re-connections have failed if you
    have re-connections enabled). Under the hood, this calls XXX fixme
    for asyncio -- if you wish to manage the loop yourself, use the
    :meth:`autobahn.asyncio.component.Component.start` method to start
    each component yourself.

    :param components: the Component(s) you wish to run
    :type components: Component or list of Components

    :param log_level: a valid log-level (or None to avoid calling start_logging)
    :type log_level: string
    """
    # actually, should we even let people "not start" the logging? I'm
    # not sure that's wise... (double-check: if they already called
    # txaio.start_logging() what happens if we call it again?)
    if log_level is not None:
        txaio.start_logging(level=log_level)
    loop = asyncio.get_event_loop()
    log = txaio.make_logger()

    # see https://github.com/python/asyncio/issues/341 -- asyncio has
    # "odd" handling of KeyboardInterrupt when using Tasks (as
    # run_until_complete does). Another option is to just restore
    # default SIGINT handling, which is to exit:
    #   import signal
    #   signal.signal(signal.SIGINT, signal.SIG_DFL)

    @asyncio.coroutine
    def exit():
        return loop.stop()

    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)
        for task in asyncio.Task.all_tasks():
            task.cancel()
        asyncio.ensure_future(exit())

    loop.add_signal_handler(signal.SIGINT, partial(nicely_exit, 'SIGINT'))
    loop.add_signal_handler(signal.SIGTERM, partial(nicely_exit, 'SIGTERM'))

    # returns a future; could run_until_complete() but see below
    component._run(loop, components)

    try:
        loop.run_forever()
        # this is probably more-correct, but then you always get
        # "Event loop stopped before Future completed":
        #   loop.run_until_complete(f)
    except asyncio.CancelledError:
        pass
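# Hedged usage sketch for run() above, assuming the public
# autobahn.asyncio.component API. The router URL and realm are
# illustrative, not taken from this code.
from autobahn.asyncio.component import Component, run

comp = Component(
    transports=u"ws://localhost:8080/ws",  # hypothetical router endpoint
    realm=u"realm1",
)

@comp.on_join
async def joined(session, details):
    print("session joined:", details)

if __name__ == "__main__":
    run([comp], log_level='info')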
def _appsession_loader(config):
    """
    Load a class or a WAMPlet from C{config}.
    """
    log = make_logger()

    if config['type'] == 'class':
        try:
            klassname = config['classname']

            log.debug("Starting class '{klass}'", klass=klassname)

            c = klassname.split('.')
            module_name, klass_name = '.'.join(c[:-1]), c[-1]
            module = importlib.import_module(module_name)
            component = getattr(module, klass_name)

            if not issubclass(component, ApplicationSession):
                raise ApplicationError(
                    u"crossbar.error.class_import_failed",
                    "session not derived from ApplicationSession"
                )
        except Exception:
            emsg = "Failed to import class '{}'\n{}".format(
                klassname, Failure().getTraceback())
            log.debug(emsg)
            log.debug("PYTHONPATH: {pythonpath}", pythonpath=sys.path)
            raise ApplicationError(
                u"crossbar.error.class_import_failed",
                emsg,
                pythonpath=sys.path
            )

    elif config['type'] == 'wamplet':
        try:
            dist = config['package']
            name = config['entrypoint']

            log.debug("Starting WAMPlet '{dist}/{name}'", dist=dist, name=name)

            # component is supposed to make instances of ApplicationSession
            component = pkg_resources.load_entry_point(
                dist, 'autobahn.twisted.wamplet', name)

        except Exception:
            emsg = "Failed to import wamplet '{}/{}'\n{}".format(
                dist, name, Failure().getTraceback())
            log.error(emsg)
            raise ApplicationError(u"crossbar.error.class_import_failed", emsg)

    else:
        raise ApplicationError(
            u"crossbar.error.invalid_configuration",
            "invalid component type '{}'".format(config['type'])
        )

    return component
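# Illustrative component configs handled by _appsession_loader() above;
# the class path, package, and entrypoint names are hypothetical.
class_config = {
    'type': 'class',
    'classname': 'myapp.backend.MySession',
}

wamplet_config = {
    'type': 'wamplet',
    'package': 'myapp',
    'entrypoint': 'mysession',
}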
def test_log_noop_trace(handler, framework):
    # trace should be a no-op, because we set the level to 'debug' in
    # the fixture
    logger = txaio.make_logger()

    logger.trace("a trace message")

    assert len(handler.messages) == 0
def log_started(framework):
    """
    Sets up the logging, which we can only do once per run.
    """
    early_log = txaio.make_logger()
    early_log.info("early log")

    txaio.start_logging(out=_handler, level='debug')
def test_set_global_changes_loggers(self):
    """
    Setting the global log level changes the level of all loggers that
    were not instantiated with a level.
    """
    log = make_logger()
    self.assertEqual(log._log_level, "info")
    set_global_log_level("warn")
    self.assertEqual(log._log_level, "warn")
def test_set_global_does_not_change_explicit_loggers(self):
    """
    Setting the global log level does not change loggers that have an
    explicit level set.
    """
    log = make_logger("info")
    self.assertEqual(log._log_level, "info")
    set_global_log_level("warn")
    self.assertEqual(log._log_level, "info")
def test_basic(self):
    stream = NativeStringIO()
    observer = make_stderr_observer(_file=stream)
    log = make_logger(observer=observer)

    log.error("Hi!", log_system="foo")

    result = stream.getvalue()
    self.assertIn(u"[foo]", result)
def log_started():
    """
    Sets up the logging, which we can only do once per run.
    """
    early_log = txaio.make_logger()
    early_log.info("early log")

    handler = TestHandler()
    txaio.start_logging(out=handler, level='debug')
    return handler
def test_info(handler, framework):
    logger = txaio.make_logger()

    # do something a little fancy, with attribute access etc.
    logger.info(
        "{adjective} {nouns[1]}",
        adjective='hilarious',
        nouns=['skunk', 'elephant', 'wombat'],
    )

    assert len(handler.messages) == 1
    assert handler.messages[0].endswith(b"hilarious elephant")
def test_logger_failure(self):
    """
    The failure method catches the in-flight exception.
    """
    log = make_logger("info", logger=Mock)

    try:
        1 / 0
    except:
        log.failure("Failure happened!")

    self.assertEqual(log._logger.failure.call_count, 1)
def test_logger_failure_not_called(self):
    """
    The failure method isn't called under 'none'.
    """
    log = make_logger("none", logger=Mock)

    try:
        1 / 0
    except:
        log.failure("Failure happened!")

    self.assertEqual(log._logger.failure.call_count, 0)
def test_capturer(self):
    """
    The log capturer is a context manager that captures the logs emitted
    inside it.
    """
    log = make_logger("info")

    with LogCapturer() as l:
        log.info("Whee!", log_category="CB500", foo="bar")

    self.assertEqual(len(l.get_category("CB500")), 1)
    self.assertEqual(l.get_category("CB500")[0]["foo"], "bar")
def test_critical(handler):
    logger = txaio.make_logger()

    # do something a little fancy, with attribute access etc.
    logger.critical(
        "{adjective} {nouns[2]}",
        adjective='hilarious',
        nouns=['skunk', 'elephant', 'wombat'],
    )

    assert len(handler.messages) == 1
    assert handler.messages[0].endswith(b"hilarious wombat")
def __init__(self, controller, id, who, keeplog=None):
    """
    Ctor.

    :param controller: The node controller this worker was created by.
    :type controller: instance of NodeController
    :param id: The ID of the worker.
    :type id: str
    :param who: Who triggered creation of this worker.
    :type who: str
    :param keeplog: If not `None`, buffer log messages received to be
        later retrieved via getlog(). If `0`, keep an infinite log
        internally. If `> 0`, keep at most that many log entries in the
        buffer.
    :type keeplog: int or None
    """
    self._logger = make_logger()

    self._controller = controller

    self.id = id
    self.who = who
    self.pid = None
    self.status = u'starting'

    self.created = datetime.utcnow()
    self.connected = None
    self.started = None

    self.proto = None
    self.pinfo = None

    self._log_entries = deque(maxlen=10)

    if platform.isWindows():
        self._log_fds = [2]
    else:
        self._log_fds = [1, 2]
    self._log_lineno = 0
    self._log_topic = u'crossbar.worker.{}.on_log'.format(self.id)

    self._log_rich = None  # Does not support rich logs

    # track stats for worker->controller traffic
    self._stats = {}
    self._stats_printer = None

    # A deferred that resolves when the worker is ready.
    self.ready = Deferred()

    # A deferred that resolves when the worker has exited.
    self.exit = Deferred()
    self.exit.addBoth(self._dump_remaining_log)
def install_reactor(explicit_reactor=None, verbose=False, log=None, require_optimal_reactor=True):
    """
    Install Twisted reactor.

    :param explicit_reactor: If provided, install this reactor. Else, install the optimal reactor.
    :type explicit_reactor: obj

    :param verbose: If ``True``, log (at level "info") the reactor that is in place afterwards.
    :type verbose: bool

    :param log: Explicit logging to this txaio logger object.
    :type log: obj

    :param require_optimal_reactor: If ``True`` and the desired reactor could not be installed,
        raise ``ReactorAlreadyInstalledError``, else fallback to another reactor.
    :type require_optimal_reactor: bool

    :returns: The Twisted reactor in place (`twisted.internet.reactor`).
    """
    if not log:
        log = txaio.make_logger()

    if explicit_reactor:
        # install explicitly given reactor
        #
        from twisted.application.reactors import installReactor

        if verbose:
            log.info('Trying to install explicitly specified Twisted reactor "{reactor}" ..',
                     reactor=explicit_reactor)
        try:
            installReactor(explicit_reactor)
        except:
            log.failure('Could not install Twisted reactor {reactor}\n{log_failure.value}',
                        reactor=explicit_reactor)
            sys.exit(1)
    else:
        # automatically choose optimal reactor
        #
        if verbose:
            log.info('Automatically choosing optimal Twisted reactor ..')
        install_optimal_reactor(require_optimal_reactor)

    # now the reactor is installed, import it
    from twisted.internet import reactor
    txaio.config.loop = reactor

    if verbose:
        from twisted.python.reflect import qual
        log.info('Running on Twisted reactor {reactor}', reactor=qual(reactor.__class__))

    return reactor
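# Hedged usage sketch for install_reactor() above. The import path is an
# assumption (in Autobahn this helper lives at
# autobahn.twisted.choosereactor); call it before any other
# twisted.internet import so the reactor choice can take effect.
from autobahn.twisted.choosereactor import install_reactor

reactor = install_reactor(verbose=True, require_optimal_reactor=False)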
def test_trace(handler, framework):
    logger = txaio.make_logger()
    old_log = txaio.get_global_log_level()
    txaio.set_global_log_level("trace")

    # the txaio_trace variable should be in it
    logger.trace(
        "trace {txaio_trace}",
    )

    txaio.set_global_log_level(old_log)

    assert len(handler.messages) == 1
    assert handler.messages[0].endswith(b"trace True")
def test_emit_noop(handler, framework):
    """
    emit() with a too-low level is a no-op.
    """
    logger = txaio.make_logger()

    old_log = txaio.get_global_log_level()
    txaio.set_global_log_level("info")

    logger.emit("debug", "foobar")

    txaio.set_global_log_level(old_log)

    assert len(handler.messages) == 0
def test_basic(self): """ We can instantiate a RouterWorkerSession. """ log_list = [] r = router.RouterWorkerSession(config=self.config, reactor=reactor) r.log = make_logger(observer=log_list.append, log_level="debug") # Open the transport transport = FakeWAMPTransport(r) r.onOpen(transport) # XXX depends on log-text; perhaps a little flaky... self.assertIn("running as", log_list[-1]["log_format"])
def test_logger_emits(self):
    """
    A Logger emits messages through to its child logger.
    """
    log = make_logger("trace", logger=Mock)

    log.error("Foo happened!!!")
    log._logger.emit.assert_called_with(LogLevel.error, "Foo happened!!!")

    log.warn("Stuff", foo="bar")
    log._logger.emit.assert_called_with(LogLevel.warn, "Stuff", foo="bar")

    log.trace("Stuff that's trace", foo="bar")
    log._logger.emit.assert_called_with(LogLevel.debug, "Stuff that's trace",
                                        foo="bar", txaio_trace=1)
def test_debug_with_object(handler, framework):
    logger = txaio.make_logger()

    class Shape(object):
        sides = 4
        name = "bamboozle"
        config = dict(foo='bar')

    logger.info(
        "{what.config[foo]} {what.sides} {what.name}",
        what=Shape(),
    )

    assert len(handler.messages) == 1
    assert handler.messages[0].endswith(b"bar 4 bamboozle")
def _run(reactor, components):
    if isinstance(components, Component):
        components = [components]

    if type(components) != list:
        raise ValueError(
            '"components" must be a list of Component objects - encountered'
            ' {0}'.format(type(components))
        )

    for c in components:
        if not isinstance(c, Component):
            raise ValueError(
                '"components" must be a list of Component objects - encountered'
                ' item of type {0}'.format(type(c))
            )

    log = txaio.make_logger()

    def component_success(c, arg):
        log.debug("Component {c} successfully completed: {arg}", c=c, arg=arg)
        return arg

    def component_failure(f):
        log.error("Component error: {msg}", msg=txaio.failure_message(f))
        log.debug("Component error: {tb}", tb=txaio.failure_format_traceback(f))
        return None

    # all components are started in parallel
    dl = []
    for c in components:
        # a component can be of type MAIN or SETUP
        d = c.start(reactor)
        txaio.add_callbacks(d, partial(component_success, c), component_failure)
        dl.append(d)
    d = txaio.gather(dl, consume_exceptions=False)

    def all_done(arg):
        log.debug("All components ended; stopping reactor")
        try:
            reactor.stop()
        except ReactorNotRunning:
            pass

    txaio.add_callbacks(d, all_done, all_done)
    return d
def test_emit_ok(handler, framework):
    """
    emit() with an OK level emits the message.
    """
    logger = txaio.make_logger()

    old_log = txaio.get_global_log_level()
    txaio.set_global_log_level("trace")

    logger.emit("trace", "foobar")
    logger.emit("info", "barbaz")

    txaio.set_global_log_level(old_log)

    assert len(handler.messages) == 2
    assert handler.messages[0].endswith(b"foobar")
    assert handler.messages[1].endswith(b"barbaz")
def test_basic(self): """ The JSON observer outputs a stream of log events. """ stream = StringIO() observer = make_JSON_observer(stream) log = make_logger(observer=observer) log.info("Hello") result = stream.getvalue() log_entry = json.loads(result[:-1]) self.assertEqual(result[-1], record_separator) self.assertEqual(len(log_entry.keys()), 4) self.assertEqual(log_entry["level"], u"info") self.assertEqual(log_entry["text"], u"Hello")
def run_command_keys(options, reactor=None, **kwargs):
    """
    Subcommand "crossbar keys".
    """
    log = make_logger()

    # Release (public) key
    release_pubkey = _read_release_pubkey()

    # Node (public) key
    node_pubkey = _read_node_pubkey(options.cbdir)

    log.info(release_pubkey[u'qrcode'])
    log.info(' Release key: {release_pubkey}', release_pubkey=release_pubkey[u'base64'])
    log.info('')
    log.info(node_pubkey[u'qrcode'])
    log.info(' Node key: {node_pubkey}', node_pubkey=node_pubkey[u'hex'])
    log.info('')
def test_categories_subsequent(handler, framework):
    """
    Later calls to add_log_categories update the list of log categories
    and take precedence.
    """
    logger = txaio.make_logger()

    txaio.add_log_categories({"TX100": u"{adjective} {nouns[2]}"})
    txaio.add_log_categories({"TX100": u"{adjective} {nouns[1]}"})

    # do something a little fancy, with attribute access etc.
    logger.critical(
        log_category="TX100",
        adjective='hilarious',
        nouns=['skunk', 'elephant', 'wombat'],
    )

    assert len(handler.messages) == 1
    assert handler.messages[0].endswith(b"hilarious elephant")
def install_reactor(explicit_reactor=None, verbose=False):
    """
    Install Twisted reactor.

    :param explicit_reactor: If provided, install this reactor. Else, install the optimal reactor.
    :type explicit_reactor: obj
    :param verbose: If ``True``, print what happens.
    :type verbose: bool
    """
    import sys
    import txaio
    txaio.use_twisted()  # just to be sure...

    log = make_logger()

    if explicit_reactor:
        # install explicitly given reactor
        ##
        from twisted.application.reactors import installReactor
        log.info("Trying to install explicitly specified Twisted reactor '{reactor}'",
                 reactor=explicit_reactor)
        try:
            installReactor(explicit_reactor)
        except:
            log.failure("Could not install Twisted reactor {reactor}\n{log_failure.value}",
                        reactor=explicit_reactor)
            sys.exit(1)
    else:
        # automatically choose optimal reactor
        ##
        log.debug("Automatically choosing optimal Twisted reactor")
        install_optimal_reactor(verbose)

    # now the reactor is installed, import it
    from twisted.internet import reactor
    txaio.config.loop = reactor

    if verbose:
        from twisted.python.reflect import qual
        log.debug("Running Twisted reactor {reactor}", reactor=qual(reactor.__class__))

    return reactor
def _check_is_running(cbdir):
    """
    Check if a Crossbar.io node is already running on a Crossbar.io node directory.

    :param cbdir: The Crossbar.io node directory to check.
    :type cbdir: str

    :returns: The PID of the running Crossbar.io controller process or ``None``
    :rtype: int or None
    """
    log = make_logger()

    remove_PID_type = None
    remove_PID_reason = None

    fp = os.path.join(cbdir, _PID_FILENAME)

    if os.path.isfile(fp):
        with open(fp) as fd:
            pid_data_str = fd.read()
            try:
                pid_data = json.loads(pid_data_str)
                pid = int(pid_data['pid'])
            except ValueError:
                remove_PID_type = "corrupt"
                remove_PID_reason = "corrupt .pid file"
            else:
                if pid == os.getpid():
                    # the process ID is our own -- this happens often when the Docker container is
                    # shut down uncleanly
                    return None
                elif sys.platform == 'win32' and not _HAS_PSUTIL:
                    # when on Windows, and we can't actually determine if the PID exists,
                    # just assume it exists
                    return pid_data
                else:
                    pid_exists = _check_pid_exists(pid)
                    if pid_exists:
                        if _HAS_PSUTIL:
                            # additionally check this is actually a crossbar process
                            p = psutil.Process(pid)
                            cmdline = p.cmdline()
                            if not _is_crossbar_process(cmdline):
                                nicecmdline = ' '.join(cmdline)
                                if len(nicecmdline) > 76:
                                    nicecmdline = nicecmdline[:38] + ' ... ' + nicecmdline[-38:]
                                log.info('"{fp}" points to PID {pid} which is not a crossbar process:',
                                         fp=fp, pid=pid)
                                log.info('  {cmdline}', cmdline=nicecmdline)
                                log.info('Verify manually and either kill {pid} or delete {fp}',
                                         pid=pid, fp=fp)
                                return None
                        return pid_data
                    else:
                        remove_PID_type = "stale"
                        remove_PID_reason = "pointing to non-existing process with PID {}".format(pid)

    if remove_PID_type:
        # If we have to remove a PID, do it here.
        try:
            os.remove(fp)
        except:
            log.info(("Could not remove {pidtype} Crossbar.io PID file "
                      "({reason}) {fp} - {log_failure}"),
                     pidtype=remove_PID_type, reason=remove_PID_reason, fp=fp)
        else:
            log.info("{pidtype} Crossbar.io PID file ({reason}) {fp} removed",
                     pidtype=remove_PID_type.title(), reason=remove_PID_reason, fp=fp)

    return None
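# Hedged sketch of the PID file consumed above: a JSON object with at
# least a 'pid' field (any further fields are an assumption, and the
# actual filename comes from _PID_FILENAME; 'node.pid' here is a guess).
# Writing one for testing could look like this:
import json
import os

def _write_pid_file(cbdir, filename='node.pid'):
    with open(os.path.join(cbdir, filename), 'w') as fd:
        fd.write(json.dumps({'pid': os.getpid()}))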
class WampRawSocketProtocol(Int32StringReceiver):
    """
    Base class for Twisted-based WAMP-over-RawSocket protocols.
    """
    log = txaio.make_logger()

    peer = None
    peer_transport = None

    def __init__(self):
        # set the RawSocket maximum message size by default
        self._max_message_size = 2**24

    def lengthLimitExceeded(self, length):
        # override hook in Int32StringReceiver base class that is fired when a message is (to be) received
        # that is larger than what we agreed to handle (by negotiation in the RawSocket opening handshake)
        emsg = 'RawSocket connection: length of received message exceeded (message was {} bytes, but current maximum is {} bytes)'.format(
            length, self.MAX_LENGTH)
        raise PayloadExceededError(emsg)

    def connectionMade(self):
        self.log.debug('{klass}.connectionMade()', klass=self.__class__.__name__)

        # the peer we are connected to
        #
        try:
            self.peer = peer2str(self.transport.getPeer())
        except AttributeError:
            # ProcessProtocols lack getPeer()
            self.peer = 'process:{}'.format(self.transport.pid)

        self.peer_transport = 'rawsocket'

        # a Future/Deferred that fires when we hit STATE_CLOSED
        self.is_closed = txaio.create_future()

        # this will hold an ApplicationSession object
        # once the RawSocket opening handshake has been
        # completed
        #
        self._session = None

        # Will hold the negotiated serializer once the opening handshake is complete
        #
        self._serializer = None

        # Will be set to True once the opening handshake is complete
        #
        self._handshake_complete = False

        # Buffer for opening handshake received bytes.
        #
        self._handshake_bytes = b''

        # Peer requested to _receive_ this maximum length of serialized messages - hence we must not send larger msgs!
        #
        self._max_len_send = None

    def _on_handshake_complete(self):
        try:
            self._session = self.factory._factory()
            self._session.onOpen(self)
        except Exception as e:
            # Exceptions raised in onOpen are fatal ..
            self.log.warn(
                "{klass}._on_handshake_complete(): ApplicationSession constructor / onOpen raised ({err})",
                klass=self.__class__.__name__, err=e)
            self.abort()
        else:
            self.log.debug("ApplicationSession started.")

    def connectionLost(self, reason):
        self.log.debug('{klass}.connectionLost(reason="{reason}")',
                       klass=self.__class__.__name__, reason=reason)
        txaio.resolve(self.is_closed, self)
        try:
            wasClean = isinstance(reason.value, ConnectionDone)
            if self._session:
                self._session.onClose(wasClean)
        except Exception as e:
            # silently ignore exceptions raised here ..
            self.log.warn(
                '{klass}.connectionLost(): ApplicationSession.onClose raised "{err}"',
                klass=self.__class__.__name__, err=e)
        self._session = None

    def stringReceived(self, payload):
        self.log.trace('{klass}.stringReceived(): RX {octets} octets',
                       klass=self.__class__.__name__, octets=_LazyHexFormatter(payload))
        try:
            for msg in self._serializer.unserialize(payload):
                self.log.trace(
                    "{klass}.stringReceived: RX WAMP message: {msg}",
                    klass=self.__class__.__name__, msg=msg)
                self._session.onMessage(msg)

        except CancelledError as e:
            self.log.debug(
                "{klass}.stringReceived: WAMP CancelledError - connection will continue!\n{err}",
                klass=self.__class__.__name__, err=e)

        except InvalidUriError as e:
            self.log.warn(
                "{klass}.stringReceived: WAMP InvalidUriError - aborting connection!\n{err}",
                klass=self.__class__.__name__, err=e)
            self.abort()

        except ProtocolError as e:
            self.log.warn(
                "{klass}.stringReceived: WAMP ProtocolError - aborting connection!\n{err}",
                klass=self.__class__.__name__, err=e)
            self.abort()

        except PayloadExceededError as e:
            self.log.warn(
                "{klass}.stringReceived: WAMP PayloadExceededError - aborting connection!\n{err}",
                klass=self.__class__.__name__, err=e)
            self.abort()

        except SerializationError as e:
            self.log.warn(
                "{klass}.stringReceived: WAMP SerializationError - aborting connection!\n{err}",
                klass=self.__class__.__name__, err=e)
            self.abort()

        except Exception as e:
            self.log.failure()
            self.log.warn(
                "{klass}.stringReceived: WAMP Exception - aborting connection!\n{err}",
                klass=self.__class__.__name__, err=e)
            self.abort()

    def send(self, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.send`
        """
        if self.isOpen():
            self.log.trace(
                '{klass}.send() (serializer={serializer}): TX WAMP message: "{msg}"',
                klass=self.__class__.__name__, msg=msg, serializer=self._serializer)
            try:
                payload, _ = self._serializer.serialize(msg)
            except SerializationError as e:
                # all exceptions raised from above should be serialization errors ..
                raise SerializationError(
                    "WampRawSocketProtocol: unable to serialize WAMP application payload ({0})".format(e))
            else:
                payload_len = len(payload)
                if 0 < self._max_len_send < payload_len:
                    emsg = 'tried to send RawSocket message with size {} exceeding payload limit of {} octets'.format(
                        payload_len, self._max_len_send)
                    self.log.warn(emsg)
                    raise PayloadExceededError(emsg)
                else:
                    self.sendString(payload)
                    self.log.trace('{klass}.send(): TX {octets} octets',
                                   klass=self.__class__.__name__, octets=_LazyHexFormatter(payload))
        else:
            raise TransportLost()

    def isOpen(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`
        """
        return self._session is not None

    def close(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.close`
        """
        if self.isOpen():
            self.transport.loseConnection()
        else:
            raise TransportLost()

    def abort(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
        """
        if self.isOpen():
            if hasattr(self.transport, 'abortConnection'):
                # ProcessProtocol lacks abortConnection()
                self.transport.abortConnection()
            else:
                self.transport.loseConnection()
        else:
            raise TransportLost()
import txaio
import os

from ocs import ocs_agent, site_config

# Notes:
#   Each Lakeshore log gets its own "block" due to tracking the logs
#   being somewhat tricky. If a block structure is established and
#   then the thermometer removed from the scan, say if the user
#   switches to scanning a single channel, then the block structure
#   won't be reconstructed and thus won't match the existing
#   structure, causing an error.

# For logging
txaio.use_twisted()
LOG = txaio.make_logger()


class LogTracker:
    """Log Tracking helper class. Always tracks current date's logs.

    Parameters
    ----------
    log_dir : str
        Top level log directory

    Attributes
    ----------
    log_dir : str
        Top level log directory
    date : datetime.date
class WampMQTTServerFactory(Factory):

    log = make_logger()

    protocol = WampMQTTServerProtocol

    serializers = {
        u'json': JsonObjectSerializer(),
        u'msgpack': MsgPackObjectSerializer(),
        u'cbor': CBORObjectSerializer(),
        u'ubjson': UBJSONObjectSerializer(),
    }

    def __init__(self, router_session_factory, config, reactor):
        self._router_session_factory = router_session_factory
        self._router_factory = router_session_factory._routerFactory
        self._options = config.get(u'options', {})
        self._realm = self._options.get(u'realm', None)
        self._reactor = reactor
        self._payload_mapping = StringTrie()
        for topic, pmap in self._options.get(u'payload_mapping', {}).items():
            self._set_payload_format(topic, pmap)

    def buildProtocol(self, addr):
        protocol = self.protocol(self._reactor)
        protocol.factory = self
        return protocol

    def _get_payload_format(self, topic):
        """
        Map a WAMP topic URI to MQTT payload format.

        :param topic: WAMP URI.
        :type topic: str

        :returns: Payload format metadata.
        :rtype: dict
        """
        try:
            pmap = self._payload_mapping.longest_prefix_value(topic)
        except KeyError:
            return None
        else:
            return pmap

    def _set_payload_format(self, topic, pmap=None):
        if pmap is None:
            if topic in self._payload_mapping:
                del self._payload_mapping[topic]
        else:
            self._payload_mapping[topic] = pmap

    @inlineCallbacks
    def transform_wamp(self, topic, msg):
        # check for cached transformed payload
        cache_key = u'_{}_{}'.format(self.__class__.__name__, id(self))
        cached = msg._serialized.get(cache_key, None)

        if cached:
            payload_format, mapped_topic, payload = cached
            self.log.debug('using cached payload for {cache_key} in message {msg_id}!',
                           msg_id=id(msg), cache_key=cache_key)
        else:
            # convert WAMP URI to MQTT topic
            mapped_topic = _wamp_topic_to_mqtt(topic)

            # for WAMP->MQTT, the payload mapping is determined from the
            # WAMP URI (not the transformed MQTT topic)
            payload_format = self._get_payload_format(topic)
            payload_format_type = payload_format[u'type']

            if payload_format_type == u'passthrough':
                payload = msg.payload

            elif payload_format_type == u'native':
                serializer = payload_format.get(u'serializer', None)
                payload = self._transform_wamp_native(serializer, msg)

            elif payload_format_type == u'dynamic':
                encoder = payload_format.get(u'encoder', None)
                codec_realm = payload_format.get(u'realm', self._realm)
                payload = yield self._transform_wamp_dynamic(encoder, codec_realm,
                                                             mapped_topic, topic, msg)
            else:
                raise Exception('payload format {} not implemented'.format(payload_format))

            msg._serialized[cache_key] = (payload_format, mapped_topic, payload)

        self.log.debug('transform_wamp({topic}, {msg}) -> payload_format={payload_format}, mapped_topic={mapped_topic}, payload={payload}',
                       topic=topic, msg=msg, payload_format=payload_format,
                       mapped_topic=mapped_topic, payload=payload)

        returnValue((payload_format, mapped_topic, payload))

    @inlineCallbacks
    def _transform_wamp_dynamic(self, encoder, codec_realm, mapped_topic, topic, msg):
        codec_session = self._router_factory.get(codec_realm)._realm.session
        payload = yield codec_session.call(encoder, mapped_topic, topic, msg.args, msg.kwargs)
        returnValue(payload)

    def _transform_wamp_native(self, serializer, msg):
        obj = {}
        for opt in [u'args',
                    u'kwargs',
                    u'exclude',
                    u'exclude_authid',
                    u'exclude_authrole',
                    u'eligible',
                    u'eligible_authid',
                    u'eligible_authrole']:
            attr = getattr(msg, opt, None)
            if attr is not None:
                obj[opt] = attr

        if serializer in self.serializers:
            payload = self.serializers[serializer].serialize(obj)
        else:
            raise Exception('MQTT native mode payload transform: invalid serializer {}'.format(serializer))

        return payload
    @inlineCallbacks
    def transform_mqtt(self, topic, payload):
        # transform MQTT topic to WAMP URI
        mapped_topic = _mqtt_topicname_to_wamp(topic)

        # for MQTT->WAMP, the payload mapping is determined from the
        # transformed WAMP URI (not the original MQTT topic)
        payload_format = self._get_payload_format(mapped_topic)
        payload_format_type = payload_format[u'type']

        if payload_format_type == u'passthrough':
            options = {
                u'payload': payload,
                u'enc_algo': u'mqtt'
            }

        elif payload_format_type == u'native':
            serializer = payload_format.get(u'serializer', None)
            options = self._transform_mqtt_native(serializer, payload)

        elif payload_format_type == u'dynamic':
            decoder = payload_format.get(u'decoder', None)
            codec_realm = payload_format.get(u'realm', self._realm)
            options = yield self._transform_mqtt_dynamic(decoder, codec_realm,
                                                         mapped_topic, topic, payload)
        else:
            raise Exception('payload format {} not implemented'.format(payload_format))

        self.log.debug('transform_mqtt({topic}, {payload}) -> payload_format={payload_format}, mapped_topic={mapped_topic}, options={options}',
                       topic=topic, payload=payload, payload_format=payload_format,
                       mapped_topic=mapped_topic, options=options)

        returnValue((payload_format, mapped_topic, options))

    @inlineCallbacks
    def _transform_mqtt_dynamic(self, decoder, codec_realm, mapped_topic, topic, payload):
        codec_session = self._router_factory.get(codec_realm)._realm.session
        options = yield codec_session.call(decoder, mapped_topic, topic, payload)
        returnValue(options)

    def _transform_mqtt_native(self, serializer, payload):
        """
        Transform MQTT binary payload from a MQTT Publish to keyword dict
        suitable for the constructor of a WAMP Publish message,
        that is :class:`autobahn.wamp.message.Publish`.
        """
        options = {}
        if serializer in self.serializers:
            if serializer == u'json':
                if not _validator.validate(payload)[0]:
                    # invalid UTF-8: drop the event
                    raise Exception('invalid UTF8 in JSON encoded MQTT payload')
            obj = self.serializers[serializer].unserialize(payload)[0]
        else:
            raise Exception('"{}" serializer for encoded MQTT payload not implemented'.format(serializer))

        if type(obj) != dict:
            raise Exception('invalid type {} for "{}" encoded MQTT payload'.format(type(obj), serializer))

        for opt in [u'args',
                    u'kwargs',
                    u'exclude',
                    u'exclude_authid',
                    u'exclude_authrole',
                    u'eligible',
                    u'eligible_authid',
                    u'eligible_authrole']:
            if opt in obj:
                options[opt] = obj[opt]

        return options
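# Hedged example of the 'payload_mapping' option consumed in the factory
# __init__ above: a mapping from WAMP URI prefixes to payload format
# metadata, as dispatched on in transform_wamp()/transform_mqtt(). The
# URI prefixes and codec procedure names are illustrative.
example_options = {
    u'realm': u'realm1',
    u'payload_mapping': {
        # default: everything uses a native JSON mapping
        u'': {u'type': u'native', u'serializer': u'json'},
        # opaque passthrough for one subtree
        u'com.example.raw.': {u'type': u'passthrough'},
        # dynamic transform via user-supplied codec procedures
        u'com.example.dyn.': {
            u'type': u'dynamic',
            u'encoder': u'com.example.mqtt.encode',
            u'decoder': u'com.example.mqtt.decode',
        },
    },
}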
class WampWebSocketServerProtocol(websocket.WampWebSocketServerProtocol):
    """
    Crossbar.io WAMP-over-WebSocket server protocol.
    """
    log = make_logger()

    def __init__(self):
        super(WampWebSocketServerProtocol, self).__init__()
        self._cbtid = None

    def onConnect(self, request):

        if self.factory.debug_traffic:
            from twisted.internet import reactor

            def print_traffic():
                self.log.info(
                    "Traffic {peer}: {wire_in} / {wire_out} in / out bytes - {ws_in} / {ws_out} in / out msgs",
                    peer=self.peer,
                    wire_in=self.trafficStats.incomingOctetsWireLevel,
                    wire_out=self.trafficStats.outgoingOctetsWireLevel,
                    ws_in=self.trafficStats.incomingWebSocketMessages,
                    ws_out=self.trafficStats.outgoingWebSocketMessages,
                )
                reactor.callLater(1, print_traffic)

            print_traffic()

        # if WebSocket client did not set WS subprotocol, assume "wamp.2.json"
        #
        self.STRICT_PROTOCOL_NEGOTIATION = self.factory._requireWebSocketSubprotocol

        # handle WebSocket opening handshake
        #
        protocol, headers = websocket.WampWebSocketServerProtocol.onConnect(self, request)

        try:
            self._origin = request.origin

            # transport-level WAMP authentication info
            #
            self._authid = None
            self._authrole = None
            self._authrealm = None
            self._authmethod = None
            self._authextra = None
            self._authprovider = None

            # cookie tracking and cookie-based authentication
            #
            self._cbtid = None

            if self.factory._cookiestore:

                # try to parse an already set cookie from HTTP request headers
                self._cbtid = self.factory._cookiestore.parse(request.headers)

                # if no cookie is set, create a new one ..
                if self._cbtid is None:

                    self._cbtid, headers['Set-Cookie'] = self.factory._cookiestore.create()

                    if 'cookie' in self.factory._config:
                        if 'secure' in self.factory._config['cookie'] and self.factory._config['cookie']['secure'] is True:
                            headers['Set-Cookie'] += ';Secure'
                        if 'http_strict' in self.factory._config['cookie'] and self.factory._config['cookie']['http_strict'] is True:
                            headers['Set-Cookie'] += ';HttpOnly'
                        if 'same_site' in self.factory._config['cookie']:
                            headers['Set-Cookie'] += ';SameSite=' + self.factory._config['cookie']['same_site']

                    self.log.debug("Setting new cookie: {cookie}", cookie=headers['Set-Cookie'])
                else:
                    self.log.debug("Cookie already set")

                # add this WebSocket connection to the set of connections
                # associated with the same cookie
                self.factory._cookiestore.addProto(self._cbtid, self)

                self.log.debug("Cookie tracking enabled on WebSocket connection {ws}", ws=self)

                # if cookie-based authentication is enabled, set auth info from cookie store
                #
                if 'auth' in self.factory._config and 'cookie' in self.factory._config['auth']:

                    self._authid, self._authrole, self._authmethod, self._authrealm, self._authextra = self.factory._cookiestore.getAuth(self._cbtid)

                    if self._authid:
                        # there is a cookie set, and the cookie was previously successfully authenticated,
                        # so immediately authenticate the client using that information
                        self._authprovider = 'cookie'
                        self.log.debug(
                            "Authenticated client via cookie {cookiename}={cbtid} as authid={authid}, authrole={authrole}, authmethod={authmethod}, authrealm={authrealm}",
                            cookiename=self.factory._cookiestore._cookie_id_field,
                            cbtid=self._cbtid, authid=self._authid, authrole=self._authrole,
                            authmethod=self._authmethod, authrealm=self._authrealm)
                    else:
                        # there is a cookie set, but the cookie wasn't authenticated yet using a different auth method
                        self.log.debug("Cookie-based authentication enabled, but cookie isn't authenticated yet")
                else:
                    self.log.debug("Cookie-based authentication disabled")
            else:
                self.log.debug("Cookie tracking disabled on WebSocket connection {ws}", ws=self)

            # remember transport level info for later forwarding in
            # WAMP meta event "wamp.session.on_join"
            #
            self._transport_info = {
                'type': 'websocket',
                'protocol': protocol,
                'peer': self.peer,

                # all HTTP headers as received by the WebSocket client
                'http_headers_received': request.headers,

                # only custom user headers (such as cookie)
                'http_headers_sent': headers,

                # all HTTP response lines sent (verbatim, in order as sent)
                # this will get filled in onOpen() from the HTTP response
                # data that will be stored by AutobahnPython at the WebSocket
                # protocol level (WebSocketServerProtocol)
                #
                'http_response_lines': None,

                # WebSocket extensions in use .. will be filled in onOpen() - see below
                'websocket_extensions_in_use': None,

                # Crossbar.io tracking ID (for cookie tracking)
                'cbtid': self._cbtid
            }

            # accept the WebSocket connection, speaking subprotocol `protocol`
            # and setting HTTP headers `headers`
            #
            return (protocol, headers)

        except Exception:
            traceback.print_exc()

    def onOpen(self):
        if False:
            # this is a little bit silly, we parse the complete response data into lines again
            http_response_lines = []
            for line in self.http_response_data.split('\r\n'):
                line = line.strip()
                if line:
                    http_response_lines.append(line)
            self._transport_info['http_response_lines'] = http_response_lines

        # note the WebSocket extensions negotiated
        self._transport_info['websocket_extensions_in_use'] = [e.__json__() for e in self.websocket_extensions_in_use]

        return super(WampWebSocketServerProtocol, self).onOpen()

    def sendServerStatus(self, redirectUrl=None, redirectAfter=0):
        """
        Used to send out server status/version upon receiving a HTTP/GET without
        upgrade to WebSocket header (and option serverStatus is True).
        """
        try:
            page = self.factory._templates.get_template('cb_ws_status.html')
            self.sendHtml(page.render(redirectUrl=redirectUrl,
                                      redirectAfter=redirectAfter,
                                      cbVersion=crossbar.__version__,
                                      wsUri=self.factory.url,
                                      peer=self.peer,
                                      workerPid=os.getpid()))
        except Exception:
            self.log.failure("Error rendering WebSocket status page template: {log_failure.value}")

    def onClose(self, wasClean, code, reason):
        super(WampWebSocketServerProtocol, self).onClose(wasClean, code, reason)

        # remove this WebSocket connection from the set of connections
        # associated with the same cookie
        if self._cbtid:
            self.factory._cookiestore.dropProto(self._cbtid, self)
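# Hedged sketch of the cookie-related transport config keys read in
# onConnect() above; the structure follows the lookups in that method,
# while the values are illustrative.
example_config = {
    'cookie': {
        'secure': True,         # append ';Secure' to Set-Cookie
        'http_strict': True,    # append ';HttpOnly' to Set-Cookie
        'same_site': 'Strict',  # append ';SameSite=Strict' to Set-Cookie
    },
    'auth': {
        'cookie': {},           # enable cookie-based authentication
    },
}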
class WebSocketReverseProxyServerProtocol(websocket.WebSocketServerProtocol):
    """
    Server protocol to accept incoming (frontend) WebSocket connections and
    forward traffic to a backend WebSocket server.

    This protocol supports any WebSocket based subprotocol with text or
    binary payload.

    The WebSocket connection to the backend WebSocket server is configurable
    on the factory for this protocol.
    """
    log = make_logger()

    def onConnect(self, request):
        """
        Incoming (frontend) WebSocket connection accepted. Forward connect
        to backend WebSocket server.
        """
        self.log.debug('WebSocketReverseProxyServerProtocol.onConnect(request={request})', request=request)

        self.backend_config = self.factory.path_config['backend']

        self.backend_factory = WebSocketReverseProxyClientFactory(
            frontend_protocol=self,
            frontend_request=request,
            url=self.backend_config.get('url', None))
        self.backend_factory.noisy = False
        self.backend_protocol = None

        # create and connect client endpoint
        #
        endpoint = create_connecting_endpoint_from_config(
            self.backend_config['endpoint'], None, self.factory.reactor, self.log)

        backend_on_connect = internet.defer.Deferred()

        # now, actually connect the client
        #
        d = endpoint.connect(self.backend_factory)

        def on_connect_success(proto):
            self.log.debug('WebSocketReverseProxyServerProtocol.onConnect(..): connected')
            proto.backend_on_connect = backend_on_connect
            self.backend_protocol = proto

        def on_connect_error(err):
            deny = ConnectionDeny(ConnectionDeny.SERVICE_UNAVAILABLE,
                                  'WebSocket reverse proxy backend not reachable')
            backend_on_connect.errback(deny)

        d.addCallbacks(on_connect_success, on_connect_error)

        return backend_on_connect

    def onOpen(self):
        self.log.debug('WebSocketReverseProxyServerProtocol.onOpen()')

    def onMessage(self, payload, isBinary):
        if self.backend_protocol:
            self.log.debug('WebSocketReverseProxyServerProtocol: forwarding WebSocket message from frontend connection to backend connection')
            self.backend_protocol.sendMessage(payload, isBinary)
        else:
            self.log.warn('WebSocketReverseProxyServerProtocol: received WebSocket message on frontend connection while there is no backend connection! dropping WebSocket message')

    def onClose(self, wasClean, code, reason):
        if self.backend_protocol:
            self.log.debug('WebSocketReverseProxyServerProtocol: forwarding close from frontend connection to backend connection')
            # forward a normal close (1000) to the backend, regardless of the
            # close code received on the frontend connection
            code = 1000
            self.backend_protocol.sendClose(code, reason)
        else:
            self.log.warn('WebSocketReverseProxyServerProtocol: received WebSocket close on frontend connection while there is no backend connection! dropping WebSocket close')
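# Hedged sketch of the path configuration read by onConnect() above;
# only the 'backend' keys referenced there ('url', 'endpoint') are
# assumed, and all values are illustrative.
example_path_config = {
    'backend': {
        'url': 'ws://localhost:9000/ws',
        'endpoint': {
            'type': 'tcp',
            'host': 'localhost',
            'port': 9000,
        },
    },
}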
class RouterServiceAgent(ApplicationSession):
    """
    User router-realm service session, and WAMP meta API implementation.

    Router service session which is used internally by a router to
    issue WAMP calls or publish events, and which provides WAMP meta API
    procedures.
    """
    log = make_logger()

    def __init__(self, config, router, schemas=None):
        """
        :param config: WAMP application component configuration.
        :type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :param router: The router this service session is running for.
        :type router: instance of :class:`crossbar.router.session.CrossbarRouter`
        :param schemas: An (optional) initial schema dictionary to load.
        :type schemas: dict
        """
        ApplicationSession.__init__(self, config)
        self._router = router
        self._schemas = {}
        if schemas:
            self._schemas.update(schemas)
            self.log.info(
                'initialized schemas cache with {entries} entries',
                entries=len(self._schemas),
            )

        # the service session can expose its API on multiple sessions
        # by default, it exposes its API only on itself, and that means, on the
        # router-realm the user started
        self._expose_on_sessions = []

        enable_meta_api = self.config.extra.get('enable_meta_api', True) if self.config.extra else True
        if enable_meta_api:
            self._expose_on_sessions.append((self, None, None))

        # optionally, when this option is set, the service session exposes its API
        # additionally on the management session to the local node router (and from there, to CFC)
        bridge_meta_api = self.config.extra.get('bridge_meta_api', False) if self.config.extra else False
        if bridge_meta_api:

            management_session = self.config.extra.get('management_session', None) if self.config.extra else None
            if management_session is None:
                raise Exception('logic error: missing management_session in extra')

            bridge_meta_api_prefix = self.config.extra.get('bridge_meta_api_prefix', None) if self.config.extra else None
            if bridge_meta_api_prefix is None:
                raise Exception('logic error: missing bridge_meta_api_prefix in extra')

            self._expose_on_sessions.append((management_session, bridge_meta_api_prefix, u'-'))

    def publish(self, topic, *args, **kwargs):
        # WAMP meta events published over the service session are published on the
        # service session itself (the first in the list of sessions to expose), and potentially
        # more sessions - namely the management session on the local node router
        dl = []
        for session, prefix, replace_dots in self._expose_on_sessions:
            translated_topic = topic

            # we cannot subscribe in CFC to topics of the form
            # crossbarfabriccenter.node.<node_id>.worker.<worker_id>.realm.<realm_id>.root.*,
            # where * is an arbitrary suffix including dots, eg "wamp.session.on_join"
            #
            # to work around that, we replace the "."s in the suffix with "-", and reverse that
            # in CFC
            if replace_dots:
                translated_topic = translated_topic.replace(u'.', replace_dots)

            if prefix:
                translated_topic = u'{}{}'.format(prefix, translated_topic)

            self.log.debug('RouterServiceAgent.publish("{topic}") -> "{translated_topic}" on "{realm}"',
                           topic=topic, translated_topic=translated_topic, realm=session._realm)

            dl.append(ApplicationSession.publish(session, translated_topic, *args, **kwargs))

        # to keep the interface of ApplicationSession.publish, we only return the first
        # publish return (that is the return from publishing to the user router-realm)
        if len(dl) > 0:
            return dl[0]

    @inlineCallbacks
    def onJoin(self, details):
        self.log.info(
            '{klass}: realm service session attached (details={details})',
            klass=self.__class__.__name__,
            details=details,
        )

        # register our API on all configured sessions and
        # then fire onready
        #
        on_ready = self.config.extra.get('onready', None) if self.config.extra else None
        try:
            for session, prefix, _ in self._expose_on_sessions:
                regs = yield session.register(self, options=RegisterOptions(details_arg='details'), prefix=prefix)
                for reg in regs:
                    if isinstance(reg, Registration):
                        self.log.debug('Registered WAMP meta procedure <{proc}> on realm "{realm}"',
                                       proc=reg.procedure, realm=session._realm)
                    elif isinstance(reg, Failure):
                        err = reg.value
                        if isinstance(err, ApplicationError):
                            self.log.warn('Failed to register WAMP meta procedure on realm "{realm}": {error} ("{message}")',
                                          realm=session._realm, error=err.error, message=err.error_message())
                        else:
                            self.log.warn('Failed to register WAMP meta procedure on realm "{realm}": {error}',
                                          realm=session._realm, error=str(err))
                    else:
                        self.log.warn('Failed to register WAMP meta procedure on realm "{realm}": {error}',
                                      realm=session._realm, error=str(reg))
        except Exception as e:
            self.log.failure()
            if on_ready:
                on_ready.errback(e)
            self.leave()
        else:
            self.log.info('{klass}: realm service session ready (realm_name="{realm}", on_ready={on_ready})',
                          klass=self.__class__.__name__, realm=self._realm, on_ready=on_ready)
            if on_ready:
                on_ready.callback(self)

    def onLeave(self, details):
        self.log.info('{klass}: realm service session left (realm_name="{realm}", details={details})',
                      klass=self.__class__.__name__, realm=self._realm, details=details)

    def onUserError(self, failure, msg):
        # ApplicationErrors are raised explicitly and on purpose to signal
        # the peer. The error has already been handled "correctly" from our side.
        # Anything else wasn't explicitly treated .. the error "escaped" explicit
        # processing on our side. It needs to be logged to the CB log, and CB code
        # needs to be expanded!
        if not isinstance(failure.value, ApplicationError):
            super(RouterServiceAgent, self).onUserError(failure, msg)

    @wamp.register(u'wamp.session.list')
    def session_list(self, filter_authroles=None, details=None):
        """
        Get list of session IDs of sessions currently joined on the router.

        :param filter_authroles: If provided, only return sessions with an authrole from this list.
        :type filter_authroles: None or list

        :returns: List of WAMP session IDs (order undefined).
        :rtype: list
        """
        self.log.info('wamp.session.list(filter_authroles={filter_authroles}, details={details})',
                      filter_authroles=filter_authroles, details=details)
        assert(filter_authroles is None or isinstance(filter_authroles, list))
        session_ids = []
        for session in self._router._session_id_to_session.values():
            if not is_restricted_session(session):
                if filter_authroles is None or session._session_details[u'authrole'] in filter_authroles:
                    session_ids.append(session._session_id)
        return session_ids

    @wamp.register(u'wamp.session.count')
    def session_count(self, filter_authroles=None, details=None):
        """
        Count sessions currently joined on the router.

        :param filter_authroles: If provided, only count sessions with an authrole from this list.
        :type filter_authroles: None or list

        :returns: Count of joined sessions.
        :rtype: int
        """
        assert(filter_authroles is None or isinstance(filter_authroles, list))
        session_count = 0
        for session in self._router._session_id_to_session.values():
            if not is_restricted_session(session):
                if filter_authroles is None or session._session_details[u'authrole'] in filter_authroles:
                    session_count += 1
        return session_count

    @wamp.register(u'wamp.session.get')
    def session_get(self, session_id, details=None):
        """
        Get details for given session.

        :param session_id: The WAMP session ID to retrieve details for.
        :type session_id: int

        :returns: WAMP session details.
        :rtype: dict or None
        """
        self.log.debug('wamp.session.get(session_id={session_id}, details={details})',
                       session_id=session_id, details=details)
        if session_id in self._router._session_id_to_session:
            session = self._router._session_id_to_session[session_id]
            if not is_restricted_session(session):
                session_info = session._session_details.marshal()
                session_info[u'transport'] = None if session._transport is None else session._transport._transport_info
                return session_info
            else:
                self.log.warn('wamp.session.get: denied returning restricted session {session_id}',
                              session_id=session_id)
        self.log.warn('wamp.session.get: session {session_id} not found', session_id=session_id)
        raise ApplicationError(
            ApplicationError.NO_SUCH_SESSION,
            u'no session with ID {} exists on this router'.format(session_id),
        )

    @wamp.register(u'wamp.session.add_testament')
    def session_add_testament(self, topic, args, kwargs, publish_options=None, scope=u"destroyed", details=None):
        """
        Add a testament to the current session.

        :param topic: The topic to publish the testament to.
        :type topic: str
        :param args: A list of arguments for the publish.
        :type args: list or tuple
        :param kwargs: A dict of keyword arguments for the publish.
        :type kwargs: dict
        :param publish_options: The publish options for the publish.
        :type publish_options: None or dict
        :param scope: The scope of the testament, either "detached" or "destroyed".
        :type scope: str

        :returns: The publication ID.
        :rtype: int
        """
        session = self._router._session_id_to_session[details.caller]

        if scope not in [u"destroyed", u"detached"]:
            raise ApplicationError(u"wamp.error.testament_error", u"scope must be destroyed or detached")

        pub_id = util.id()

        # Get the publish options, remove some explicit keys
        publish_options = publish_options or {}
        publish_options.pop("acknowledge", None)
        publish_options.pop("exclude_me", None)

        pub = message.Publish(
            request=pub_id,
            topic=topic,
            args=args,
            kwargs=kwargs,
            **publish_options)

        session._testaments[scope].append(pub)

        return pub_id

    @wamp.register(u'wamp.session.flush_testaments')
    def session_flush_testaments(self, scope=u"destroyed", details=None):
        """
        Flush the testaments of a given scope.

        :param scope: The scope to flush, either "detached" or "destroyed".
        :type scope: str

        :returns: Number of flushed testament events.
        :rtype: int
        """
        session = self._router._session_id_to_session[details.caller]

        if scope not in [u"destroyed", u"detached"]:
            raise ApplicationError(u"wamp.error.testament_error", u"scope must be destroyed or detached")

        flushed = len(session._testaments[scope])
        session._testaments[scope] = []
        return flushed

    @wamp.register(u'wamp.session.kill')
    def session_kill(self, session_id, reason=None, message=None, details=None):
        """
        Forcefully kill a session.

        :param session_id: The WAMP session ID of the session to kill.
        :type session_id: int
        :param reason: A reason URI provided to the killed session.
        :type reason: str or None
        """
        if session_id in self._router._session_id_to_session:
            session = self._router._session_id_to_session[session_id]
            if not is_restricted_session(session):
                session.leave(reason=reason, message=message)
                return
        raise ApplicationError(
            ApplicationError.NO_SUCH_SESSION,
            u'no session with ID {} exists on this router'.format(session_id),
        )

    @wamp.register(u'wamp.registration.remove_callee')
    def registration_remove_callee(self, registration_id, callee_id, reason=None, details=None):
        """
        Forcefully remove callee from registration.

        :param registration_id: The ID of the registration to remove the callee from.
        :type registration_id: int
        :param callee_id: The WAMP session ID of the callee to remove.
        :type callee_id: int
        """
        callee = self._router._session_id_to_session.get(callee_id, None)
        if not callee:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SESSION,
                u'no session with ID {} exists on this router'.format(callee_id),
            )

        registration = self._router._dealer._registration_map.get_observation_by_id(registration_id)
        if registration:
            if is_protected_uri(registration.uri, details):
                raise ApplicationError(
                    ApplicationError.NOT_AUTHORIZED,
                    message=u'not authorized to remove callee for protected URI "{}"'.format(registration.uri),
                )

            if callee not in registration.observers:
                raise ApplicationError(
                    ApplicationError.NO_SUCH_REGISTRATION,
                    u'session {} is not registered on registration {} on this dealer'.format(callee_id, registration_id),
                )

            self._router._dealer.removeCallee(registration, callee, reason=reason)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_REGISTRATION,
                u'no registration with ID {} exists on this dealer'.format(registration_id),
            )

    @wamp.register(u'wamp.subscription.remove_subscriber')
    def subscription_remove_subscriber(self, subscription_id, subscriber_id, reason=None, details=None):
        """
        Forcefully remove subscriber from subscription.

        :param subscription_id: The ID of the subscription to remove the subscriber from.
        :type subscription_id: int
        :param subscriber_id: The WAMP session ID of the subscriber to remove.
        :type subscriber_id: int
        """
        subscriber = self._router._session_id_to_session.get(subscriber_id, None)
        if not subscriber:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SESSION,
                message=u'no session with ID {} exists on this router'.format(subscriber_id),
            )

        subscription = self._router._broker._subscription_map.get_observation_by_id(subscription_id)
        if subscription:
            if is_protected_uri(subscription.uri, details):
                raise ApplicationError(
                    ApplicationError.NOT_AUTHORIZED,
                    message=u'not authorized to remove subscriber for protected URI "{}"'.format(subscription.uri),
                )

            if subscriber not in subscription.observers:
                raise ApplicationError(
                    ApplicationError.NO_SUCH_SUBSCRIPTION,
                    u'session {} is not subscribed on subscription {} on this broker'.format(subscriber_id, subscription_id),
                )

            self._router._broker.removeSubscriber(subscription, subscriber, reason=reason)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                u'no subscription with ID {} exists on this broker'.format(subscription_id),
            )

    @wamp.register(u'wamp.registration.get')
    def registration_get(self, registration_id, details=None):
        """
        Get registration details.

        :param registration_id: The ID of the registration to retrieve.
        :type registration_id: int

        :returns: The registration details.
        :rtype: dict
        """
        registration = self._router._dealer._registration_map.get_observation_by_id(registration_id)
        if registration:
            if is_protected_uri(registration.uri, details):
                raise ApplicationError(
                    ApplicationError.NOT_AUTHORIZED,
                    message=u'not authorized to get registration for protected URI "{}"'.format(registration.uri),
                )

            registration_details = {
                u'id': registration.id,
                u'created': registration.created,
                u'uri': registration.uri,
                u'match': registration.match,
                u'invoke': registration.extra.invoke,
            }
            return registration_details
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_REGISTRATION,
                u'no registration with ID {} exists on this dealer'.format(registration_id),
            )

    @wamp.register(u'wamp.subscription.get')
    def subscription_get(self, subscription_id, details=None):
        """
        Get subscription details.

        :param subscription_id: The ID of the subscription to retrieve.
        :type subscription_id: int

        :returns: The subscription details.
        :rtype: dict
        """
        subscription = self._router._broker._subscription_map.get_observation_by_id(subscription_id)
        if subscription:
            if is_protected_uri(subscription.uri, details):
                raise ApplicationError(
                    ApplicationError.NOT_AUTHORIZED,
                    message=u'not authorized to get subscription for protected URI "{}"'.format(subscription.uri),
                )

            subscription_details = {
                u'id': subscription.id,
                u'created': subscription.created,
                u'uri': subscription.uri,
                u'match': subscription.match,
            }
            return subscription_details
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                u'no subscription with ID {} exists on this broker'.format(subscription_id),
            )

    @wamp.register(u'wamp.registration.list')
    def registration_list(self, session_id=None, details=None):
        """
        List current registrations.

        :returns: A dictionary with three entries for the match policies 'exact', 'prefix'
            and 'wildcard', with a list of registration IDs for each.
        :rtype: dict
        """
        if session_id:
            s2r = self._router._dealer._session_to_registrations
            session = None
            if session_id in self._router._session_id_to_session:
                session = self._router._session_id_to_session[session_id]
                if is_restricted_session(session):
                    session = None
            if not session or session not in s2r:
                raise ApplicationError(
                    ApplicationError.NO_SUCH_SESSION,
                    u'no session with ID {} exists on this router'.format(session_id),
                )

            _regs = s2r[session]
            regs = {
                u'exact': [reg.id for reg in _regs if reg.match == u'exact'],
                u'prefix': [reg.id for reg in _regs if reg.match == u'prefix'],
                u'wildcard': [reg.id for reg in _regs if reg.match == u'wildcard'],
            }
            return regs
        else:
            registration_map = self._router._dealer._registration_map

            registrations_exact = []
            for registration in registration_map._observations_exact.values():
                if not is_protected_uri(registration.uri, details):
                    registrations_exact.append(registration.id)

            registrations_prefix = []
            for registration in registration_map._observations_prefix.values():
                if not is_protected_uri(registration.uri, details):
                    registrations_prefix.append(registration.id)

            registrations_wildcard = []
            for registration in registration_map._observations_wildcard.values():
                if not is_protected_uri(registration.uri, details):
                    registrations_wildcard.append(registration.id)

            regs = {
                u'exact': registrations_exact,
                u'prefix': registrations_prefix,
                u'wildcard': registrations_wildcard,
            }
            return regs

    @wamp.register(u'wamp.subscription.list')
    def subscription_list(self, session_id=None, details=None):
        """
        List current subscriptions.
        :returns: A dictionary with three entries for the match policies 'exact', 'prefix'
            and 'wildcard', with a list of subscription IDs for each.
        :rtype: dict
        """
        if session_id:
            s2s = self._router._broker._session_to_subscriptions
            session = None
            if session_id in self._router._session_id_to_session:
                session = self._router._session_id_to_session[session_id]
                if is_restricted_session(session):
                    session = None
            if not session or session not in s2s:
                raise ApplicationError(
                    ApplicationError.NO_SUCH_SESSION,
                    u'no session with ID {} exists on this router'.format(session_id),
                )
            _subs = s2s[session]
            subs = {
                u'exact': [sub.id for sub in _subs if sub.match == u'exact'],
                u'prefix': [sub.id for sub in _subs if sub.match == u'prefix'],
                u'wildcard': [sub.id for sub in _subs if sub.match == u'wildcard'],
            }
            return subs
        else:
            subscription_map = self._router._broker._subscription_map

            subscriptions_exact = []
            for subscription in subscription_map._observations_exact.values():
                if not is_protected_uri(subscription.uri, details):
                    subscriptions_exact.append(subscription.id)

            subscriptions_prefix = []
            for subscription in subscription_map._observations_prefix.values():
                if not is_protected_uri(subscription.uri, details):
                    subscriptions_prefix.append(subscription.id)

            subscriptions_wildcard = []
            # FIXME
            # for subscription in subscription_map._observations_wildcard.values():
            #     if not is_protected_uri(subscription.uri, details):
            #         subscriptions_wildcard.append(subscription.id)

            subs = {
                u'exact': subscriptions_exact,
                u'prefix': subscriptions_prefix,
                u'wildcard': subscriptions_wildcard,
            }
            return subs

    @wamp.register(u'wamp.registration.match')
    def registration_match(self, procedure, details=None):
        """
        Given a procedure URI, return the registration best matching the procedure.

        This essentially models what a dealer does for dispatching an incoming call.

        :param procedure: The procedure to match.
        :type procedure: str

        :returns: The ID of the best matching registration or ``None``.
        :rtype: int or None
        """
        registration = self._router._dealer._registration_map.best_matching_observation(procedure)
        if registration and not is_protected_uri(registration.uri, details):
            return registration.id
        else:
            return None

    @wamp.register(u'wamp.subscription.match')
    def subscription_match(self, topic, details=None):
        """
        Given a topic URI, returns all subscriptions matching the topic.

        This essentially models what a broker does for dispatching an incoming publication.

        :param topic: The topic to match.
        :type topic: str

        :returns: The IDs of all matching subscriptions or ``None``.
        :rtype: list or None
        """
        subscriptions = self._router._broker._subscription_map.match_observations(topic)
        if subscriptions:
            subscription_ids = []
            for subscription in subscriptions:
                if not is_protected_uri(subscription.uri, details):
                    subscription_ids.append(subscription.id)
            if subscription_ids:
                return subscription_ids
            else:
                return None
        else:
            return None

    @wamp.register(u'wamp.registration.lookup')
    def registration_lookup(self, procedure, options=None, details=None):
        """
        Given a procedure URI (and options), return the registration (if any) managing the procedure.

        This essentially models what a dealer does when registering for a procedure.

        :param procedure: The procedure to lookup the registration for.
        :type procedure: str
        :param options: Same options as when registering a procedure.
        :type options: dict or None

        :returns: The ID of the registration managing the procedure or ``None``.
:rtype: int or None """ options = options or {} match = options.get(u'match', u'exact') registration = self._router._dealer._registration_map.get_observation(procedure, match) if registration and not is_protected_uri(registration.uri, details): return registration.id else: return None @wamp.register(u'wamp.subscription.lookup') def subscription_lookup(self, topic, options=None, details=None): """ Given a topic URI (and options), return the subscription (if any) managing the topic. This essentially models what a broker does when subscribing for a topic. :param topic: The topic to lookup the subscription for. :type topic: str :param options: Same options as when subscribing to a topic. :type options: dict or None :returns: The ID of the subscription managing the topic or ``None``. :rtype: int or None """ options = options or {} match = options.get(u'match', u'exact') subscription = self._router._broker._subscription_map.get_observation(topic, match) if subscription and not is_protected_uri(subscription.uri, details): return subscription.id else: return None @wamp.register(u'wamp.registration.list_callees') def registration_list_callees(self, registration_id, details=None): """ Retrieve list of callees (WAMP session IDs) registered on (attached to) a registration. :param registration_id: The ID of the registration to get callees for. :type registration_id: int :returns: A list of WAMP session IDs of callees currently attached to the registration. :rtype: list """ registration = self._router._dealer._registration_map.get_observation_by_id(registration_id) if registration: if is_protected_uri(registration.uri, details): raise ApplicationError( ApplicationError.NOT_AUTHORIZED, message=u'not authorized to list callees for protected URI "{}"'.format(registration.uri), ) session_ids = [] for callee in registration.observers: session_ids.append(callee._session_id) return session_ids else: raise ApplicationError( ApplicationError.NO_SUCH_REGISTRATION, u'no registration with ID {} exists on this dealer'.format(registration_id), ) @wamp.register(u'wamp.subscription.list_subscribers') def subscription_list_subscribers(self, subscription_id, details=None): """ Retrieve list of subscribers (WAMP session IDs) subscribed on (attached to) a subscription. :param subscription_id: The ID of the subscription to get subscribers for. :type subscription_id: int :returns: A list of WAMP session IDs of subscribers currently attached to the subscription. :rtype: list """ subscription = self._router._broker._subscription_map.get_observation_by_id(subscription_id) if subscription: if is_protected_uri(subscription.uri, details): raise ApplicationError( ApplicationError.NOT_AUTHORIZED, message=u'not authorized to list subscribers for protected URI "{}"'.format(subscription.uri), ) session_ids = [] for subscriber in subscription.observers: session_ids.append(subscriber._session_id) return session_ids else: raise ApplicationError( ApplicationError.NO_SUCH_SUBSCRIPTION, u'no subscription with ID {} exists on this broker'.format(subscription_id), ) @wamp.register(u'wamp.registration.count_callees') def registration_count_callees(self, registration_id, details=None): """ Retrieve number of callees registered on (attached to) a registration. :param registration_id: The ID of the registration to get the number of callees for. :type registration_id: int :returns: Number of callees currently attached to the registration. 
        :rtype: int
        """
        registration = self._router._dealer._registration_map.get_observation_by_id(registration_id)
        if registration:
            if is_protected_uri(registration.uri, details):
                raise ApplicationError(
                    ApplicationError.NOT_AUTHORIZED,
                    message=u'not authorized to count callees for protected URI "{}"'.format(registration.uri),
                )
            return len(registration.observers)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_REGISTRATION,
                u'no registration with ID {} exists on this dealer'.format(registration_id),
            )

    @wamp.register(u'wamp.subscription.count_subscribers')
    def subscription_count_subscribers(self, subscription_id, details=None):
        """
        Retrieve number of subscribers subscribed on (attached to) a subscription.

        :param subscription_id: The ID of the subscription to get the number of subscribers for.
        :type subscription_id: int

        :returns: Number of subscribers currently attached to the subscription.
        :rtype: int
        """
        subscription = self._router._broker._subscription_map.get_observation_by_id(subscription_id)
        if subscription:
            if is_protected_uri(subscription.uri, details):
                raise ApplicationError(
                    ApplicationError.NOT_AUTHORIZED,
                    message=u'not authorized to count subscribers for protected URI "{}"'.format(subscription.uri),
                )
            return len(subscription.observers)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                u'no subscription with ID {} exists on this broker'.format(subscription_id),
            )

    @wamp.register(u'wamp.subscription.get_events')
    def subscription_get_events(self, subscription_id, limit=10, details=None):
        """
        Return history of events for given subscription.

        :param subscription_id: The ID of the subscription to get events for.
        :type subscription_id: int
        :param limit: Return at most this many events.
        :type limit: int

        :returns: List of events.
        :rtype: list
        """
        self.log.debug('subscription_get_events({subscription_id}, {limit})',
                       subscription_id=subscription_id, limit=limit)
        if not self._router._broker._event_store:
            raise ApplicationError(
                u'wamp.error.history_unavailable',
                message=u'event history not available or enabled',
            )

        subscription = self._router._broker._subscription_map.get_observation_by_id(subscription_id)
        if subscription:
            if is_protected_uri(subscription.uri, details):
                raise ApplicationError(
                    ApplicationError.NOT_AUTHORIZED,
                    message=u'not authorized to retrieve event history for protected URI "{}"'.format(subscription.uri),
                )
            events = self._router._broker._event_store.get_events(subscription_id, limit)
            if events is None:
                # a return value of None in above signals that event history really
                # is not available/enabled (which is different from an empty history!)
                raise ApplicationError(
                    u'wamp.error.history_unavailable',
                    message=u'event history for the given subscription is not available or enabled',
                )
            else:
                return events
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                u'no subscription with ID {} exists on this broker'.format(subscription_id),
            )

    def schema_describe(self, uri=None, details=None):
        """
        Describe a given URI or all URIs.

        :param uri: The URI to describe or ``None`` to retrieve all declarations.
        :type uri: str

        :returns: A list of WAMP schema declarations.
        :rtype: list
        """
        raise NotImplementedError('not implemented')

    def schema_define(self, uri, schema, details=None):
        """
        Declare metadata for a given URI.

        :param uri: The URI for which to declare metadata.
        :type uri: str
        :param schema: The WAMP schema declaration for the URI or `None` to remove
            any declarations for the URI.
        :type schema: dict

        :returns: ``None`` if the declaration was unchanged, ``True`` if the declaration
            was new, ``False`` if the declaration existed but was modified.
        :rtype: bool or None
        """
        raise NotImplementedError('not implemented')
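# The WAMP meta API implemented above is consumed like any other set of WAMP
# procedures. A minimal client-side sketch (hypothetical wiring; assumes an
# authorized, connected autobahn ApplicationSession on the same realm):
#
#     from twisted.internet.defer import inlineCallbacks
#     from autobahn.twisted.wamp import ApplicationSession
#
#     class MetaApiClient(ApplicationSession):
#
#         @inlineCallbacks
#         def onJoin(self, details):
#             # registration IDs, grouped by match policy
#             regs = yield self.call(u'wamp.registration.list')
#             for reg_id in regs[u'exact']:
#                 reg = yield self.call(u'wamp.registration.get', reg_id)
#                 callees = yield self.call(u'wamp.registration.list_callees', reg_id)
#                 print(reg[u'uri'], len(callees))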
class PendingAuthScram(PendingAuth): """ Pending SCRAM authentication. """ log = make_logger() AUTHMETHOD = 'scram' def __init__(self, pending_session_id, transport_info, realm_container, config): super(PendingAuthScram, self).__init__( pending_session_id, transport_info, realm_container, config, ) # https://tools.ietf.org/html/rfc5056 # https://tools.ietf.org/html/rfc5929 # https://www.ietf.org/proceedings/90/slides/slides-90-uta-0.pdf channel_id_hex = transport_info.get('channel_id', None) if channel_id_hex: self._channel_id = binascii.a2b_hex(channel_id_hex) else: self._channel_id = None def hello(self, realm, details): # the channel binding requested by the client authenticating # client must send "nonce" in details, and MAY send "gs2_cbind_flag" self._client_nonce = details.authextra.get("nonce", None) if self._client_nonce is None: return types.Deny(message='client must send a nonce') try: self._client_nonce = base64.b64decode(self._client_nonce) except Exception: return types.Deny(message='client nonce must be base64') # FIXME TODO: channel-binding (currently "gs2_cbind_flag" in # the draft spec) # remember the realm the client requested to join (if any) self._realm = realm # remember the authid the client wants to identify as (if any) # XXX should we just "saslprep()" it here? self._authid = details.authid if self._authid is None: return types.Deny( message='cannot identify client: no authid requested') self._session_details[ 'authmethod'] = self._authmethod # from AUTHMETHOD, via base self._session_details['authextra'] = details.authextra def on_authenticate_ok(principal): self._salt = binascii.a2b_hex( principal['salt']) # error if no salt per-user self._iterations = principal['iterations'] self._memory = principal['memory'] self._kdf = principal['kdf'] self._stored_key = binascii.a2b_hex(principal['stored-key']) # do we actually need the server-key? can we compute it ourselves? 
            self._server_key = binascii.a2b_hex(principal['server-key'])

            error = self._assign_principal(principal)
            if error:
                return error

            # XXX TODO this needs to include (optional) channel-binding
            extra = self._compute_challenge()
            return types.Challenge(self._authmethod, extra)

        # use static principal database from configuration
        if self._config['type'] == 'static':
            self._authprovider = 'static'

            if self._authid in self._config.get('principals', {}):
                # we've already validated the configuration
                return on_authenticate_ok(self._config['principals'][self._authid])
            else:
                self.log.debug("No principal found for {authid}", authid=details.authid)
                return types.Deny(
                    message='no principal with authid "{}" exists'.format(details.authid))

        elif self._config['type'] == 'dynamic':
            error = self._init_dynamic_authenticator()
            if error:
                return error

            d = self._authenticator_session.call(self._authenticator, realm,
                                                 details.authid, self._session_details)

            def on_authenticate_error(err):
                return self._marshal_dynamic_authenticator_error(err)

            d.addCallbacks(on_authenticate_ok, on_authenticate_error)
            return d

        else:
            # should not arrive here, as config errors should be caught earlier
            return types.Deny(
                message='invalid authentication configuration (authentication type "{}" is unknown)'.format(
                    self._config['type']))

    # XXX TODO this needs to include (optional) channel-binding
    def _compute_challenge(self):
        self._server_nonce = self._client_nonce + os.urandom(16)

        challenge = {
            "nonce": base64.b64encode(self._server_nonce).decode('ascii'),
            "kdf": self._kdf,
            "salt": base64.b64encode(self._salt).decode('ascii'),
            "iterations": self._iterations,
            "memory": self._memory,
        }
        return challenge

    def authenticate(self, signed_message):
        """
        Verify the signed message sent by the client.

        :param signed_message: the base64-encoded result "ClientProof"
            from the SCRAM protocol
        """
        channel_binding = ""
        client_nonce = base64.b64encode(self._client_nonce).decode('ascii')
        server_nonce = base64.b64encode(self._server_nonce).decode('ascii')
        salt = base64.b64encode(self._salt).decode('ascii')
        auth_message = (
            "{client_first_bare},{server_first},{client_final_no_proof}".format(
                client_first_bare="n={},r={}".format(saslprep(self._authid), client_nonce),
                server_first="r={},s={},i={}".format(server_nonce, salt, self._iterations),
                client_final_no_proof="c={},r={}".format(channel_binding, server_nonce),
            ))

        received_client_proof = base64.b64decode(signed_message)

        client_signature = hmac.new(self._stored_key, auth_message.encode('ascii'),
                                    hashlib.sha256).digest()
        recovered_client_key = util.xor(client_signature, received_client_proof)
        recovered_stored_key = hashlib.new('sha256', recovered_client_key).digest()

        # if we adjust self._authextra before _accept() it gets sent
        # back to the client
        server_signature = hmac.new(self._server_key, auth_message.encode('ascii'),
                                    hashlib.sha256).digest()
        if self._authextra is None:
            self._authextra = {}
        self._authextra['scram_server_signature'] = base64.b64encode(
            server_signature).decode('ascii')

        if hmac.compare_digest(recovered_stored_key, self._stored_key):
            return self._accept()

        self.log.error("SCRAM authentication failed for '{authid}'", authid=self._authid)
        return types.Deny(message='SCRAM authentication failed')
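# A small, self-contained sketch of the SCRAM proof check performed in
# authenticate() above (cf. RFC 5802): the client sends
# ClientProof = ClientKey XOR HMAC(StoredKey, AuthMessage), so the server
# recovers ClientKey and verifies that H(ClientKey) equals its StoredKey.
# This helper is illustrative only and not part of the original module.

import hashlib
import hmac as _hmac


def _scram_verify(stored_key, auth_message, client_proof):
    # ClientSignature := HMAC(StoredKey, AuthMessage)
    client_signature = _hmac.new(stored_key, auth_message, hashlib.sha256).digest()
    # ClientKey := ClientProof XOR ClientSignature
    client_key = bytes(a ^ b for a, b in zip(client_proof, client_signature))
    # StoredKey := H(ClientKey); compare in constant time
    return _hmac.compare_digest(hashlib.sha256(client_key).digest(), stored_key)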
class Node(object):
    """
    A Crossbar.io node consists of a controller process and one or more
    worker processes. A single Crossbar.io node runs exactly one instance
    of this class, hence this class can be considered a system singleton.
    """
    log = make_logger()

    def __init__(self, cbdir=None, reactor=None):
        """
        :param cbdir: The node directory to run from.
        :type cbdir: unicode
        :param reactor: Reactor to run on.
        :type reactor: obj or None
        """
        # node directory
        self._cbdir = cbdir or u'.'

        # reactor we should run on
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        # the node's management realm when running in managed mode (this comes from CDC!)
        self._management_realm = None

        # the node's ID when running in managed mode (this comes from CDC!)
        self._node_id = None

        # node extra when running in managed mode (this comes from CDC!)
        self._node_extra = None

        # the node controller realm
        self._realm = u'crossbar'

        # config of this node.
        self._config = None

        # node private key autobahn.wamp.cryptosign.SigningKey
        self._node_key = None

        # node controller session (a singleton ApplicationSession embedded
        # in the local node router)
        self._controller = None

        # when running in managed mode, this will hold the bridge session
        # attached to the local management router
        self._bridge_session = None

        # when running in managed mode, this will hold the uplink session to CDC
        self._manager = None

        # node shutdown triggers, one or more of checkconfig.NODE_SHUTDOWN_MODES
        self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT]

        # map from router worker IDs to realm templates
        self._realm_templates = {}

        # for node elements started under specific IDs, and where
        # the node configuration does not specify an ID, use a generic
        # name numbered sequentially using the counters here
        self._worker_no = 1
        self._realm_no = 1
        self._role_no = 1
        self._uplink_no = 1
        self._connection_no = 1
        self._transport_no = 1
        self._component_no = 1

    def maybe_generate_key(self, cbdir, privkey_path=u'key.priv', pubkey_path=u'key.pub'):

        privkey_path = os.path.join(cbdir, privkey_path)
        pubkey_path = os.path.join(cbdir, pubkey_path)

        if os.path.exists(privkey_path):

            # node private key seems to exist already .. check!

            priv_tags = _parse_keyfile(privkey_path, private=True)
            for tag in [u'creator', u'created-at', u'machine-id',
                        u'public-key-ed25519', u'private-key-ed25519']:
                if tag not in priv_tags:
                    raise Exception(
                        "Corrupt node private key file {} - {} tag not found".format(privkey_path, tag))

            privkey_hex = priv_tags[u'private-key-ed25519']
            privkey = SigningKey(privkey_hex, encoder=HexEncoder)
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            if priv_tags[u'public-key-ed25519'] != pubkey_hex:
                raise Exception(
                    ("Inconsistent node private key file {} - public-key-ed25519 doesn't"
                     " correspond to private-key-ed25519").format(privkey_path))

            if os.path.exists(pubkey_path):
                pub_tags = _parse_keyfile(pubkey_path, private=False)
                for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519']:
                    if tag not in pub_tags:
                        raise Exception(
                            "Corrupt node public key file {} - {} tag not found".format(pubkey_path, tag))

                if pub_tags[u'public-key-ed25519'] != pubkey_hex:
                    raise Exception(
                        ("Inconsistent node public key file {} - public-key-ed25519 doesn't"
                         " correspond to private-key-ed25519").format(pubkey_path))
            else:
                self.log.info(
                    "Node public key file {pub_path} not found - re-creating from node private key file {priv_path}",
                    pub_path=pubkey_path,
                    priv_path=privkey_path,
                )
                pub_tags = OrderedDict([
                    (u'creator', priv_tags[u'creator']),
                    (u'created-at', priv_tags[u'created-at']),
                    (u'machine-id', priv_tags[u'machine-id']),
                    (u'public-key-ed25519', pubkey_hex),
                ])
                msg = u'Crossbar.io node public key\n\n'
                _write_node_key(pubkey_path, pub_tags, msg)

            self.log.debug("Node key already exists (public key: {hex})", hex=pubkey_hex)

        else:
            # node private key does not yet exist: generate one

            privkey = SigningKey.generate()
            privkey_hex = privkey.encode(encoder=HexEncoder).decode('ascii')
            pubkey = privkey.verify_key
            pubkey_hex = pubkey.encode(encoder=HexEncoder).decode('ascii')

            # first, write the public file
            tags = OrderedDict([
                (u'creator', _creator()),
                (u'created-at', utcnow()),
                (u'machine-id', _machine_id()),
                (u'public-key-ed25519', pubkey_hex),
            ])
            msg = u'Crossbar.io node public key\n\n'
            _write_node_key(pubkey_path, tags, msg)

            # now, add the private key and write the private file
            tags[u'private-key-ed25519'] = privkey_hex
            msg = u'Crossbar.io node private key - KEEP THIS SAFE!\n\n'
            _write_node_key(privkey_path, tags, msg)

            self.log.info("New node key pair generated!")

        # fix file permissions on node public/private key files
        # note: we use decimals instead of octals as octal literals have changed between Py2/3
        #
        if os.stat(pubkey_path).st_mode & 511 != 420:  # 420 (decimal) == 0644 (octal)
            os.chmod(pubkey_path, 420)
            self.log.info("File permissions on node public key fixed!")

        if os.stat(privkey_path).st_mode & 511 != 384:  # 384 (decimal) == 0600 (octal)
            os.chmod(privkey_path, 384)
            self.log.info("File permissions on node private key fixed!")

        self._node_key = cryptosign.SigningKey(privkey)

        return pubkey_hex

    def load(self, configfile=None):
        """
        Check and load the node configuration (usually, from ".crossbar/config.json")
        or load built-in CDC default config.
""" if configfile: configpath = os.path.join(self._cbdir, configfile) self.log.debug("Loading node configuration from '{configpath}' ..", configpath=configpath) # the following will read the config, check the config and replace # environment variable references in configuration values ("${MYVAR}") and # finally return the parsed configuration object self._config = checkconfig.check_config_file(configpath) self.log.info("Node configuration loaded from '{configfile}'", configfile=configfile) else: self._config = {u'version': 2, u'controller': {}, u'workers': []} checkconfig.check_config(self._config) self.log.info("Node configuration loaded from built-in config.") @inlineCallbacks def start(self, cdc_mode=False): """ Starts this node. This will start a node controller and then spawn new worker processes as needed. """ if not self._config: raise Exception("No node configuration loaded") # get controller config/options # controller_config = self._config.get('controller', {}) controller_options = controller_config.get('options', {}) # set controller process title # try: import setproctitle except ImportError: self.log.warn( "Warning, could not set process title (setproctitle not installed)" ) else: setproctitle.setproctitle( controller_options.get('title', 'crossbar-controller')) # router and factory that creates router sessions # self._router_factory = RouterFactory() self._router_session_factory = RouterSessionFactory( self._router_factory) # create a new router for the realm # rlm_config = {'name': self._realm} rlm = RouterRealm(None, rlm_config) router = self._router_factory.start_realm(rlm) # always add a realm service session # cfg = ComponentConfig(self._realm) rlm.session = RouterServiceSession(cfg, router) self._router_session_factory.add(rlm.session, authrole=u'trusted') # add a router bridge session when running in managed mode # if cdc_mode: self._bridge_session = NodeManagementBridgeSession(cfg) self._router_session_factory.add(self._bridge_session, authrole=u'trusted') else: self._bridge_session = None # Node shutdown mode # if cdc_mode: # in managed mode, a node - by default - only shuts down when explicitly asked to, # or upon a fatal error in the node controller self._node_shutdown_triggers = [ checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED ] else: # in standalone mode, a node - by default - is immediately shutting down whenever # a worker exits (successfully or with error) self._node_shutdown_triggers = [ checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT ] # allow to override node shutdown triggers # if 'shutdown' in controller_options: self.log.info( "Overriding default node shutdown triggers with {triggers} from node config", triggers=controller_options['shutdown']) self._node_shutdown_triggers = controller_options['shutdown'] else: self.log.info("Using default node shutdown triggers {triggers}", triggers=self._node_shutdown_triggers) # add the node controller singleton session # self._controller = NodeControllerSession(self) self._router_session_factory.add(self._controller, authrole=u'trusted') # detect WAMPlets (FIXME: remove this!) 
        #
        wamplets = self._controller._get_wamplets()
        if len(wamplets) > 0:
            self.log.info("Detected {wamplets} WAMPlets in environment:", wamplets=len(wamplets))
            for wpl in wamplets:
                self.log.info("WAMPlet {dist}.{name}", dist=wpl['dist'], name=wpl['name'])
        else:
            self.log.debug("No WAMPlets detected in environment.")

        panic = False
        try:
            # startup the node from local node configuration
            #
            yield self._startup(self._config)

            # connect to CDC when running in managed mode
            #
            if cdc_mode:
                cdc_config = controller_config.get('cdc', {
                    # CDC connecting transport
                    u'transport': {
                        u'type': u'websocket',
                        u'url': u'wss://cdc.crossbario.com/ws',
                        u'endpoint': {
                            u'type': u'tcp',
                            u'host': u'cdc.crossbario.com',
                            u'port': 443,
                            u'timeout': 5,
                            u'tls': {
                                u'hostname': u'cdc.crossbario.com'
                            }
                        }
                    }
                })

                transport = cdc_config[u'transport']
                hostname = None
                if u'tls' in transport[u'endpoint']:
                    hostname = transport[u'endpoint'][u'tls'][u'hostname']

                runner = ApplicationRunner(
                    url=transport['url'],
                    realm=None,
                    extra=None,
                    ssl=optionsForClientTLS(hostname) if hostname else None,
                )

                def make(config):
                    # extra info forwarded to CDC client session
                    extra = {
                        'node': self,
                        'on_ready': Deferred(),
                        'on_exit': Deferred(),
                        'node_key': self._node_key,
                    }

                    @inlineCallbacks
                    def on_ready(res):
                        self._manager, self._management_realm, self._node_id, self._node_extra = res

                        if self._bridge_session:
                            try:
                                yield self._bridge_session.attach_manager(
                                    self._manager, self._management_realm, self._node_id)
                                status = yield self._manager.call(
                                    u'com.crossbario.cdc.general.get_status@1')
                            except:
                                self.log.failure()
                            else:
                                self.log.info(
                                    'Connected to CDC for management realm "{realm}" (current time is {now})',
                                    realm=self._management_realm, now=status[u'now'])
                        else:
                            self.log.warn(
                                'Uplink CDC session established, but no bridge session setup!')

                    @inlineCallbacks
                    def on_exit(res):
                        if self._bridge_session:
                            try:
                                yield self._bridge_session.detach_manager()
                            except:
                                self.log.failure()
                            else:
                                self.log.info(
                                    'Disconnected from CDC for management realm "{realm}"',
                                    realm=self._management_realm)
                        else:
                            self.log.warn(
                                'Uplink CDC session lost, but no bridge session setup!')

                        self._manager, self._management_realm, self._node_id, self._node_extra = None, None, None, None

                    extra['on_ready'].addCallback(on_ready)
                    extra['on_exit'].addCallback(on_exit)

                    config = ComponentConfig(extra=extra)
                    session = NodeManagementSession(config)
                    return session

                self.log.info("Connecting to CDC at '{url}' ..", url=transport[u'url'])
                yield runner.run(make, start_reactor=False, auto_reconnect=True)

            # Notify systemd that crossbar is fully up and running
            # (this has no effect on non-systemd platforms)
            try:
                import sdnotify
                sdnotify.SystemdNotifier().notify("READY=1")
            except:
                pass

        except ApplicationError as e:
            panic = True
            self.log.error("{msg}", msg=e.error_message())

        except Exception:
            panic = True
            traceback.print_exc()

        if panic:
            try:
                self._reactor.stop()
            except twisted.internet.error.ReactorNotRunning:
                pass

    @inlineCallbacks
    def _startup(self, config):
        """
        Startup elements in the node as specified in the provided node configuration.
""" self.log.info('Configuring node from config ..') # call options we use to call into the local node management API call_options = CallOptions() # fake call details we use to call into the local node management API call_details = CallDetails(caller=0) # get contoller configuration subpart controller = config.get('controller', {}) # start Manhole in node controller if 'manhole' in controller: yield self._controller.start_manhole(controller['manhole'], details=call_details) # startup all workers for worker in config.get('workers', []): # worker ID if 'id' in worker: worker_id = worker.pop('id') else: worker_id = 'worker-{:03d}'.format(self._worker_no) self._worker_no += 1 # worker type - a type of working process from the following fixed list worker_type = worker['type'] assert (worker_type in ['router', 'container', 'guest', 'websocket-testee']) # set logname depending on worker type if worker_type == 'router': worker_logname = "Router '{}'".format(worker_id) elif worker_type == 'container': worker_logname = "Container '{}'".format(worker_id) elif worker_type == 'websocket-testee': worker_logname = "WebSocketTestee '{}'".format(worker_id) elif worker_type == 'guest': worker_logname = "Guest '{}'".format(worker_id) else: raise Exception("logic error") # any worker specific options worker_options = worker.get('options', {}) # native worker processes: router, container, websocket-testee if worker_type in ['router', 'container', 'websocket-testee']: # start a new native worker process .. if worker_type == 'router': yield self._controller.start_router(worker_id, worker_options, details=call_details) elif worker_type == 'container': yield self._controller.start_container( worker_id, worker_options, details=call_details) elif worker_type == 'websocket-testee': yield self._controller.start_websocket_testee( worker_id, worker_options, details=call_details) else: raise Exception("logic error") # setup native worker generic stuff if 'pythonpath' in worker_options: added_paths = yield self._controller.call( 'crossbar.worker.{}.add_pythonpath'.format(worker_id), worker_options['pythonpath'], options=call_options) self.log.debug("{worker}: PYTHONPATH extended for {paths}", worker=worker_logname, paths=added_paths) if 'cpu_affinity' in worker_options: new_affinity = yield self._controller.call( 'crossbar.worker.{}.set_cpu_affinity'.format( worker_id), worker_options['cpu_affinity'], options=call_options) self.log.debug("{worker}: CPU affinity set to {affinity}", worker=worker_logname, affinity=new_affinity) if 'manhole' in worker: yield self._controller.call( 'crossbar.worker.{}.start_manhole'.format(worker_id), worker['manhole'], options=call_options) self.log.debug("{worker}: manhole started", worker=worker_logname) # setup router worker if worker_type == 'router': # start realms on router for realm in worker.get('realms', []): # start realm if 'id' in realm: realm_id = realm.pop('id') else: realm_id = 'realm-{:03d}'.format(self._realm_no) self._realm_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_realm'.format( worker_id), realm_id, realm, options=call_options) self.log.info( "{worker}: realm '{realm_id}' (named '{realm_name}') started", worker=worker_logname, realm_id=realm_id, realm_name=realm['name']) # add roles to realm for role in realm.get('roles', []): if 'id' in role: role_id = role.pop('id') else: role_id = 'role-{:03d}'.format(self._role_no) self._role_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_realm_role'. 
format(worker_id), realm_id, role_id, role, options=call_options) self.log.info( "{logname}: role '{role}' (named '{role_name}') started on realm '{realm}'", logname=worker_logname, role=role_id, role_name=role['name'], realm=realm_id, ) # start uplinks for realm for uplink in realm.get('uplinks', []): if 'id' in uplink: uplink_id = uplink.pop('id') else: uplink_id = 'uplink-{:03d}'.format( self._uplink_no) self._uplink_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_realm_uplink'. format(worker_id), realm_id, uplink_id, uplink, options=call_options) self.log.info( "{logname}: uplink '{uplink}' started on realm '{realm}'", logname=worker_logname, uplink=uplink_id, realm=realm_id, ) # start connections (such as PostgreSQL database connection pools) # to run embedded in the router for connection in worker.get('connections', []): if 'id' in connection: connection_id = connection.pop('id') else: connection_id = 'connection-{:03d}'.format( self._connection_no) self._connection_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_connection'.format( worker_id), connection_id, connection, options=call_options) self.log.info( "{logname}: connection '{connection}' started", logname=worker_logname, connection=connection_id, ) # start components to run embedded in the router for component in worker.get('components', []): if 'id' in component: component_id = component.pop('id') else: component_id = 'component-{:03d}'.format( self._component_no) self._component_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_component'.format( worker_id), component_id, component, options=call_options) self.log.info( "{logname}: component '{component}' started", logname=worker_logname, component=component_id, ) # start transports on router for transport in worker['transports']: if 'id' in transport: transport_id = transport.pop('id') else: transport_id = 'transport-{:03d}'.format( self._transport_no) self._transport_no += 1 yield self._controller.call( 'crossbar.worker.{}.start_router_transport'.format( worker_id), transport_id, transport, options=call_options) self.log.info( "{logname}: transport '{tid}' started", logname=worker_logname, tid=transport_id, ) # setup container worker elif worker_type == 'container': # if components exit "very soon after" we try to # start them, we consider that a failure and shut # our node down. We remove this subscription 2 # seconds after we're done starting everything # (see below). 
                # This is necessary as
                # start_container_component returns as soon as
                # we've established a connection to the component
                def component_exited(info):
                    component_id = info.get("id")
                    self.log.critical(
                        "Component '{component_id}' failed to start; shutting down node.",
                        component_id=component_id)
                    try:
                        self._reactor.stop()
                    except twisted.internet.error.ReactorNotRunning:
                        pass

                topic = 'crossbar.worker.{}.container.on_component_stop'.format(worker_id)
                component_stop_sub = yield self._controller.subscribe(component_exited, topic)

                # start connections (such as PostgreSQL database connection pools)
                # to run embedded in the container
                #
                for connection in worker.get('connections', []):

                    if 'id' in connection:
                        connection_id = connection.pop('id')
                    else:
                        connection_id = 'connection-{:03d}'.format(self._connection_no)
                        self._connection_no += 1

                    yield self._controller.call(
                        'crossbar.worker.{}.start_connection'.format(worker_id),
                        connection_id, connection, options=call_options)
                    self.log.info(
                        "{logname}: connection '{connection}' started",
                        logname=worker_logname, connection=connection_id,
                    )

                # start components to run embedded in the container
                #
                for component in worker.get('components', []):

                    if 'id' in component:
                        component_id = component.pop('id')
                    else:
                        component_id = 'component-{:03d}'.format(self._component_no)
                        self._component_no += 1

                    yield self._controller.call(
                        'crossbar.worker.{}.start_container_component'.format(worker_id),
                        component_id, component, options=call_options)
                    self.log.info("{worker}: component '{component_id}' started",
                                  worker=worker_logname, component_id=component_id)

                # after 2 seconds, consider all the application components running
                self._reactor.callLater(2, component_stop_sub.unsubscribe)

            # setup websocket-testee worker
            elif worker_type == 'websocket-testee':

                # start transport on websocket-testee
                transport = worker['transport']
                transport_id = 'transport-{:03d}'.format(self._transport_no)
                self._transport_no += 1

                yield self._controller.call(
                    'crossbar.worker.{}.start_websocket_testee_transport'.format(worker_id),
                    transport_id, transport, options=call_options)
                self.log.info(
                    "{logname}: transport '{tid}' started",
                    logname=worker_logname, tid=transport_id,
                )

            elif worker_type == 'guest':

                # start guest worker
                #
                yield self._controller.start_guest(worker_id, worker, details=call_details)
                self.log.info("{worker}: started", worker=worker_logname)

            else:
                raise Exception("logic error")
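# For reference, a minimal node configuration of the shape _startup() consumes
# might look as follows (a sketch only; keys mirror those read above, values
# are illustrative):

_EXAMPLE_NODE_CONFIG = {
    u'version': 2,
    u'controller': {},
    u'workers': [
        {
            u'type': u'router',
            u'realms': [
                {
                    u'name': u'realm1',
                    u'roles': [{u'name': u'anonymous'}],
                }
            ],
            u'transports': [
                {
                    u'type': u'websocket',
                    u'endpoint': {u'type': u'tcp', u'port': 8080},
                }
            ],
        }
    ],
}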
class WebSocketAdapterProtocol(twisted.internet.protocol.Protocol): """ Adapter class for Twisted WebSocket client and server protocols. """ log = txaio.make_logger() peer = None peer_transport = None def connectionMade(self): # the peer we are connected to try: self.peer = peer2str(self.transport.getPeer()) except AttributeError: # ProcessProtocols lack getPeer() self.peer = 'process:{}'.format(self.transport.pid) self.peer_transport = 'websocket' self._connectionMade() self.log.debug('Connection made to {peer}', peer=self.peer) # Set "Nagle" try: self.transport.setTcpNoDelay(self.tcpNoDelay) except: # don't touch this! does not work: AttributeError, OSError # eg Unix Domain sockets throw Errno 22 on this pass def connectionLost(self, reason): if isinstance(reason.value, ConnectionDone): self.log.debug("Connection to/from {peer} was closed cleanly", peer=self.peer) elif _is_tls_error(reason.value): self.log.error(_maybe_tls_reason(reason.value)) elif isinstance(reason.value, ConnectionAborted): self.log.debug("Connection to/from {peer} was aborted locally", peer=self.peer) elif isinstance(reason.value, ConnectionLost): message = str(reason.value) if hasattr(reason.value, 'message'): message = reason.value.message self.log.debug( "Connection to/from {peer} was lost in a non-clean fashion: {message}", peer=self.peer, message=message, ) # at least: FileDescriptorOverrun, ConnectionFdescWentAway - but maybe others as well? else: self.log.debug( "Connection to/from {peer} lost ({error_type}): {error})", peer=self.peer, error_type=type(reason.value), error=reason.value) self._connectionLost(reason) def dataReceived(self, data): self._dataReceived(data) def _closeConnection(self, abort=False): if abort and hasattr(self.transport, 'abortConnection'): self.transport.abortConnection() else: # e.g. ProcessProtocol lacks abortConnection() self.transport.loseConnection() def _onOpen(self): self.onOpen() def _onMessageBegin(self, isBinary): self.onMessageBegin(isBinary) def _onMessageFrameBegin(self, length): self.onMessageFrameBegin(length) def _onMessageFrameData(self, payload): self.onMessageFrameData(payload) def _onMessageFrameEnd(self): self.onMessageFrameEnd() def _onMessageFrame(self, payload): self.onMessageFrame(payload) def _onMessageEnd(self): self.onMessageEnd() def _onMessage(self, payload, isBinary): self.onMessage(payload, isBinary) def _onPing(self, payload): self.onPing(payload) def _onPong(self, payload): self.onPong(payload) def _onClose(self, wasClean, code, reason): self.onClose(wasClean, code, reason) def registerProducer(self, producer, streaming): """ Register a Twisted producer with this protocol. :param producer: A Twisted push or pull producer. :type producer: object :param streaming: Producer type. :type streaming: bool """ self.transport.registerProducer(producer, streaming) def unregisterProducer(self): """ Unregister Twisted producer with this protocol. """ self.transport.unregisterProducer()
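# registerProducer()/unregisterProducer() above delegate directly to the
# underlying Twisted transport. For illustration, a minimal push producer
# that could be registered with this protocol (hypothetical example):

from zope.interface import implementer
from twisted.internet.interfaces import IPushProducer


@implementer(IPushProducer)
class _ExamplePushProducer(object):
    """
    Toy producer: flips a flag the application can poll before writing.
    """
    def __init__(self):
        self.paused = False

    def pauseProducing(self):
        # transport send buffer is full - stop writing until resumed
        self.paused = True

    def resumeProducing(self):
        self.paused = False

    def stopProducing(self):
        self.paused = True

# usage: protocol.registerProducer(_ExamplePushProducer(), True)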
class NativeWorkerClientProtocol(WampWebSocketClientProtocol):

    log = make_logger()

    def connectionMade(self):
        WampWebSocketClientProtocol.connectionMade(self)
        self._pid = self.transport.pid
        self.factory.proto = self

        # native workers are implicitly trusted
        self._authid = 'crossbar.process.{}'.format(self._pid)
        self._authrole = self.factory._authrole

        # the worker is actively spawned by the node controller,
        # and we talk over the pipes that were created during
        # process creation. this establishes implicit trust.
        self._authmethod = 'trusted'

        # the trust is established implicitly by the way the
        # client (worker) is created
        self._authprovider = 'programcode'

        # FIXME / CHECKME
        self._cbtid = None
        self._transport_info = None

    def connectionLost(self, reason):
        if isinstance(reason.value, ConnectionDone):
            self.log.info("Native worker connection closed cleanly.")
        else:
            self.log.warn("Native worker connection closed uncleanly: {reason}",
                          reason=reason.value)

        WampWebSocketClientProtocol.connectionLost(self, reason)
        self.factory.proto = None

        if isinstance(reason.value, ProcessTerminated):
            if not self.factory._on_ready.called:
                # the worker was never ready in the first place ..
                self.factory._on_ready.errback(reason)
            else:
                # the worker _did_ run (was ready before), but now exited with error
                if not self.factory._on_exit.called:
                    self.factory._on_exit.errback(reason)
                else:
                    self.log.error(
                        "unhandled code path (1) in WorkerClientProtocol.connectionLost: {reason}",
                        reason=reason.value)
        elif isinstance(reason.value, (ProcessDone, ConnectionDone)):
            # the worker exited cleanly
            if not self.factory._on_exit.called:
                self.factory._on_exit.callback(None)
            else:
                self.log.error(
                    "unhandled code path (2) in WorkerClientProtocol.connectionLost: {reason}",
                    reason=reason.value)
        else:
            # should not arrive here
            self.log.error(
                "unhandled code path (3) in WorkerClientProtocol.connectionLost: {reason}",
                reason=reason.value)
class WampRawSocketServerFactory(rawsocket.WampRawSocketServerFactory): """ Crossbar.io WAMP-over-RawSocket server factory. """ protocol = WampRawSocketServerProtocol log = make_logger() def __init__(self, factory, config): # remember transport configuration # self._config = config # explicit list of WAMP serializers # if u'serializers' in config: serializers = [] sers = set(config['serializers']) if u'cbor' in sers: # try CBOR WAMP serializer try: from autobahn.wamp.serializer import CBORSerializer serializers.append(CBORSerializer()) except ImportError: self.log.warn( "Warning: could not load WAMP-CBOR serializer") else: sers.discard(u'cbor') if u'msgpack' in sers: # try MsgPack WAMP serializer try: from autobahn.wamp.serializer import MsgPackSerializer serializer = MsgPackSerializer() serializer._serializer.ENABLE_V5 = False # FIXME serializers.append(serializer) except ImportError: self.log.warn( "Warning: could not load WAMP-MsgPack serializer") else: sers.discard(u'msgpack') if u'ubjson' in sers: # try UBJSON WAMP serializer try: from autobahn.wamp.serializer import UBJSONSerializer serializers.append(UBJSONSerializer(batched=True)) serializers.append(UBJSONSerializer()) except ImportError: self.log.warn( "Warning: could not load WAMP-UBJSON serializer") else: sers.discard(u'ubjson') if u'json' in sers: # try JSON WAMP serializer try: from autobahn.wamp.serializer import JsonSerializer serializers.append(JsonSerializer()) except ImportError: self.log.warn( "Warning: could not load WAMP-JSON serializer") else: sers.discard(u'json') if not serializers: raise Exception("no valid WAMP serializers specified") if len(sers) > 0: raise Exception( "invalid WAMP serializers specified (the following were unprocessed) {}" .format(sers)) else: serializers = None # Maximum message size # self._max_message_size = config.get('max_message_size', 128 * 1024) # default is 128kB rawsocket.WampRawSocketServerFactory.__init__(self, factory, serializers) self.log.debug( "RawSocket transport factory created using {serializers} serializers, max. message size {maxsize}", serializers=serializers, maxsize=self._max_message_size) def buildProtocol(self, addr): p = self.protocol() p.factory = self p.MAX_LENGTH = self._max_message_size return p
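# A RawSocket transport configuration of the shape this factory consumes might
# look like the following (a sketch; values are illustrative only):

_EXAMPLE_RAWSOCKET_CONFIG = {
    u'type': u'rawsocket',
    u'serializers': [u'cbor', u'msgpack', u'json'],
    u'max_message_size': 131072,  # 128 kB - the default applied above
}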
class WampWebSocketServerFactory(websocket.WampWebSocketServerFactory): """ Crossbar.io WAMP-over-WebSocket server factory. """ showServerVersion = False protocol = WampWebSocketServerProtocol log = make_logger() def __init__(self, factory, cbdir, config, templates): """ Ctor. :param factory: WAMP session factory. :type factory: An instance of .. :param cbdir: The Crossbar.io node directory. :type cbdir: str :param config: Crossbar transport configuration. :type config: dict """ self.debug_traffic = config.get('debug_traffic', False) options = config.get('options', {}) self.showServerVersion = options.get('show_server_version', self.showServerVersion) if self.showServerVersion: server = "Crossbar/{}".format(crossbar.__version__) else: server = "Crossbar" externalPort = options.get('external_port', None) # explicit list of WAMP serializers # if 'serializers' in config: serializers = [] sers = set(config['serializers']) if u'cbor' in sers: # try CBOR WAMP serializer try: from autobahn.wamp.serializer import CBORSerializer serializers.append(CBORSerializer(batched=True)) serializers.append(CBORSerializer()) except ImportError: self.log.warn( "Warning: could not load WAMP-CBOR serializer") else: sers.discard(u'cbor') if u'msgpack' in sers: # try MsgPack WAMP serializer try: from autobahn.wamp.serializer import MsgPackSerializer serializers.append(MsgPackSerializer(batched=True)) serializers.append(MsgPackSerializer()) except ImportError: self.log.warn( "Warning: could not load WAMP-MsgPack serializer") else: sers.discard('msgpack') if u'ubjson' in sers: # try UBJSON WAMP serializer try: from autobahn.wamp.serializer import UBJSONSerializer serializers.append(UBJSONSerializer(batched=True)) serializers.append(UBJSONSerializer()) except ImportError: self.log.warn( "Warning: could not load WAMP-UBJSON serializer") else: sers.discard(u'ubjson') if u'json' in sers: # try JSON WAMP serializer try: from autobahn.wamp.serializer import JsonSerializer serializers.append(JsonSerializer(batched=True)) serializers.append(JsonSerializer()) except ImportError: self.log.warn( "Warning: could not load WAMP-JSON serializer") else: sers.discard(u'json') if not serializers: raise Exception("no valid WAMP serializers specified") if len(sers) > 0: raise Exception( "invalid WAMP serializers specified (the following were unprocessed) {}" .format(sers)) else: serializers = None websocket.WampWebSocketServerFactory.__init__( self, factory, serializers=serializers, url=config.get('url', None), server=server, externalPort=externalPort) # Crossbar.io node directory self._cbdir = cbdir # transport configuration self._config = config # Jinja2 templates for 404 etc self._templates = templates # cookie tracking if 'cookie' in config: cookie_store_type = config['cookie']['store']['type'] # ephemeral, memory-backed cookie store if cookie_store_type == 'memory': self._cookiestore = CookieStoreMemoryBacked(config['cookie']) self.log.info("Memory-backed cookie store active.") # persistent, file-backed cookie store elif cookie_store_type == 'file': cookie_store_file = os.path.abspath( os.path.join(self._cbdir, config['cookie']['store']['filename'])) self._cookiestore = CookieStoreFileBacked( cookie_store_file, config['cookie']) self.log.info( "File-backed cookie store active {cookie_store_file}", cookie_store_file=cookie_store_file) else: # should not arrive here as the config should have been checked before raise Exception("logic error") else: self._cookiestore = None # set WebSocket options set_websocket_options(self, options)
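# Cookie tracking in the factory above is driven by a 'cookie' block in the
# transport configuration, e.g. (a sketch; values are illustrative only):

_EXAMPLE_COOKIE_CONFIG = {
    u'cookie': {
        u'store': {
            u'type': u'file',  # or u'memory' for the ephemeral store
            u'filename': u'cookies.dat',
        }
    }
}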
class Component(component.Component):
    """
    A component establishes a transport and attaches a session to a realm
    using the transport for communication.

    The transports a component tries to use can be configured,
    as well as the auto-reconnect strategy.
    """

    log = txaio.make_logger()

    session_factory = Session
    """
    The factory of the session we will instantiate.
    """

    def _is_ssl_error(self, e):
        """
        Internal helper.
        """
        return isinstance(e, ssl.SSLError)

    def _check_native_endpoint(self, endpoint):
        if isinstance(endpoint, dict):
            if u'tls' in endpoint:
                tls = endpoint[u'tls']
                if isinstance(tls, (dict, bool)):
                    pass
                elif isinstance(tls, ssl.SSLContext):
                    pass
                else:
                    raise ValueError(
                        "'tls' configuration must be a dict, bool or "
                        "SSLContext instance")
        else:
            raise ValueError(
                "'endpoint' configuration must be a dict or IStreamClientEndpoint"
                " provider")

    # async function
    def _connect_transport(self, loop, transport, session_factory, done):
        """
        Create and connect a WAMP-over-XXX transport.
        """
        factory = _create_transport_factory(loop, transport, session_factory)

        # XXX the rest of this should probably be factored into its
        # own method (or three!)...
        if transport.proxy:
            timeout = transport.endpoint.get(u'timeout', 10)  # in seconds
            if type(timeout) not in six.integer_types:
                raise ValueError(
                    'invalid type {} for timeout in client endpoint configuration'.format(type(timeout)))
            # do we support HTTPS proxies?
            f = loop.create_connection(
                protocol_factory=factory,
                host=transport.proxy['host'],
                port=transport.proxy['port'],
            )
            time_f = asyncio.ensure_future(asyncio.wait_for(f, timeout=timeout))
            return self._wrap_connection_future(transport, done, time_f)

        elif transport.endpoint[u'type'] == u'tcp':

            version = transport.endpoint.get(u'version', 4)
            if version not in [4, 6]:
                raise ValueError(
                    'invalid IP version {} in client endpoint configuration'.format(version))

            host = transport.endpoint[u'host']
            if type(host) != six.text_type:
                raise ValueError(
                    'invalid type {} for host in client endpoint configuration'.format(type(host)))

            port = transport.endpoint[u'port']
            if type(port) not in six.integer_types:
                raise ValueError(
                    'invalid type {} for port in client endpoint configuration'.format(type(port)))

            timeout = transport.endpoint.get(u'timeout', 10)  # in seconds
            if type(timeout) not in six.integer_types:
                raise ValueError(
                    'invalid type {} for timeout in client endpoint configuration'.format(type(timeout)))

            tls = transport.endpoint.get(u'tls', None)
            tls_hostname = None

            # create a TLS enabled connecting TCP socket
            if tls:
                if isinstance(tls, dict):
                    for k in tls.keys():
                        if k not in [u"hostname", u"trust_root"]:
                            raise ValueError("Invalid key '{}' in 'tls' config".format(k))
                    hostname = tls.get(u'hostname', host)
                    if type(hostname) != six.text_type:
                        raise ValueError(
                            'invalid type {} for hostname in TLS client endpoint configuration'.format(hostname))
                    cert_fname = tls.get(u'trust_root', None)

                    tls_hostname = hostname
                    tls = True
                    if cert_fname is not None:
                        tls = ssl.create_default_context(
                            purpose=ssl.Purpose.SERVER_AUTH,
                            cafile=cert_fname,
                        )

                elif isinstance(tls, ssl.SSLContext):
                    # tls=<an SSLContext> is valid
                    tls_hostname = host

                elif tls in [False, True]:
                    if tls:
                        tls_hostname = host

                else:
                    raise RuntimeError(
                        'unknown type {} for "tls" configuration in transport'.format(type(tls)))

            f = loop.create_connection(
                protocol_factory=factory,
                host=host,
                port=port,
                ssl=tls,
                server_hostname=tls_hostname,
            )
            time_f = asyncio.ensure_future(asyncio.wait_for(f, timeout=timeout))
            return self._wrap_connection_future(transport, done, time_f)

        elif transport.endpoint[u'type'] == u'unix':
            path = transport.endpoint[u'path']
            timeout = int(transport.endpoint.get(u'timeout', 10))  # in seconds

            f = loop.create_unix_connection(
                protocol_factory=factory,
                path=path,
            )
            time_f = asyncio.ensure_future(asyncio.wait_for(f, timeout=timeout))
            return self._wrap_connection_future(transport, done, time_f)

        else:
            assert (False), 'should not arrive here'

    def _wrap_connection_future(self, transport, done, conn_f):

        def on_connect_success(result):
            # async connect call returns a 2-tuple
            transport, proto = result

            # if e.g. an SSL handshake fails, we will have
            # successfully connected (i.e. get here) but need to
            # 'listen' for the "connection_lost" from the underlying
            # protocol in case of handshake failure .. so we wrap
            # it. Also, we don't increment transport.success_count
            # here on purpose (because we might not succeed).

            # XXX double-check that asyncio behavior on TLS handshake
            # failures is in fact as described above
            orig = proto.connection_lost

            @wraps(orig)
            def lost(fail):
                rtn = orig(fail)
                if not txaio.is_called(done):
                    # asyncio will call connection_lost(None) in case of
                    # a transport failure, in which case we create an
                    # appropriate exception
                    if fail is None:
                        fail = TransportLost("failed to complete connection")
                    txaio.reject(done, fail)
                return rtn
            proto.connection_lost = lost

        def on_connect_failure(err):
            transport.connect_failures += 1
            # failed to establish a connection in the first place
            txaio.reject(done, err)

        txaio.add_callbacks(conn_f, on_connect_success, None)
        # the errback is added as a second step so it gets called if
        # there was an error in on_connect_success itself.
        txaio.add_callbacks(conn_f, None, on_connect_failure)
        return conn_f

    # async function
    def start(self, loop=None):
        """
        This starts the Component, which means it will start connecting
        (and re-connecting) to its configured transports. A Component
        runs until it is "done", which means one of:

        - There was a "main" function defined, and it completed successfully;
        - Something called ``.leave()`` on our session, and we left successfully;
        - ``.stop()`` was called, and completed successfully;
        - none of our transports were able to connect successfully (failure);

        :returns: a Future which will resolve (to ``None``) when we are
            "done" or with an error if something went wrong.
        """
        if loop is None:
            self.log.warn("Using default loop")
            loop = asyncio.get_event_loop()

        return self._start(loop=loop)
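# A minimal sketch of wiring up and starting the Component defined above on
# asyncio (transport/realm values are illustrative only):
#
#     component = Component(
#         transports=[{
#             u'type': u'websocket',
#             u'url': u'ws://localhost:8080/ws',
#         }],
#         realm=u'realm1',
#     )
#
#     @component.on_join
#     def joined(session, details):
#         print('session joined: {}'.format(details))
#
#     loop = asyncio.get_event_loop()
#     loop.run_until_complete(component.start(loop=loop))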
class Library(object): log = txaio.make_logger() def something(self): self.log.info("info log from library foo={foo}", foo='bar') self.log.debug("debug information")
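# For the log output above to actually appear, the hosting application must
# start txaio logging once at startup, e.g.:
#
#     import txaio
#     txaio.use_twisted()  # or txaio.use_asyncio()
#     txaio.start_logging(level='debug')
#
#     lib = Library()
#     lib.something()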
class WampRawSocketProtocol(Int32StringReceiver):
    """
    Base class for Twisted-based WAMP-over-RawSocket protocols.
    """
    log = txaio.make_logger()

    def connectionMade(self):
        if self.factory.debug:
            self.log.debug("WampRawSocketProtocol: connection made")

        # the peer we are connected to
        #
        try:
            peer = self.transport.getPeer()
        except AttributeError:
            # ProcessProtocols lack getPeer()
            self.peer = "?"
        else:
            self.peer = peer2str(peer)

        # this will hold an ApplicationSession object
        # once the RawSocket opening handshake has been
        # completed
        #
        self._session = None

        # Will hold the negotiated serializer once the opening handshake is complete
        #
        self._serializer = None

        # Will be set to True once the opening handshake is complete
        #
        self._handshake_complete = False

        # Buffer for opening handshake received bytes.
        #
        self._handshake_bytes = b''

        # Client requested maximum length of serialized messages.
        #
        self._max_len_send = None

    def _on_handshake_complete(self):
        try:
            self._session = self.factory._factory()
            self._session.onOpen(self)
        except Exception as e:
            # Exceptions raised in onOpen are fatal ..
            if self.factory.debug:
                self.log.info(
                    "WampRawSocketProtocol: ApplicationSession constructor / onOpen raised ({0})".format(e))
            self.abort()
        else:
            if self.factory.debug:
                self.log.info("ApplicationSession started.")

    def connectionLost(self, reason):
        if self.factory.debug:
            self.log.info("WampRawSocketProtocol: connection lost: reason = '{0}'".format(reason))
        try:
            wasClean = isinstance(reason.value, ConnectionDone)
            self._session.onClose(wasClean)
        except Exception as e:
            # silently ignore exceptions raised here ..
            if self.factory.debug:
                self.log.info("WampRawSocketProtocol: ApplicationSession.onClose raised ({0})".format(e))
        self._session = None

    def stringReceived(self, payload):
        if self.factory.debug:
            self.log.info("WampRawSocketProtocol: RX octets: {0}".format(binascii.hexlify(payload)))
        try:
            for msg in self._serializer.unserialize(payload):
                if self.factory.debug:
                    self.log.info("WampRawSocketProtocol: RX WAMP message: {0}".format(msg))
                self._session.onMessage(msg)

        except ProtocolError as e:
            self.log.info(str(e))
            if self.factory.debug:
                self.log.info("WampRawSocketProtocol: WAMP Protocol Error ({0}) - aborting connection".format(e))
            self.abort()

        except Exception as e:
            if self.factory.debug:
                self.log.info("WampRawSocketProtocol: WAMP Internal Error ({0}) - aborting connection".format(e))
            self.abort()

    def send(self, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.send`
        """
        if self.isOpen():
            if self.factory.debug:
                self.log.info("WampRawSocketProtocol: TX WAMP message: {0}".format(msg))
            try:
                payload, _ = self._serializer.serialize(msg)
            except Exception as e:
                # all exceptions raised from above should be serialization errors ..
raise SerializationError( "WampRawSocketProtocol: unable to serialize WAMP application payload ({0})" .format(e)) else: self.sendString(payload) if self.factory.debug: self.log.info( "WampRawSocketProtocol: TX octets: {0}".format( binascii.hexlify(payload))) else: raise TransportLost() def isOpen(self): """ Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen` """ return self._session is not None def close(self): """ Implements :func:`autobahn.wamp.interfaces.ITransport.close` """ if self.isOpen(): self.transport.loseConnection() else: raise TransportLost() def abort(self): """ Implements :func:`autobahn.wamp.interfaces.ITransport.abort` """ if self.isOpen(): if hasattr(self.transport, 'abortConnection'): # ProcessProtocol lacks abortConnection() self.transport.abortConnection() else: self.transport.loseConnection() else: raise TransportLost()
def install_optimal_reactor(verbose=False):
    """
    Try to install the optimal Twisted reactor for this platform.

    :param verbose: If ``True``, print what happens.
    :type verbose: bool
    """
    log = txaio.make_logger()

    import sys
    from twisted.python import reflect

    # determine currently installed reactor, if any
    ##
    if 'twisted.internet.reactor' in sys.modules:
        current_reactor = reflect.qual(sys.modules['twisted.internet.reactor'].__class__).split('.')[-1]
    else:
        current_reactor = None

    # depending on platform, install optimal reactor
    ##
    if 'bsd' in sys.platform or sys.platform.startswith('darwin'):

        # *BSD and MacOSX
        ##
        if current_reactor != 'KQueueReactor':
            try:
                from twisted.internet import kqreactor
                kqreactor.install()
            except:
                log.critical("Running on *BSD or MacOSX, but cannot install kqueue Twisted reactor")
                log.warn("{tb}", tb=traceback.format_exc())
            else:
                log.debug("Running on *BSD or MacOSX and optimal reactor (kqueue) was installed.")
        else:
            log.debug("Running on *BSD or MacOSX and optimal reactor (kqueue) already installed.")

    elif sys.platform in ['win32']:

        # Windows
        ##
        if current_reactor != 'IOCPReactor':
            try:
                from twisted.internet.iocpreactor import reactor as iocpreactor
                iocpreactor.install()
            except:
                log.critical("Running on Windows, but cannot install IOCP Twisted reactor")
                log.warn("{tb}", tb=traceback.format_exc())
            else:
                log.debug("Running on Windows and optimal reactor (IOCP) was installed.")
        else:
            log.debug("Running on Windows and optimal reactor (IOCP) already installed.")

    elif sys.platform.startswith('linux'):

        # Linux
        ##
        if current_reactor != 'EPollReactor':
            try:
                from twisted.internet import epollreactor
                epollreactor.install()
            except:
                log.critical("Running on Linux, but cannot install Epoll Twisted reactor")
                log.warn("{tb}", tb=traceback.format_exc())
            else:
                log.debug("Running on Linux and optimal reactor (epoll) was installed.")
        else:
            log.debug("Running on Linux and optimal reactor (epoll) already installed.")

    else:
        try:
            from twisted.internet import default as defaultreactor
            defaultreactor.install()
        except:
            log.critical("Could not install default Twisted reactor for this platform")
            log.warn("{tb}", tb=traceback.format_exc())

    from twisted.internet import reactor
    txaio.config.loop = reactor
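# install_optimal_reactor() should be called before anything imports
# twisted.internet.reactor, since importing the reactor module installs the
# platform default and a reactor cannot be replaced once installed.
# Typical usage at program startup:
#
#     install_optimal_reactor()
#
#     from twisted.internet import reactor
#     reactor.run()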
from orthogonalspace.universe import universes, UniverseNotFound, find_universe, Universe
from orthogonalspace.world_generator import WorldGenerator
from orthogonalspace.views import register

import uuid
import txaio

log = txaio.make_logger()


@register('universe.create')
async def universe_create(engine, name=None, parameters=None):
    new_universe = Universe(str(uuid.uuid4()), name, world_generator=WorldGenerator(parameters or {}))
    universes[new_universe.id] = new_universe
    return {"success": True, "universe": new_universe, "reason": ""}


@register('universe.list')
async def universe_list(engine):
    return universes


@register('universe.get')
async def universe_get(engine, id):
    try:
        return {"success": True, "universe": find_universe(id), "reason": ""}
    except UniverseNotFound:
        return {"success": False, "universe": None, "reason": "no universe with id '{}'".format(id)}
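# Illustrative client for the procedures above, using an Autobahn asyncio
# Component. The router URL/realm are assumptions, and it is assumed the
# server-side @register wrapper injects the `engine` argument, so remote
# callers pass only name/parameters.
from autobahn.asyncio.component import Component, run

comp = Component(transports=u"ws://localhost:8080/ws", realm=u"realm1")

@comp.on_join
async def _(session, details):
    res = await session.call(u'universe.create', name=u'alpha', parameters={})
    print(res[u'success'], res[u'reason'])
    session.leave()

if __name__ == '__main__':
    run([comp])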
class WampRawSocketServerFactory(rawsocket.WampRawSocketServerFactory): """ Crossbar.io WAMP-over-RawSocket server factory. """ protocol = WampRawSocketServerProtocol log = make_logger() def __init__(self, factory, config): # remember transport configuration # self._config = config # explicit list of WAMP serializers # if 'serializers' in config: serializers = [] sers = set(config['serializers']) if 'flatbuffers' in sers: # try FlatBuffers WAMP serializer try: from autobahn.wamp.serializer import FlatBuffersSerializer serializers.append(FlatBuffersSerializer()) except ImportError: self.log.warn("Warning: could not load WAMP-FlatBuffers serializer") else: sers.discard('flatbuffers') if 'cbor' in sers: # try CBOR WAMP serializer try: from autobahn.wamp.serializer import CBORSerializer serializers.append(CBORSerializer()) except ImportError: self.log.warn("Warning: could not load WAMP-CBOR serializer") else: sers.discard('cbor') if 'msgpack' in sers: # try MsgPack WAMP serializer try: from autobahn.wamp.serializer import MsgPackSerializer serializer = MsgPackSerializer() serializer._serializer.ENABLE_V5 = False # FIXME serializers.append(serializer) except ImportError: self.log.warn("Warning: could not load WAMP-MsgPack serializer") else: sers.discard('msgpack') if 'ubjson' in sers: # try UBJSON WAMP serializer try: from autobahn.wamp.serializer import UBJSONSerializer serializers.append(UBJSONSerializer(batched=True)) serializers.append(UBJSONSerializer()) except ImportError: self.log.warn("Warning: could not load WAMP-UBJSON serializer") else: sers.discard('ubjson') if 'json' in sers: # try JSON WAMP serializer try: from autobahn.wamp.serializer import JsonSerializer serializers.append(JsonSerializer()) except ImportError: self.log.warn("Warning: could not load WAMP-JSON serializer") else: sers.discard('json') if not serializers: raise Exception("no valid WAMP serializers specified") if len(sers) > 0: raise Exception("invalid WAMP serializers specified (the following were unprocessed) {}".format(sers)) else: serializers = None rawsocket.WampRawSocketServerFactory.__init__(self, factory, serializers) if 'options' in config: set_rawsocket_options(self, config['options']) self.log.debug("RawSocket transport factory created using {serializers} serializers, max. message size {maxsize}", serializers=serializers, maxsize=self._max_message_size)
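# For context: a factory like the above is driven by a RawSocket transport
# item in the node configuration. An illustrative fragment (the "serializers"
# key is used by the code above; endpoint/option values are assumptions):
rawsocket_transport_config = {
    "type": "rawsocket",
    "endpoint": {"type": "tcp", "port": 8081},
    # unknown serializer names end up in the leftover set and raise
    # the "invalid WAMP serializers" exception above
    "serializers": ["cbor", "msgpack", "json"],
    "options": {"max_message_size": 1048576},
}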
class Application(object):
    """
    A WAMP application. The application object provides a simple way of
    creating, debugging and running WAMP application components.
    """
    log = txaio.make_logger()

    def __init__(self, prefix=None):
        """
        :param prefix: The application URI prefix to use for procedures and topics,
           e.g. ``"com.example.myapp"``.
        :type prefix: unicode
        """
        self._prefix = prefix

        # procedures to be registered once the app session has joined the router/realm
        self._procs = []

        # event handler to be subscribed once the app session has joined the router/realm
        self._handlers = []

        # app lifecycle signal handlers
        self._signals = {}

        # once an app session is connected, this will be here
        self.session = None

    def __call__(self, config):
        """
        Factory creating a WAMP application session for the application.

        :param config: Component configuration.
        :type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`

        :returns: obj -- An object that derives of
           :class:`autobahn.twisted.wamp.ApplicationSession`
        """
        assert (self.session is None)
        self.session = _ApplicationSession(config, self)
        return self.session

    def run(self, url="ws://localhost:8080/ws", realm="realm1", start_reactor=True):
        """
        Run the application.

        :param url: The URL of the WAMP router to connect to.
        :type url: unicode
        :param realm: The realm on the WAMP router to join.
        :type realm: unicode
        """
        runner = ApplicationRunner(url, realm)
        return runner.run(self.__call__, start_reactor)

    def register(self, uri=None):
        """
        Decorator exposing a function as a remote callable procedure.

        The first argument of the decorator should be the URI of the procedure
        to register under.

        :Example:

        .. code-block:: python

           @app.register('com.myapp.add2')
           def add2(a, b):
              return a + b

        Above function can then be called remotely over WAMP using the URI
        `com.myapp.add2` under which the function was registered.

        If no URI is given, the URI is constructed from the application URI
        prefix and the Python function name.

        :Example:

        .. code-block:: python

           app = Application('com.myapp')

           # implicit URI will be 'com.myapp.add2'
           @app.register()
           def add2(a, b):
              return a + b

        If the function `yields` (is a co-routine), the `@inlineCallbacks` decorator
        will be applied automatically to it. In that case, if you wish to return something,
        you should use `returnValue`:

        :Example:

        .. code-block:: python

           from twisted.internet.defer import returnValue

           @app.register('com.myapp.add2')
           def add2(a, b):
              res = yield stuff(a, b)
              returnValue(res)

        :param uri: The URI of the procedure to register under.
        :type uri: unicode
        """
        def decorator(func):
            if uri:
                _uri = uri
            else:
                assert (self._prefix is not None)
                _uri = "{0}.{1}".format(self._prefix, func.__name__)

            if inspect.isgeneratorfunction(func):
                func = inlineCallbacks(func)

            self._procs.append((_uri, func))
            return func
        return decorator

    def subscribe(self, uri=None):
        """
        Decorator attaching a function as an event handler.

        The first argument of the decorator should be the URI of the topic
        to subscribe to. If no URI is given, the URI is constructed from
        the application URI prefix and the Python function name.

        If the function yields, it is assumed to be an asynchronous coroutine
        and the `@inlineCallbacks` decorator is applied to it automatically.

        :Example:

        .. code-block:: python

           @app.subscribe('com.myapp.topic1')
           def onevent1(x, y):
              print("got event on topic1", x, y)

        :param uri: The URI of the topic to subscribe to.
        :type uri: unicode
        """
        def decorator(func):
            if uri:
                _uri = uri
            else:
                assert (self._prefix is not None)
                _uri = "{0}.{1}".format(self._prefix, func.__name__)

            if inspect.isgeneratorfunction(func):
                func = inlineCallbacks(func)

            self._handlers.append((_uri, func))
            return func
        return decorator

    def signal(self, name):
        """
        Decorator attaching a function as handler for application signals.

        Signals are local events triggered internally and exposed to the
        developer to be able to react to the application lifecycle.

        If the function yields, it is assumed to be an asynchronous coroutine
        and the `@inlineCallbacks` decorator is applied to it automatically.
        Current signals:

           - `onjoined`: Triggered after the application session has joined the
              realm on the router and registered/subscribed all procedures
              and event handlers that were setup via decorators.
           - `onleave`: Triggered when the application session leaves the realm.

        .. code-block:: python

           @app.signal('onjoined')
           def _():
              # do something after the app has joined the realm

        :param name: The name of the signal to watch.
        :type name: unicode
        """
        def decorator(func):
            if inspect.isgeneratorfunction(func):
                func = inlineCallbacks(func)
            self._signals.setdefault(name, []).append(func)
            return func
        return decorator

    @inlineCallbacks
    def _fire_signal(self, name, *args, **kwargs):
        """
        Utility method to call all signal handlers for a given signal.

        :param name: The signal name.
        :type name: str
        """
        for handler in self._signals.get(name, []):
            try:
                # FIXME: what if the signal handler is not a coroutine?
                # Why run signal handlers synchronously?
                yield handler(*args, **kwargs)
            except Exception as e:
                # FIXME
                self.log.info("Warning: exception in signal handler swallowed: {err}", err=e)
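# Minimal end-to-end sketch of the decorator API above (router URL and realm
# are illustrative):
from autobahn.twisted.wamp import Application

app = Application('com.example')

@app.register()
def add2(a, b):
    # registered under the implicit URI 'com.example.add2'
    return a + b

@app.signal('onjoined')
def onjoined():
    print('session joined; all procedures/handlers are registered')

if __name__ == '__main__':
    app.run("ws://localhost:8080/ws", "realm1")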
class UniSocketServerProtocol(Protocol):
    """
    """

    log = txaio.make_logger()

    def __init__(self, factory, addr):
        self._factory = factory
        self._addr = addr
        self._proto = None
        self._data = b''

    def dataReceived(self, data):
        if self._proto:
            # we already determined the actual protocol to speak. just forward received data
            self._proto.dataReceived(data)
        else:
            if data[0:1] == b'\x7F':
                # switch to RawSocket ..
                if not self._factory._rawsocket_factory:
                    self.log.warn('client wants to talk RawSocket, but we have no factory configured for that')
                    self.transport.loseConnection()
                else:
                    self.log.debug('switching to RawSocket')
                    self._proto = self._factory._rawsocket_factory.buildProtocol(self._addr)
                    self._proto.transport = self.transport
                    self._proto.connectionMade()
                    self._proto.dataReceived(data)
            elif data[0:1] == b'\x10':
                # switch to MQTT
                if not self._factory._mqtt_factory:
                    self.log.warn('client wants to talk MQTT, but we have no factory configured for that')
                    self.transport.loseConnection()
                else:
                    self.log.debug('switching to MQTT')
                    self._proto = self._factory._mqtt_factory.buildProtocol(self._addr)
                    self._proto.transport = self.transport
                    self._proto.connectionMade(True)
                    self._proto.dataReceived(data)
            else:
                # switch to HTTP, further subswitching to WebSocket (from Autobahn, like a WebSocketServerFactory)
                # or Web (from Twisted Web, like a Site). the subswitching is based on HTTP Request-URI.
                self._data += data
                request_line_end = self._data.find(b'\x0d\x0a')
                if request_line_end == -1:
                    # request line not yet fully received - wait for more data
                    return
                request_line = self._data[:request_line_end]

                # HTTP request line, eg 'GET /ws HTTP/1.1'
                rl = request_line.split()

                # we only check for number of parts in HTTP request line, not for HTTP method
                # nor HTTP version - checking these things is the job of the protocol instance
                # we switch to (as only the specific protocol knows what is allowed for the other
                # parts). in other words, we solely switch based on the HTTP Request-URI.
                if len(rl) != 3:
                    self.log.warn('received invalid HTTP request line for HTTP protocol subswitch: "{request_line}"', request_line=request_line)
                    self.transport.loseConnection()
                    return

                request_uri = rl[1].strip()

                # support IRIs: "All non-ASCII code points in the IRI should next be encoded as UTF-8,
                # and the resulting bytes percent-encoded, to produce a valid URI."
                request_uri = urlparse.unquote(request_uri.decode('ascii'))

                # the first component for the URI requested, eg for "/ws/foo/bar", it'll be "ws", and "/"
                # will map to ""
                request_uri_first_component = [x.strip() for x in request_uri.split('/') if x.strip() != '']
                if len(request_uri_first_component) > 0:
                    request_uri_first_component = request_uri_first_component[0]
                else:
                    request_uri_first_component = ''

                self.log.debug('switching to HTTP on Request-URI {request_uri}, mapping part {request_uri_first_component}', request_uri=request_uri, request_uri_first_component=request_uri_first_component)

                # _first_ try to find a matching URL prefix in the WebSocket factory map ..
                if self._factory._websocket_factory_map:
                    for uri_component, websocket_factory in self._factory._websocket_factory_map.items():
                        if request_uri_first_component == uri_component:
                            self._proto = websocket_factory.buildProtocol(self._addr)
                            self.log.debug(
                                'found and built WebSocket protocol for request URI {request_uri}, mapping part {request_uri_first_component}',
                                request_uri=request_uri,
                                request_uri_first_component=request_uri_first_component)
                            break

                if not self._proto:
                    # no WebSocket mapping matched the request URI
                    self.log.debug(
                        'no WebSocket mapping found for request URI {request_uri}, mapping part {request_uri_first_component}',
                        request_uri=request_uri,
                        request_uri_first_component=request_uri_first_component)

                    # mmh, still no protocol, so there has to be a Twisted Web (a "Site") factory
                    # hooked on this URL
                    if self._factory._web_factory:
                        self.log.debug('switching to HTTP/Web on Request-URI {request_uri}', request_uri=request_uri)
                        self._proto = self._factory._web_factory.buildProtocol(self._addr)

                        # watch out: this is definitely a hack!
                        self._proto._channel.transport = self.transport
                    else:
                        self.log.warn('client wants to talk HTTP/Web, but we have no factory configured for that')
                        self.transport.loseConnection()
                        return
                else:
                    # we've got a protocol instance already created from a WebSocket factory. cool.
                    self.log.debug('switching to HTTP/WebSocket on Request-URI {request_uri}', request_uri=request_uri)

                    # is this a hack? or am I allowed to do this?
                    self._proto.transport = self.transport

                # fake connection, forward data received beginning from the very first octet. this allows
                # to use the protocol being switched to in a standard, unswitched context without modification
                self._proto.connectionMade()
                self._proto.dataReceived(self._data)
                self._data = None

    def connectionLost(self, reason):
        if self._proto:
            self._proto.connectionLost(reason)
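# The switching logic above boils down to sniffing the first octet of the
# first bytes a client sends. A condensed, self-contained sketch of that
# decision (constants taken from the code above):
def sniff_protocol(first_byte):
    # 0x7F is the RawSocket handshake magic octet; 0x10 is an MQTT CONNECT
    # packet (control type 1 in the high nibble); everything else is treated
    # as HTTP and sub-switched on the Request-URI.
    if first_byte == b'\x7f':
        return 'rawsocket'
    if first_byte == b'\x10':
        return 'mqtt'
    return 'http'

assert sniff_protocol(b'\x7f') == 'rawsocket'
assert sniff_protocol(b'\x10') == 'mqtt'
assert sniff_protocol(b'G') == 'http'    # as in "GET / HTTP/1.1"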
class PendingAuthTLS(PendingAuth):
    """
    Pending WAMP-TLS authentication.
    """

    AUTHMETHOD = 'tls'

    log = make_logger()

    def __init__(self, pending_session_id: int, transport_details: TransportDetails,
                 realm_container: IRealmContainer, config: Dict[str, Any]):
        super(PendingAuthTLS, self).__init__(
            pending_session_id,
            transport_details,
            realm_container,
            config,
        )

        # https://tools.ietf.org/html/rfc5056
        # https://tools.ietf.org/html/rfc5929
        # https://www.ietf.org/proceedings/90/slides/slides-90-uta-0.pdf
        self._channel_id = transport_details.channel_id.get('tls-unique', None) if transport_details.channel_id else None
        self._peer_cert = transport_details.peer_cert

        # for static-mode, the config has principals as a dict indexed
        # by authid, but we need the reverse map: cert-sha1 -> principal
        self._cert_sha1_to_principal = None
        if self._config['type'] == 'static':
            self._cert_sha1_to_principal = {}
            if 'principals' in self._config:
                for authid, principal in self._config['principals'].items():
                    self._cert_sha1_to_principal[principal['certificate-sha1']] = {
                        'authid': authid,
                        'role': principal['role']
                    }

    def hello(self, realm, details):
        # we must have a client TLS certificate to continue
        if not self._peer_cert:
            return Deny(message='client did not send a TLS client certificate')

        # remember the realm the client requested to join (if any)
        self._realm = realm

        # remember the authid the client wants to identify as (if any)
        self._authid = details.authid

        # use static principal database from configuration
        if self._config['type'] == 'static':
            self._authprovider = 'static'

            client_cert_sha1 = self._peer_cert['sha1']
            if client_cert_sha1 in self._cert_sha1_to_principal:
                principal = self._cert_sha1_to_principal[client_cert_sha1]
                error = self._assign_principal(principal)
                if error:
                    return error
                return self._accept()
            else:
                return Deny(message='no principal with client certificate SHA1 "{}" exists'.format(client_cert_sha1))

        # use configured procedure to dynamically authenticate the principal
        elif self._config['type'] == 'dynamic':
            self._authprovider = 'dynamic'

            init_d = txaio.as_future(self._init_dynamic_authenticator)

            def init(result):
                if result:
                    return result

                self._session_details['authmethod'] = self._authmethod  # from AUTHMETHOD, via base
                self._session_details['authextra'] = details.authextra

                d = self._authenticator_session.call(self._authenticator, realm, details.authid, self._session_details)

                def on_authenticate_ok(_principal):
                    _error = self._assign_principal(_principal)
                    if _error:
                        return _error

                    # FIXME: not sure about this .. TLS is a transport-level auth mechanism .. so forward
                    # self._transport._authid = self._authid
                    # self._transport._authrole = self._authrole
                    # self._transport._authmethod = self._authmethod
                    # self._transport._authprovider = self._authprovider
                    # self._transport._authextra = self._authextra

                    return self._accept()

                def on_authenticate_error(err):
                    return self._marshal_dynamic_authenticator_error(err)

                d.addCallbacks(on_authenticate_ok, on_authenticate_error)
                return d

            init_d.addBoth(init)
            return init_d

        else:
            # should not arrive here, as config errors should be caught earlier
            return Deny(message='invalid authentication configuration (authentication type "{}" is unknown)'.format(self._config['type']))

    def authenticate(self, signature):
        # should not arrive here!
        raise Exception("internal error (WAMP-TLS does not implement AUTHENTICATE)")
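# Illustrative static configuration consumed by the class above: the reverse
# map built in __init__ turns this into cert-sha1 -> {authid, role}. The
# fingerprint value is a placeholder.
wamp_tls_auth_config = {
    "type": "static",
    "principals": {
        "client01": {
            "certificate-sha1": "50:18:2C:6A:...",  # placeholder fingerprint
            "role": "frontend"
        }
    }
}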
class WampMQTTServerProtocol(Protocol): log = make_logger() def __init__(self, reactor): self._mqtt = MQTTServerTwistedProtocol(self, reactor) self._request_to_packetid = {} self._waiting_for_connect = None self._inflight_subscriptions = {} self._subrequest_to_mqtt_subrequest = {} self._subrequest_callbacks = {} self._topic_lookup = {} self._wamp_session = None def on_message(self, inc_msg): try: self._on_message(inc_msg) except: self.log.failure() @inlineCallbacks def _on_message(self, inc_msg): self.log.debug('WampMQTTServerProtocol._on_message(inc_msg={inc_msg})', inc_msg=inc_msg) if isinstance(inc_msg, message.Challenge): assert inc_msg.method == u"ticket" msg = message.Authenticate(signature=self._pw_challenge) del self._pw_challenge self._wamp_session.onMessage(msg) elif isinstance(inc_msg, message.Welcome): self._waiting_for_connect.callback((0, False)) elif isinstance(inc_msg, message.Abort): self._waiting_for_connect.callback((1, False)) elif isinstance(inc_msg, message.Subscribed): # Successful subscription! mqtt_id = self._subrequest_to_mqtt_subrequest[inc_msg.request] self._inflight_subscriptions[mqtt_id][inc_msg.request]["response"] = 0 self._topic_lookup[inc_msg.subscription] = self._inflight_subscriptions[mqtt_id][inc_msg.request]["topic"] if -1 not in [x["response"] for x in self._inflight_subscriptions[mqtt_id].values()]: self._subrequest_callbacks[mqtt_id].callback(None) elif (isinstance(inc_msg, message.Error) and inc_msg.request_type == message.Subscribe.MESSAGE_TYPE): # Failed subscription :( mqtt_id = self._subrequest_to_mqtt_subrequest[inc_msg.request] self._inflight_subscriptions[mqtt_id][inc_msg.request]["response"] = 128 if -1 not in [x["response"] for x in self._inflight_subscriptions[mqtt_id].values()]: self._subrequest_callbacks[mqtt_id].callback(None) elif isinstance(inc_msg, message.Event): topic = inc_msg.topic or self._topic_lookup[inc_msg.subscription] try: payload_format, mapped_topic, payload = yield self.factory.transform_wamp(topic, inc_msg) except: self.log.failure() else: self._mqtt.send_publish(mapped_topic, 0, payload, retained=inc_msg.retained or False) elif isinstance(inc_msg, message.Goodbye): if self._mqtt.transport: self._mqtt.transport.loseConnection() self._mqtt.transport = None else: self.log.warn('cannot process unimplemented message: {inc_msg}', inc_msg=inc_msg) def connectionMade(self, ignore_handshake=False): if ignore_handshake or not ISSLTransport.providedBy(self.transport): self._when_ready() def connectionLost(self, reason): if self._wamp_session: msg = message.Goodbye() self._wamp_session.onMessage(msg) del self._wamp_session def handshakeCompleted(self): self._when_ready() def _when_ready(self): if self._wamp_session: return self._mqtt.transport = self.transport self._wamp_session = RouterSession(self.factory._router_session_factory._routerFactory) self._wamp_transport = WampTransport(self.factory, self.on_message, self.transport) self._wamp_session.onOpen(self._wamp_transport) self._wamp_session._transport_config = self.factory._options def process_connect(self, packet): """ Process the initial Connect message from the MQTT client. 
        This should return a pair `(accept_conn, session_present)`, where
        `accept_conn` is a return code:

        0: connection accepted
        1-5: connection refused (see MQTT spec 3.2.2.3)
        """
        # Connect(client_id='paho/4E23D8C09DD9C6CF2C',
        #         flags=ConnectFlags(username=False,
        #                            password=False,
        #                            will=False,
        #                            will_retain=False,
        #                            will_qos=0,
        #                            clean_session=True,
        #                            reserved=False),
        #         keep_alive=60,
        #         will_topic=None,
        #         will_message=None,
        #         username=None,
        #         password=None)
        self.log.info('WampMQTTServerProtocol.process_connect(packet={packet})', packet=packet)

        # we don't support session resumption: https://github.com/crossbario/crossbar/issues/892
        if not packet.flags.clean_session:
            self.log.warn('denying MQTT connect from {peer}, as the client wants to resume a session (which we do not support)',
                          peer=peer2str(self.transport.getPeer()))
            return succeed((1, False))

        # we won't support QoS 2: https://github.com/crossbario/crossbar/issues/1046
        if packet.flags.will and packet.flags.will_qos not in [0, 1]:
            self.log.warn('denying MQTT connect from {peer}, as the client wants to provide a "last will" event with QoS {will_qos} (and we only support QoS 0/1 here)',
                          peer=peer2str(self.transport.getPeer()), will_qos=packet.flags.will_qos)
            return succeed((1, False))

        # this will be resolved when the MQTT connect handshake is completed
        self._waiting_for_connect = Deferred()

        roles = {
            u"subscriber": role.RoleSubscriberFeatures(payload_transparency=True,
                                                       pattern_based_subscription=True),
            u"publisher": role.RolePublisherFeatures(payload_transparency=True,
                                                     x_acknowledged_event_delivery=True)
        }

        realm = self.factory._options.get(u'realm', None)

        authmethods = []
        authextra = {
            u'mqtt': {
                u'client_id': packet.client_id,
                u'will': bool(packet.flags.will),
                u'will_topic': packet.will_topic
            }
        }

        if ISSLTransport.providedBy(self.transport):
            authmethods.append(u"tls")

        if packet.username and packet.password:
            authmethods.append(u"ticket")
            msg = message.Hello(realm=realm,
                                roles=roles,
                                authmethods=authmethods,
                                authid=packet.username,
                                authextra=authextra)
            self._pw_challenge = packet.password
        else:
            authmethods.append(u"anonymous")
            msg = message.Hello(realm=realm,
                                roles=roles,
                                authmethods=authmethods,
                                authid=packet.client_id,
                                authextra=authextra)

        self._wamp_session.onMessage(msg)

        if packet.flags.will:

            # it's unclear from the MQTT spec whether a) the publication of the last will
            # is to happen in-band during "connect", and if it fails, deny the connection,
            # or b) the last will publication happens _after_ "connect", and the connection
            # succeeds regardless whether the last will publication succeeds or not.
            #
            # we opt for b) here!
            #
            @inlineCallbacks
            @self._waiting_for_connect.addCallback
            def process_will(res):
                self.log.info('registering "last will" of MQTT client as WAMP testament')

                payload_format, mapped_topic, options = yield self.factory.transform_mqtt(packet.will_topic, packet.will_message)

                request = util.id()

                msg = message.Call(
                    request=request,
                    procedure=u"wamp.session.add_testament",
                    args=[
                        mapped_topic,
                        options.get('args', None),
                        options.get('kwargs', None),
                        {
                            # specify "retain" for when the testament (last will)
                            # will be auto-published by the broker later
                            u'retain': bool(packet.flags.will_retain)
                        }
                    ])

                self._wamp_session.onMessage(msg)

                returnValue(res)

        return self._waiting_for_connect

    @inlineCallbacks
    def _publish(self, event, acknowledge=None):
        """
        Given a MQTT event, create a WAMP Publish message and
        forward that on the forwarding WAMP session.
""" try: payload_format, mapped_topic, options = yield self.factory.transform_mqtt(event.topic_name, event.payload) except: self.log.failure() return request = util.id() msg = message.Publish( request=request, topic=mapped_topic, exclude_me=False, acknowledge=acknowledge, retain=event.retain, **options) self._wamp_session.onMessage(msg) if event.qos_level > 0: self._request_to_packetid[request] = event.packet_identifier returnValue(0) def process_publish_qos_0(self, event): try: return self._publish(event) except: self.log.failure() def process_publish_qos_1(self, event): try: return self._publish(event, acknowledge=True) except: self.log.failure() def process_puback(self, event): return def process_pubrec(self, event): return def process_pubrel(self, event): return def process_pubcomp(self, event): return def process_subscribe(self, packet): packet_watch = OrderedDict() d = Deferred() @d.addCallback def _(ign): self._mqtt.send_suback(packet.packet_identifier, [x["response"] for x in packet_watch.values()]) del self._inflight_subscriptions[packet.packet_identifier] del self._subrequest_callbacks[packet.packet_identifier] self._subrequest_callbacks[packet.packet_identifier] = d self._inflight_subscriptions[packet.packet_identifier] = packet_watch for n, x in enumerate(packet.topic_requests): topic, match = _mqtt_topicfilter_to_wamp(x.topic_filter) self.log.info('process_subscribe -> topic={topic}, match={match}', topic=topic, match=match) request_id = util.id() msg = message.Subscribe( request=request_id, topic=topic, match=match, get_retained=True, ) try: packet_watch[request_id] = {"response": -1, "topic": x.topic_filter} self._subrequest_to_mqtt_subrequest[request_id] = packet.packet_identifier self._wamp_session.onMessage(msg) except: self.log.failure() packet_watch[request_id] = {"response": 128} @inlineCallbacks def process_unsubscribe(self, packet): for topic in packet.topics: if topic in self._subscriptions: yield self._subscriptions.pop(topic).unsubscribe() return def dataReceived(self, data): self._mqtt.dataReceived(data)
def run(components, log_level='info'):
    """
    High-level API to run a series of components.

    This will only return once all the components have stopped (including,
    possibly, after all re-connections have failed if you have re-connections
    enabled). Under the hood, this calls XXX fixme for asyncio

    -- if you wish to manage the loop yourself, use the
    :meth:`autobahn.asyncio.component.Component.start` method to start each
    component yourself.

    :param components: the Component(s) you wish to run
    :type components: Component or list of Components

    :param log_level: a valid log-level (or None to avoid calling start_logging)
    :type log_level: string
    """
    # actually, should we even let people "not start" the logging? I'm
    # not sure that's wise... (double-check: if they already called
    # txaio.start_logging() what happens if we call it again?)
    if log_level is not None:
        txaio.start_logging(level=log_level)
    loop = asyncio.get_event_loop()
    if loop.is_closed():
        asyncio.set_event_loop(asyncio.new_event_loop())
        loop = asyncio.get_event_loop()
    txaio.config.loop = loop
    log = txaio.make_logger()

    # see https://github.com/python/asyncio/issues/341 asyncio has
    # "odd" handling of KeyboardInterrupt when using Tasks (as
    # run_until_complete does). Another option is to just restore
    # default SIGINT handling, which is to exit:
    #   import signal
    #   signal.signal(signal.SIGINT, signal.SIG_DFL)

    @asyncio.coroutine
    def nicely_exit(signal):
        log.info("Shutting down due to {signal}", signal=signal)

        tasks = asyncio.Task.all_tasks()
        for task in tasks:
            # Do not cancel the current task.
            if task is not asyncio.Task.current_task():
                task.cancel()

        def cancel_all_callback(fut):
            try:
                fut.result()
            except asyncio.CancelledError:
                log.debug("All tasks cancelled")
            except Exception as e:
                log.error("Error while shutting down: {exception}", exception=e)
            finally:
                loop.stop()

        fut = asyncio.gather(*tasks)
        fut.add_done_callback(cancel_all_callback)

    try:
        loop.add_signal_handler(signal.SIGINT, lambda: asyncio.ensure_future(nicely_exit("SIGINT")))
        loop.add_signal_handler(signal.SIGTERM, lambda: asyncio.ensure_future(nicely_exit("SIGTERM")))
    except NotImplementedError:
        # signals are not available on Windows
        pass

    def done_callback(loop, arg):
        loop.stop()

    # returns a future; could run_until_complete() but see below
    component._run(loop, components, done_callback)

    try:
        loop.run_forever()
        # this is probably more-correct, but then you always get
        # "Event loop stopped before Future completed":
        # loop.run_until_complete(f)
    except asyncio.CancelledError:
        pass
    # finally:
    #     signal.signal(signal.SIGINT, signal.SIG_DFL)
    #     signal.signal(signal.SIGTERM, signal.SIG_DFL)

    # Close the event loop at the end, otherwise an exception is
    # thrown. https://bugs.python.org/issue23548
    loop.close()
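# Usage sketch for run() above: several components share one event loop, and
# Ctrl-C (SIGINT) cancels all their tasks via nicely_exit(). URL and realm
# are illustrative.
from autobahn.asyncio.component import Component, run

comp_a = Component(transports=u"ws://localhost:8080/ws", realm=u"realm1")
comp_b = Component(transports=u"ws://localhost:8080/ws", realm=u"realm1")

if __name__ == "__main__":
    # returns only after both components have stopped
    run([comp_a, comp_b], log_level='info')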
class CookieStore(object):
    """
    Cookie store common base.
    """

    log = make_logger()

    def __init__(self, config):
        """
        Ctor.

        :param config: The cookie configuration.
        :type config: dict
        """
        self._config = config

        # name of the HTTP cookie in use
        self._cookie_id_field = config.get('name', 'cbtid')

        # length of the cookie (random) ID value
        self._cookie_id_field_length = int(config.get('length', 24))

        # lifetime of the cookie in seconds (http://tools.ietf.org/html/rfc6265#page-20)
        self._cookie_max_age = int(config.get('max_age', 86400 * 7))

        # transient cookie database
        self._cookies = {}

        self.log.debug("Cookie store created with config {config}", config=config)

    def parse(self, headers):
        """
        Parse HTTP header for cookie. If cookie is found, return cookie ID,
        else return None.
        """
        self.log.debug("Parsing cookie from {headers}", headers=headers)

        # see if there already is a cookie set ..
        if 'cookie' in headers:
            try:
                cookie = http_cookies.SimpleCookie()
                cookie.load(str(headers['cookie']))
            except http_cookies.CookieError:
                pass
            else:
                if self._cookie_id_field in cookie:
                    cbtid = cookie[self._cookie_id_field].value
                    if cbtid in self._cookies:
                        return cbtid
        return None

    def create(self):
        """
        Create a new cookie, returning the cookie ID and cookie header value.
        """
        # http://tools.ietf.org/html/rfc6265#page-20
        # 0: delete cookie
        # -1: preserve cookie until browser is closed

        cbtid = util.newid(self._cookie_id_field_length)

        # cookie tracking data
        cbtData = {
            # UTC timestamp when the cookie was created
            'created': util.utcnow(),

            # maximum lifetime of the tracking/authenticating cookie
            'max_age': self._cookie_max_age,

            # when a cookie has been set, and the WAMP session
            # was successfully authenticated thereafter, the latter
            # auth info is stored here
            'authid': None,
            'authrole': None,
            'authrealm': None,
            'authmethod': None,

            # set of WAMP transports (WebSocket connections) this
            # cookie is currently used on
            'connections': set()
        }

        self._cookies[cbtid] = cbtData

        self.log.debug("New cookie {cbtid} created", cbtid=cbtid)

        # do NOT add the "secure" cookie attribute! "secure" refers to the
        # scheme of the Web page that triggered the WS, not WS itself!!
        #
        return cbtid, '%s=%s;max-age=%d' % (self._cookie_id_field, cbtid, cbtData['max_age'])

    def exists(self, cbtid):
        """
        Check if cookie with given ID exists.
        """
        cookie_exists = cbtid in self._cookies
        self.log.debug("Cookie {cbtid} exists = {cookie_exists}", cbtid=cbtid, cookie_exists=cookie_exists)
        return cookie_exists

    def getAuth(self, cbtid):
        """
        Return `(authid, authrole, authmethod, authrealm)` tuple given cookie ID.
        """
        if cbtid in self._cookies:
            c = self._cookies[cbtid]
            cookie_auth_info = c['authid'], c['authrole'], c['authmethod'], c['authrealm']
        else:
            cookie_auth_info = None, None, None, None
        self.log.debug("Cookie auth info for {cbtid} retrieved: {cookie_auth_info}", cbtid=cbtid, cookie_auth_info=cookie_auth_info)
        return cookie_auth_info

    def setAuth(self, cbtid, authid, authrole, authmethod, authrealm):
        """
        Set `(authid, authrole, authmethod)` triple for given cookie ID.
        """
        if cbtid in self._cookies:
            c = self._cookies[cbtid]
            c['authid'] = authid
            c['authrole'] = authrole
            c['authrealm'] = authrealm
            c['authmethod'] = authmethod

    def addProto(self, cbtid, proto):
        """
        Add given WebSocket connection to the set of connections associated
        with the cookie having the given ID. Return the new count of
        connections associated with the cookie.
""" self.log.debug("Adding proto {proto} to cookie {cbtid}", proto=proto, cbtid=cbtid) if cbtid in self._cookies: self._cookies[cbtid]['connections'].add(proto) return len(self._cookies[cbtid]['connections']) else: return 0 def dropProto(self, cbtid, proto): """ Remove given WebSocket connection from the set of connections associated with the cookie having the given ID. Return the new count of connections associated with the cookie. """ self.log.debug("Removing proto {proto} from cookie {cbtid}", proto=proto, cbtid=cbtid) # remove this WebSocket connection from the set of connections # associated with the same cookie if cbtid in self._cookies: self._cookies[cbtid]['connections'].discard(proto) return len(self._cookies[cbtid]['connections']) else: return 0 def getProtos(self, cbtid): """ Get all WebSocket connections currently associated with the cookie. """ if cbtid in self._cookies: return self._cookies[cbtid]['connections'] else: return []
class Broker(object):
    """
    Basic WAMP broker.
    """

    log = make_logger()

    def __init__(self, router, reactor, options=None):
        """
        :param router: The router this broker is part of.
        :type router: Object that implements :class:`crossbar.router.interfaces.IRouter`.

        :param reactor: The reactor used for scheduling (e.g. deferred publication of meta events).

        :param options: Router options.
        :type options: Instance of :class:`crossbar.router.types.RouterOptions`.
        """
        self._router = router
        self._reactor = reactor
        self._options = options or RouterOptions()

        # generator for WAMP request IDs
        self._request_id_gen = util.IdGenerator()

        # subscription map managed by this broker
        self._subscription_map = UriObservationMap()

        # map: session -> set of subscriptions (needed for detach)
        self._session_to_subscriptions = {}

        # check all topic URIs with strict rules
        self._option_uri_strict = self._options.uri_check == RouterOptions.URI_CHECK_STRICT

        # supported features from "WAMP Advanced Profile"
        self._role_features = role.RoleBrokerFeatures(
            publisher_identification=True,
            pattern_based_subscription=True,
            session_meta_api=True,
            subscription_meta_api=True,
            subscriber_blackwhite_listing=True,
            publisher_exclusion=True,
            subscription_revocation=True,
            event_retention=True,
            payload_transparency=True,
            payload_encryption_cryptobox=True)

        # store for event history
        if self._router._store:
            self._event_store = self._router._store.event_store
        else:
            self._event_store = None

        # if there is a store, let the store attach itself to all the subscriptions
        # it is configured to track
        if self._event_store:
            self._event_store.attach_subscription_map(self._subscription_map)

    def attach(self, session):
        """
        Implements :func:`crossbar.router.interfaces.IBroker.attach`
        """
        if session not in self._session_to_subscriptions:
            self._session_to_subscriptions[session] = set()
        else:
            raise Exception(u"session with ID {} already attached".format(session._session_id))

    def detach(self, session):
        """
        Implements :func:`crossbar.router.interfaces.IBroker.detach`
        """
        if session in self._session_to_subscriptions:
            for subscription in self._session_to_subscriptions[session]:
                was_subscribed, was_last_subscriber = self._subscription_map.drop_observer(session, subscription)
                was_deleted = False

                # delete it if there are no subscribers and no retained events
                #
                if was_subscribed and was_last_subscriber and not subscription.extra.retained_events:
                    was_deleted = True
                    self._subscription_map.delete_observation(subscription)

                # publish WAMP meta events, if we have a service session, but
                # not for the meta API itself!
                #
                if self._router._realm and \
                   self._router._realm.session and \
                   not subscription.uri.startswith(u'wamp.'):

                    # bind the current loop values now: the callLater(0) below fires
                    # after the loop has moved on, so a plain closure would see only
                    # the values from the last iteration
                    def _publish(subscription=subscription, was_subscribed=was_subscribed, was_deleted=was_deleted):
                        service_session = self._router._realm.session
                        options = types.PublishOptions(
                            correlation_id=None,
                            correlation_is_anchor=True,
                            correlation_is_last=False)

                        if was_subscribed:
                            service_session.publish(
                                u'wamp.subscription.on_unsubscribe',
                                session._session_id,
                                subscription.id,
                                options=options,
                            )

                        if was_deleted:
                            options.correlation_is_last = True
                            service_session.publish(
                                u'wamp.subscription.on_delete',
                                session._session_id,
                                subscription.id,
                                options=options,
                            )

                    # we postpone actual sending of meta events until we return to this client session
                    self._reactor.callLater(0, _publish)

            del self._session_to_subscriptions[session]
        else:
            raise Exception("session with ID {} not attached".format(session._session_id))

    def _filter_publish_receivers(self, receivers, publish):
        """
        Internal helper.

        Does all filtering on a candidate set of Publish receivers,
        based on all the white/blacklist options in 'publish'.
""" # filter by "eligible" receivers # if publish.eligible: # map eligible session IDs to eligible sessions eligible = set() for session_id in publish.eligible: if session_id in self._router._session_id_to_session: eligible.add( self._router._session_id_to_session[session_id]) # filter receivers for eligible sessions receivers = eligible & receivers # if "eligible_authid" we only accept receivers that have the correct authid if publish.eligible_authid: eligible = set() for aid in publish.eligible_authid: eligible.update( self._router._authid_to_sessions.get(aid, set())) receivers = receivers & eligible # if "eligible_authrole" we only accept receivers that have the correct authrole if publish.eligible_authrole: eligible = set() for ar in publish.eligible_authrole: eligible.update( self._router._authrole_to_sessions.get(ar, set())) receivers = receivers & eligible # remove "excluded" receivers # if publish.exclude: # map excluded session IDs to excluded sessions exclude = set() for s in publish.exclude: if s in self._router._session_id_to_session: exclude.add(self._router._session_id_to_session[s]) # filter receivers for excluded sessions if exclude: receivers = receivers - exclude # remove auth-id based receivers if publish.exclude_authid: for aid in publish.exclude_authid: receivers = receivers - self._router._authid_to_sessions.get( aid, set()) # remove authrole based receivers if publish.exclude_authrole: for ar in publish.exclude_authrole: receivers = receivers - self._router._authrole_to_sessions.get( ar, set()) return receivers def processPublish(self, session, publish): """ Implements :func:`crossbar.router.interfaces.IBroker.processPublish` """ if self._router.is_traced: if not publish.correlation_id: publish.correlation_id = self._router.new_correlation_id() publish.correlation_is_anchor = True if not publish.correlation_uri: publish.correlation_uri = publish.topic # check topic URI: for PUBLISH, must be valid URI (either strict or loose), and # all URI components must be non-empty if self._option_uri_strict: uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(publish.topic) else: uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(publish.topic) if not uri_is_valid: if publish.acknowledge: if self._router.is_traced: publish.correlation_is_last = False self._router._factory._worker._maybe_trace_rx_msg( session, publish) reply = message.Error( message.Publish.MESSAGE_TYPE, publish.request, ApplicationError.INVALID_URI, [ u"publish with invalid topic URI '{0}' (URI strict checking {1})" .format(publish.topic, self._option_uri_strict) ]) reply.correlation_id = publish.correlation_id reply.correlation_uri = publish.topic reply.correlation_is_anchor = False reply.correlation_is_last = True self._router.send(session, reply) else: if self._router.is_traced: publish.correlation_is_last = True self._router._factory._worker._maybe_trace_rx_msg( session, publish) return # disallow publication to topics starting with "wamp." 
other than for # trusted sessions (that are sessions built into Crossbar.io routing core) # if session._authrole is not None and session._authrole != u"trusted": is_restricted = publish.topic.startswith(u"wamp.") if is_restricted: if publish.acknowledge: if self._router.is_traced: publish.correlation_is_last = False self._router._factory._worker._maybe_trace_rx_msg( session, publish) reply = message.Error( message.Publish.MESSAGE_TYPE, publish.request, ApplicationError.INVALID_URI, [ u"publish with restricted topic URI '{0}'".format( publish.topic) ]) reply.correlation_id = publish.correlation_id reply.correlation_uri = publish.topic reply.correlation_is_anchor = False reply.correlation_is_last = True self._router.send(session, reply) else: if self._router.is_traced: publish.correlation_is_last = True self._router._factory._worker._maybe_trace_rx_msg( session, publish) return # get subscriptions active on the topic published to # subscriptions = self._subscription_map.match_observations( publish.topic) # check if the event is being persisted by checking if we ourself are among the observers # on _any_ matching subscription # we've been previously added to observer lists on subscriptions ultimately from # node configuration and during the broker starts up. store_event = False if self._event_store: for subscription in subscriptions: if self._event_store in subscription.observers: store_event = True break if store_event: self.log.debug('Persisting event on topic "{topic}"', topic=publish.topic) # check if the event is to be retained by inspecting the 'retain' flag retain_event = False if publish.retain: retain_event = True # go on if (otherwise there isn't anything to do anyway): # # - there are any active subscriptions OR # - the publish is to be acknowledged OR # - the event is to be persisted OR # - the event is to be retained # if not (subscriptions or publish.acknowledge or store_event or retain_event): # the received PUBLISH message is the only one received/sent # for this WAMP action, so mark it as "last" (there is another code path below!) if self._router.is_traced: if publish.correlation_is_last is None: publish.correlation_is_last = True self._router._factory._worker._maybe_trace_rx_msg( session, publish) else: # validate payload # if publish.payload is None: try: self._router.validate('event', publish.topic, publish.args, publish.kwargs) except Exception as e: if publish.acknowledge: if self._router.is_traced: publish.correlation_is_last = False self._router._factory._worker._maybe_trace_rx_msg( session, publish) reply = message.Error( message.Publish.MESSAGE_TYPE, publish.request, ApplicationError.INVALID_ARGUMENT, [ u"publish to topic URI '{0}' with invalid application payload: {1}" .format(publish.topic, e) ]) reply.correlation_id = publish.correlation_id reply.correlation_uri = publish.topic reply.correlation_is_anchor = False reply.correlation_is_last = True self._router.send(session, reply) else: if self._router.is_traced: publish.correlation_is_last = True self._router._factory._worker._maybe_trace_rx_msg( session, publish) return # authorize PUBLISH action # d = self._router.authorize(session, publish.topic, u'publish', options=publish.marshal_options()) def on_authorize_success(authorization): # the call to authorize the action _itself_ succeeded. now go on depending on whether # the action was actually authorized or not .. 
# if not authorization[u'allow']: if publish.acknowledge: if self._router.is_traced: publish.correlation_is_last = False self._router._factory._worker._maybe_trace_rx_msg( session, publish) reply = message.Error( message.Publish.MESSAGE_TYPE, publish.request, ApplicationError.NOT_AUTHORIZED, [ u"session not authorized to publish to topic '{0}'" .format(publish.topic) ]) reply.correlation_id = publish.correlation_id reply.correlation_uri = publish.topic reply.correlation_is_anchor = False reply.correlation_is_last = True self._router.send(session, reply) else: if self._router.is_traced: publish.correlation_is_last = True self._router._factory._worker._maybe_trace_rx_msg( session, publish) else: # new ID for the publication # publication = util.id() # publisher disclosure # if authorization[u'disclose']: disclose = True elif (publish.topic.startswith(u"wamp.") or publish.topic.startswith(u"crossbar.")): disclose = True else: disclose = False if disclose: publisher = session._session_id publisher_authid = session._authid publisher_authrole = session._authrole else: publisher = None publisher_authid = None publisher_authrole = None # skip publisher # if publish.exclude_me is None or publish.exclude_me: me_also = False else: me_also = True # persist event (this is done only once, regardless of the number of subscriptions # the event matches on) # if store_event: self._event_store.store_event(session._session_id, publication, publish.topic, publish.args, publish.kwargs) # retain event on the topic # if retain_event: retained_event = RetainedEvent(publish, publisher, publisher_authid, publisher_authrole) observation = self._subscription_map.get_observation( publish.topic) if not observation: # No observation, lets make a new one observation = self._subscription_map.create_observation( publish.topic, extra=SubscriptionExtra()) else: # this can happen if event-history is # enabled on the topic: the event-store # creates an observation before any client # could possible hit the code above if observation.extra is None: observation.extra = SubscriptionExtra() elif not isinstance(observation.extra, SubscriptionExtra): raise Exception( "incorrect 'extra' for '{}'".format( publish.topic)) if observation.extra.retained_events: if not publish.eligible and not publish.exclude: observation.extra.retained_events = [ retained_event ] else: observation.extra.retained_events.append( retained_event) else: observation.extra.retained_events = [ retained_event ] subscription_to_receivers = {} total_receivers_cnt = 0 # iterate over all subscriptions and determine actual receivers of the event # under the respective subscription. also persist events (independent of whether # there is any actual receiver right now on the subscription) # for subscription in subscriptions: # persist event history, but check if it is persisted on the individual subscription! # if store_event and self._event_store in subscription.observers: self._event_store.store_event_history( publication, subscription.id) # initial list of receivers are all subscribers on a subscription .. # receivers = subscription.observers receivers = self._filter_publish_receivers( receivers, publish) # if receivers is non-empty, dispatch event .. 
# receivers_cnt = len(receivers) - (1 if self in receivers else 0) if receivers_cnt: total_receivers_cnt += receivers_cnt subscription_to_receivers[subscription] = receivers # send publish acknowledge before dispatching # if publish.acknowledge: if self._router.is_traced: publish.correlation_is_last = False self._router._factory._worker._maybe_trace_rx_msg( session, publish) reply = message.Published(publish.request, publication) reply.correlation_id = publish.correlation_id reply.correlation_uri = publish.topic reply.correlation_is_anchor = False reply.correlation_is_last = total_receivers_cnt == 0 self._router.send(session, reply) else: if self._router.is_traced and publish.correlation_is_last is None: if total_receivers_cnt == 0: publish.correlation_is_last = True else: publish.correlation_is_last = False # now actually dispatch the events! # for chunked dispatching, this will be filled with deferreds for each chunk # processed. when the complete list of deferreds is done, that means the # event has been sent out to all applicable receivers all_dl = [] if total_receivers_cnt: # list of receivers that should have received the event, but we could not # send the event, since the receiver has disappeared in the meantime vanished_receivers = [] for subscription, receivers in subscription_to_receivers.items( ): self.log.debug( 'dispatching for subscription={subscription}', subscription=subscription) # for pattern-based subscriptions, the EVENT must contain # the actual topic being published to # if subscription.match != message.Subscribe.MATCH_EXACT: topic = publish.topic else: topic = None if publish.payload: msg = message.Event( subscription.id, publication, payload=publish.payload, publisher=publisher, publisher_authid=publisher_authid, publisher_authrole=publisher_authrole, topic=topic, enc_algo=publish.enc_algo, enc_key=publish.enc_key, enc_serializer=publish.enc_serializer) else: msg = message.Event( subscription.id, publication, args=publish.args, kwargs=publish.kwargs, publisher=publisher, publisher_authid=publisher_authid, publisher_authrole=publisher_authrole, topic=topic) # if the publish message had a correlation ID, this will also be the # correlation ID of the event message sent out msg.correlation_id = publish.correlation_id msg.correlation_uri = publish.topic msg.correlation_is_anchor = False msg.correlation_is_last = False chunk_size = self._options.event_dispatching_chunk_size if chunk_size and len(receivers) > chunk_size: self.log.debug( 'chunked dispatching to {receivers_size} with chunk_size={chunk_size}', receivers_size=len(receivers), chunk_size=chunk_size) else: self.log.debug( 'unchunked dispatching to {receivers_size} receivers', receivers_size=len(receivers)) # note that we're using one code-path for both chunked and unchunked # dispatches; the *first* chunk is always done "synchronously" (before # the first call-later) so "un-chunked mode" really just means we know # we'll be done right now and NOT do a call_later... 
# a Deferred that fires when all chunks are done all_d = txaio.create_future() all_dl.append(all_d) # all the event messages are the same except for the last one, which # needs to have the "is_last" flag set if we're doing a trace if self._router.is_traced: last_msg = copy.deepcopy(msg) last_msg.correlation_id = msg.correlation_id last_msg.correlation_uri = msg.correlation_uri last_msg.correlation_is_anchor = False last_msg.correlation_is_last = True def _notify_some(receivers): # we do a first pass over the proposed chunk of receivers # because not all of them will have a transport, and if this # will be the last chunk of receivers we need to figure out # which event is last... receivers_this_chunk = [] for receiver in receivers[:chunk_size]: if receiver._session_id and receiver._transport: receivers_this_chunk.append(receiver) else: vanished_receivers.append(receiver) receivers = receivers[chunk_size:] # XXX note there's still going to be some edge-cases here .. if # we are NOT the last chunk, but all the next chunk's receivers # (could be only 1 in that chunk!) vanish before we run our next # batch, then a "last" event will never go out ... # we now actually do the deliveries, but now we know which # receiver is the last one if receivers or not self._router.is_traced: # NOT the last chunk (or we're not traced so don't care) for receiver in receivers_this_chunk: self._router.send(receiver, msg) else: # last chunk, so last receiver gets the different message for receiver in receivers_this_chunk[:-1]: self._router.send(receiver, msg) # we might have zero valid receivers if receivers_this_chunk: self._router.send( receivers_this_chunk[-1], last_msg) if receivers: # still more to do .. return txaio.call_later( 0, _notify_some, receivers) else: # all done! 
resolve all_d, which represents all receivers # to a single subscription matching the event txaio.resolve(all_d, None) _notify_some([ recv for recv in receivers if (me_also or recv != session) and recv != self._event_store ]) return txaio.gather(all_dl) def on_authorize_error(err): """ the call to authorize the action _itself_ failed (note this is different from the call to authorize succeed, but the authorization being denied) """ self.log.failure("Authorization failed", failure=err) if publish.acknowledge: if self._router.is_traced: publish.correlation_is_last = False self._router._factory._worker._maybe_trace_rx_msg( session, publish) reply = message.Error( message.Publish.MESSAGE_TYPE, publish.request, ApplicationError.AUTHORIZATION_FAILED, [ u"failed to authorize session for publishing to topic URI '{0}': {1}" .format(publish.topic, err.value) ]) reply.correlation_id = publish.correlation_id reply.correlation_uri = publish.topic reply.correlation_is_anchor = False self._router.send(session, reply) else: if self._router.is_traced: publish.correlation_is_last = True self._router._factory._worker._maybe_trace_rx_msg( session, publish) txaio.add_callbacks(d, on_authorize_success, on_authorize_error) def processSubscribe(self, session, subscribe): """ Implements :func:`crossbar.router.interfaces.IBroker.processSubscribe` """ if self._router.is_traced: if not subscribe.correlation_id: subscribe.correlation_id = self._router.new_correlation_id() subscribe.correlation_is_anchor = True subscribe.correlation_is_last = False if not subscribe.correlation_uri: subscribe.correlation_uri = subscribe.topic self._router._factory._worker._maybe_trace_rx_msg( session, subscribe) # check topic URI: for SUBSCRIBE, must be valid URI (either strict or loose), and all # URI components must be non-empty for normal subscriptions, may be empty for # wildcard subscriptions and must be non-empty for all but the last component for # prefix subscriptions # if self._option_uri_strict: if subscribe.match == u"wildcard": uri_is_valid = _URI_PAT_STRICT_EMPTY.match(subscribe.topic) elif subscribe.match == u"prefix": uri_is_valid = _URI_PAT_STRICT_LAST_EMPTY.match( subscribe.topic) else: uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(subscribe.topic) else: if subscribe.match == u"wildcard": uri_is_valid = _URI_PAT_LOOSE_EMPTY.match(subscribe.topic) elif subscribe.match == u"prefix": uri_is_valid = _URI_PAT_LOOSE_LAST_EMPTY.match(subscribe.topic) else: uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(subscribe.topic) if not uri_is_valid: reply = message.Error( message.Subscribe.MESSAGE_TYPE, subscribe.request, ApplicationError.INVALID_URI, [ u"subscribe for invalid topic URI '{0}'".format( subscribe.topic) ]) reply.correlation_id = subscribe.correlation_id reply.correlation_uri = subscribe.topic reply.correlation_is_anchor = False reply.correlation_is_last = True self._router.send(session, reply) return # authorize SUBSCRIBE action # d = self._router.authorize(session, subscribe.topic, u'subscribe', options=subscribe.marshal_options()) def on_authorize_success(authorization): if not authorization[u'allow']: # error reply since session is not authorized to subscribe # replies = [ message.Error( message.Subscribe.MESSAGE_TYPE, subscribe.request, ApplicationError.NOT_AUTHORIZED, [ u"session is not authorized to subscribe to topic '{0}'" .format(subscribe.topic) ]) ] replies[0].correlation_id = subscribe.correlation_id replies[0].correlation_uri = subscribe.topic replies[0].correlation_is_anchor = False 
replies[0].correlation_is_last = True else: # ok, session authorized to subscribe. now get the subscription # subscription, was_already_subscribed, is_first_subscriber = self._subscription_map.add_observer( session, subscribe.topic, subscribe.match, extra=SubscriptionExtra()) if not was_already_subscribed: self._session_to_subscriptions[session].add(subscription) # publish WAMP meta events, if we have a service session, but # not for the meta API itself! # if self._router._realm and \ self._router._realm.session and \ not subscription.uri.startswith(u'wamp.') and \ (is_first_subscriber or not was_already_subscribed): has_follow_up_messages = True def _publish(): service_session = self._router._realm.session options = types.PublishOptions( correlation_id=subscribe.correlation_id, correlation_is_anchor=False, correlation_is_last=False, ) if is_first_subscriber: subscription_details = { u'id': subscription.id, u'created': subscription.created, u'uri': subscription.uri, u'match': subscription.match, } service_session.publish( u'wamp.subscription.on_create', session._session_id, subscription_details, options=options, ) if not was_already_subscribed: options.correlation_is_last = True service_session.publish( u'wamp.subscription.on_subscribe', session._session_id, subscription.id, options=options, ) # we postpone actual sending of meta events until we return to this client session self._reactor.callLater(0, _publish) else: has_follow_up_messages = False # check for retained events # def _get_retained_event(): if subscription.extra.retained_events: retained_events = list( subscription.extra.retained_events) retained_events.reverse() for retained_event in retained_events: authorized = False if not retained_event.publish.exclude and not retained_event.publish.eligible: authorized = True elif session._session_id in retained_event.publish.eligible and session._session_id not in retained_event.publish.exclude: authorized = True if authorized: publication = util.id() if retained_event.publish.payload: msg = message.Event( subscription.id, publication, payload=retained_event.publish.payload, enc_algo=retained_event.publish. enc_algo, enc_key=retained_event.publish.enc_key, enc_serializer=retained_event.publish. enc_serializer, publisher=retained_event.publisher, publisher_authid=retained_event. publisher_authid, publisher_authrole=retained_event. publisher_authrole, retained=True) else: msg = message.Event( subscription.id, publication, args=retained_event.publish.args, kwargs=retained_event.publish.kwargs, publisher=retained_event.publisher, publisher_authid=retained_event. publisher_authid, publisher_authrole=retained_event. 
publisher_authrole, retained=True) msg.correlation_id = subscribe.correlation_id msg.correlation_uri = subscribe.topic msg.correlation_is_anchor = False msg.correlation_is_last = False return [msg] return [] # acknowledge subscribe with subscription ID # replies = [ message.Subscribed(subscribe.request, subscription.id) ] replies[0].correlation_id = subscribe.correlation_id replies[0].correlation_uri = subscribe.topic replies[0].correlation_is_anchor = False replies[0].correlation_is_last = False if subscribe.get_retained: replies.extend(_get_retained_event()) replies[-1].correlation_is_last = not has_follow_up_messages # send out reply to subscribe requestor # [self._router.send(session, reply) for reply in replies] def on_authorize_error(err): """ the call to authorize the action _itself_ failed (note this is different from the call to authorize succeed, but the authorization being denied) """ self.log.failure("Authorization of 'subscribe' for '{uri}' failed", uri=subscribe.topic, failure=err) reply = message.Error( message.Subscribe.MESSAGE_TYPE, subscribe.request, ApplicationError.AUTHORIZATION_FAILED, [ u"failed to authorize session for subscribing to topic URI '{0}': {1}" .format(subscribe.topic, err.value) ]) reply.correlation_id = subscribe.correlation_id reply.correlation_uri = subscribe.topic reply.correlation_is_anchor = False reply.correlation_is_last = True self._router.send(session, reply) txaio.add_callbacks(d, on_authorize_success, on_authorize_error) def processUnsubscribe(self, session, unsubscribe): """ Implements :func:`crossbar.router.interfaces.IBroker.processUnsubscribe` """ if self._router.is_traced: if not unsubscribe.correlation_id: unsubscribe.correlation_id = self._router.new_correlation_id() unsubscribe.correlation_is_anchor = True unsubscribe.correlation_is_last = False # get subscription by subscription ID or None (if it doesn't exist on this broker) # subscription = self._subscription_map.get_observation_by_id( unsubscribe.subscription) if subscription: if self._router.is_traced and not unsubscribe.correlation_uri: unsubscribe.correlation_uri = subscription.uri if session in subscription.observers: was_subscribed, was_last_subscriber, has_follow_up_messages = self._unsubscribe( subscription, session, unsubscribe) reply = message.Unsubscribed(unsubscribe.request) if self._router.is_traced: reply.correlation_uri = subscription.uri reply.correlation_is_last = not has_follow_up_messages else: # subscription exists on this broker, but the session that wanted to unsubscribe wasn't subscribed # reply = message.Error(message.Unsubscribe.MESSAGE_TYPE, unsubscribe.request, ApplicationError.NO_SUCH_SUBSCRIPTION) if self._router.is_traced: reply.correlation_uri = reply.error reply.correlation_is_last = True else: # subscription doesn't even exist on this broker # reply = message.Error(message.Unsubscribe.MESSAGE_TYPE, unsubscribe.request, ApplicationError.NO_SUCH_SUBSCRIPTION) if self._router.is_traced: reply.correlation_uri = reply.error reply.correlation_is_last = True if self._router.is_traced: self._router._factory._worker._maybe_trace_rx_msg( session, unsubscribe) reply.correlation_id = unsubscribe.correlation_id reply.correlation_is_anchor = False self._router.send(session, reply) def _unsubscribe(self, subscription, session, unsubscribe=None): # drop session from subscription observers # was_subscribed, was_last_subscriber = self._subscription_map.drop_observer( session, subscription) was_deleted = False if was_subscribed and was_last_subscriber and not 
subscription.extra.retained_events: self._subscription_map.delete_observation(subscription) was_deleted = True # remove subscription from session->subscriptions map # if was_subscribed: self._session_to_subscriptions[session].discard(subscription) # publish WAMP meta events, if we have a service session, but # not for the meta API itself! # if self._router._realm and \ self._router._realm.session and \ not subscription.uri.startswith(u'wamp.') and \ (was_subscribed or was_deleted): has_follow_up_messages = True def _publish(): service_session = self._router._realm.session if unsubscribe and self._router.is_traced: options = types.PublishOptions( correlation_id=unsubscribe.correlation_id, correlation_is_anchor=False, correlation_is_last=False) else: options = None if was_subscribed: service_session.publish( u'wamp.subscription.on_unsubscribe', session._session_id, subscription.id, options=options, ) if was_deleted: if options: options.correlation_is_last = True service_session.publish( u'wamp.subscription.on_delete', session._session_id, subscription.id, options=options, ) # we postpone actual sending of meta events until we return to this client session self._reactor.callLater(0, _publish) else: has_follow_up_messages = False return was_subscribed, was_last_subscriber, has_follow_up_messages def removeSubscriber(self, subscription, session, reason=None): """ Actively unsubscribe a subscriber session from a subscription. """ was_subscribed, was_last_subscriber, _ = self._unsubscribe( subscription, session) if 'subscriber' in session._session_roles and session._session_roles['subscriber'] and session._session_roles['subscriber'].subscription_revocation: reply = message.Unsubscribed(0, subscription=subscription.id, reason=reason) reply.correlation_uri = subscription.uri self._router.send(session, reply) return was_subscribed, was_last_subscriber
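# ---------------------------------------------------------------------------
# Editor's note: a minimal, self-contained sketch (not part of this module)
# of a client observing the WAMP subscription meta-events that the broker
# code above publishes. The router URL "ws://localhost:8080/ws" and realm
# "realm1" are assumptions for illustration only.
# ---------------------------------------------------------------------------
from twisted.internet.defer import inlineCallbacks

from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner


class MetaEventWatcher(ApplicationSession):

    @inlineCallbacks
    def onJoin(self, details):
        # fired when the *first* session subscribes to a topic
        def on_create(session_id, sub_details):
            print('subscription created by {}: {}'.format(session_id, sub_details))

        # fired for every session attaching to a subscription
        def on_subscribe(session_id, sub_id):
            print('session {} subscribed to {}'.format(session_id, sub_id))

        # fired when a session detaches from a subscription
        def on_unsubscribe(session_id, sub_id):
            print('session {} unsubscribed from {}'.format(session_id, sub_id))

        # fired when the *last* session detaches (and no events are retained)
        def on_delete(session_id, sub_id):
            print('subscription {} deleted'.format(sub_id))

        yield self.subscribe(on_create, u'wamp.subscription.on_create')
        yield self.subscribe(on_subscribe, u'wamp.subscription.on_subscribe')
        yield self.subscribe(on_unsubscribe, u'wamp.subscription.on_unsubscribe')
        yield self.subscribe(on_delete, u'wamp.subscription.on_delete')


if __name__ == '__main__':
    ApplicationRunner(u'ws://localhost:8080/ws', u'realm1').run(MetaEventWatcher)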
class ApplicationRunner(object): """ This class is a convenience tool mainly for development and quick hosting of WAMP application components. It can host a WAMP application component in a WAMP-over-WebSocket client connecting to a WAMP router. """ log = txaio.make_logger() def __init__(self, url, realm=None, extra=None, serializers=None, ssl=None, proxy=None, headers=None, max_retries=None, initial_retry_delay=None, max_retry_delay=None, retry_delay_growth=None, retry_delay_jitter=None): """ :param url: The WebSocket URL of the WAMP router to connect to (e.g. `ws://somehost.com:8090/somepath`) :type url: str :param realm: The WAMP realm to join the application session to. :type realm: str :param extra: Optional extra configuration to forward to the application component. :type extra: dict :param serializers: A list of WAMP serializers to use (or None for default serializers). Serializers must implement :class:`autobahn.wamp.interfaces.ISerializer`. :type serializers: list :param ssl: (Optional). If specified this should be an instance suitable to pass as ``sslContextFactory`` to :class:`twisted.internet.endpoints.SSL4ClientEndpoint`` such as :class:`twisted.internet.ssl.CertificateOptions`. Leaving it as ``None`` will use the result of calling Twisted's :meth:`twisted.internet.ssl.platformTrust` which tries to use your distribution's CA certificates. :type ssl: :class:`twisted.internet.ssl.CertificateOptions` :param proxy: Explicit proxy server to use; a dict with ``host`` and ``port`` keys :type proxy: dict or None :param headers: Additional headers to send (only applies to WAMP-over-WebSocket). :type headers: dict :param max_retries: Maximum number of reconnection attempts. Unlimited if set to -1. :type max_retries: int :param initial_retry_delay: Initial delay for reconnection attempt in seconds (Default: 1.0s). :type initial_retry_delay: float :param max_retry_delay: Maximum delay for reconnection attempts in seconds (Default: 60s). :type max_retry_delay: float :param retry_delay_growth: The growth factor applied to the retry delay between reconnection attempts (Default 1.5). :type retry_delay_growth: float :param retry_delay_jitter: Scaling factor for the random jitter (noise) added to the delay between reconnection attempts (Default: 1.0). :type retry_delay_jitter: float """ assert (type(url) == str) assert (realm is None or type(realm) == str) assert (extra is None or type(extra) == dict) assert (headers is None or type(headers) == dict) assert (proxy is None or type(proxy) == dict) self.url = url self.realm = realm self.extra = extra or dict() self.serializers = serializers self.ssl = ssl self.proxy = proxy self.headers = headers self.max_retries = max_retries self.initial_retry_delay = initial_retry_delay self.max_retry_delay = max_retry_delay self.retry_delay_growth = retry_delay_growth self.retry_delay_jitter = retry_delay_jitter # this is for auto-reconnection, when Twisted's ClientService is available self._client_service = None # total number of successful connections self._connect_successes = 0 @public def stop(self): """ Stop reconnecting, if auto-reconnecting was enabled. """ self.log.debug('{klass}.stop()', klass=self.__class__.__name__) if self._client_service: return self._client_service.stopService() else: return succeed(None) @public def run(self, make, start_reactor=True, auto_reconnect=False, log_level='info', endpoint=None, reactor=None): """ Run the application component.
:param make: A factory that produces instances of :class:`autobahn.twisted.wamp.ApplicationSession` when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`. :type make: callable :param start_reactor: When ``True`` (the default) this method starts the Twisted reactor and doesn't return until the reactor stops. If there are any problems starting the reactor or connect()-ing, we stop the reactor and raise the exception back to the caller. :returns: None, unless ``start_reactor=False`` is specified, in which case the Deferred that connect() returns is returned; this will callback() with an IProtocol instance, which will actually be an instance of :class:`WampWebSocketClientProtocol` """ self.log.debug('{klass}.run()', klass=self.__class__.__name__) if start_reactor: # only select the framework, set the loop and start logging when we are asked to # start the reactor - otherwise we are running in a program that likely # already took care of all this. from twisted.internet import reactor txaio.use_twisted() txaio.config.loop = reactor txaio.start_logging(level=log_level) if callable(make): # 'make' is a factory for user ApplicationSession instances def create(): cfg = ComponentConfig(self.realm, self.extra, runner=self) try: session = make(cfg) except Exception: self.log.failure( 'ApplicationSession could not be instantiated: {log_failure.value}' ) if start_reactor and reactor.running: reactor.stop() raise else: return session else: create = make if self.url.startswith('rs'): # try to parse RawSocket URL .. isSecure, host, port = parse_rs_url(self.url) # use the first configured serializer if any (which means, auto-choose "best") serializer = self.serializers[0] if self.serializers else None # create a WAMP-over-RawSocket transport client factory transport_factory = WampRawSocketClientFactory( create, serializer=serializer) else: # try to parse WebSocket URL .. isSecure, host, port, resource, path, params = parse_ws_url( self.url) # create a WAMP-over-WebSocket transport client factory transport_factory = WampWebSocketClientFactory( create, url=self.url, serializers=self.serializers, proxy=self.proxy, headers=self.headers) # client WebSocket settings - similar to: # - http://crossbar.io/docs/WebSocket-Compression/#production-settings # - http://crossbar.io/docs/WebSocket-Options/#production-settings # The permessage-deflate extensions offered to the server .. offers = [PerMessageDeflateOffer()] # function to accept permessage-deflate responses from the server .. def accept(response): if isinstance(response, PerMessageDeflateResponse): return PerMessageDeflateResponseAccept(response) # set WebSocket options for all client connections transport_factory.setProtocolOptions( maxFramePayloadSize=1048576, maxMessagePayloadSize=1048576, autoFragmentSize=65536, failByDrop=False, openHandshakeTimeout=2.5, closeHandshakeTimeout=1., tcpNoDelay=True, autoPingInterval=10., autoPingTimeout=5., autoPingSize=12, perMessageCompressionOffers=offers, perMessageCompressionAccept=accept) # suppress pointless log noise transport_factory.noisy = False if endpoint: client = endpoint else: # if the user passed ssl= but the URL isn't secure ("ws:"), the ssl # argument would never be used, which makes no sense. context_factory = None if self.ssl is not None: if not isSecure: raise RuntimeError( 'ssl= argument value passed to %s conflicts with the "ws:" ' 'prefix of the url argument. Did you mean to use "wss:"?'
% self.__class__.__name__) context_factory = self.ssl elif isSecure: from twisted.internet.ssl import optionsForClientTLS context_factory = optionsForClientTLS(host) from twisted.internet import reactor if self.proxy is not None: from twisted.internet.endpoints import TCP4ClientEndpoint client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port']) transport_factory.contextFactory = context_factory elif isSecure: from twisted.internet.endpoints import SSL4ClientEndpoint assert context_factory is not None client = SSL4ClientEndpoint(reactor, host, port, context_factory) else: from twisted.internet.endpoints import TCP4ClientEndpoint client = TCP4ClientEndpoint(reactor, host, port) # as the reactor shuts down, we wish to wait until we've sent # out our "Goodbye" message; leave() returns a Deferred that # fires when the transport gets to STATE_CLOSED def cleanup(proto): if hasattr(proto, '_session') and proto._session is not None: if proto._session.is_attached(): return proto._session.leave() elif proto._session.is_connected(): return proto._session.disconnect() # when our proto was created and connected, make sure it's cleaned # up properly later on when the reactor shuts down for whatever reason def init_proto(proto): self._connect_successes += 1 reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto) return proto use_service = False if auto_reconnect: try: # since Twisted 16.1.0 from twisted.application.internet import ClientService from twisted.application.internet import backoffPolicy use_service = True except ImportError: use_service = False if use_service: # this code path is automatically reconnecting .. self.log.debug('using t.a.i.ClientService') if self.max_retries or self.initial_retry_delay or self.max_retry_delay or self.retry_delay_growth or self.retry_delay_jitter: kwargs = {} def _jitter(): j = 1 if self.retry_delay_jitter is None else self.retry_delay_jitter return random.random() * j for key, val in [('initialDelay', self.initial_retry_delay), ('maxDelay', self.max_retry_delay), ('factor', self.retry_delay_growth), ('jitter', _jitter)]: if val: kwargs[key] = val # retry policy that will only try to reconnect if we connected # successfully at least once before (so it fails on host unreachable etc ..) def retry(failed_attempts): if self._connect_successes > 0 and ( self.max_retries == -1 or failed_attempts < self.max_retries): return backoffPolicy(**kwargs)(failed_attempts) else: self.log.debug('stopping reconnection attempts') self.stop() return 100000000000000 # effectively: never retry again else: retry = backoffPolicy() self._client_service = ClientService(client, transport_factory, retryPolicy=retry) self._client_service.startService() d = self._client_service.whenConnected() else: # this code path is only connecting once! self.log.debug('using t.i.e.connect()') d = client.connect(transport_factory) # if we connect successfully, the arg is a WampWebSocketClientProtocol d.addCallback(init_proto) # if the user didn't ask us to start the reactor, then they # get to deal with any connect errors themselves. if start_reactor: # if an error happens in the connect(), we save the underlying # exception so that after the event-loop exits we can re-raise # it to the caller.
class ErrorCollector(object): exception = None def __call__(self, failure): self.exception = failure.value reactor.stop() connect_error = ErrorCollector() d.addErrback(connect_error) # now enter the Twisted reactor loop reactor.run() # if the ApplicationSession sets an "error" key on the self.config.extra dictionary # (which is the same object as the self.extra dictionary), extract the Exception from # that and re-raise it as the very last step (see below) when exiting back to the # caller of self.run() app_error = self.extra.get('error', None) # if we exited due to a connection error, raise that to the caller if connect_error.exception: raise connect_error.exception elif app_error: raise app_error else: # let the caller handle any errors return d
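# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch for the ApplicationRunner above.
# The router URL "ws://127.0.0.1:8080/ws" and realm "realm1" are assumptions
# for illustration; point them at your own router.
# ---------------------------------------------------------------------------
from autobahn.twisted.wamp import ApplicationSession


class HelloSession(ApplicationSession):

    def onJoin(self, details):
        self.log.info('session joined realm "{realm}"', realm=details.realm)
        self.publish(u'com.example.hello', 'hello, world!')


if __name__ == '__main__':
    runner = ApplicationRunner(
        u'ws://127.0.0.1:8080/ws',
        u'realm1',
        max_retries=5,            # consumed by the backoffPolicy-based retry() above
        initial_retry_delay=1.0,  # first reconnect attempt after 1s ..
        retry_delay_growth=2.0,   # .. then exponential backoff
    )
    # blocks until the reactor stops (e.g. Ctrl-C); note that per retry()
    # above, reconnects only happen after at least one successful connection
    runner.run(HelloSession, auto_reconnect=True)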
from __future__ import absolute_import import os import traceback import crossbar from autobahn.twisted import websocket from autobahn.twisted import rawsocket from autobahn.websocket.compress import PerMessageDeflateOffer, PerMessageDeflateOfferAccept from txaio import make_logger from crossbar.router.cookiestore import CookieStoreMemoryBacked, CookieStoreFileBacked log = make_logger() __all__ = ( 'WampWebSocketServerFactory', 'WampRawSocketServerFactory', 'WampWebSocketServerProtocol', 'WampRawSocketServerProtocol', 'WampWebSocketClientFactory', 'WampRawSocketClientFactory', 'WampWebSocketClientProtocol', 'WampRawSocketClientProtocol', ) def set_websocket_options(factory, options): """
class WampWebSocketServerProtocol(websocket.WampWebSocketServerProtocol): """ Crossbar.io WAMP-over-WebSocket server protocol. """ log = make_logger() def __init__(self): super(WampWebSocketServerProtocol, self).__init__() self._cbtid = None def onConnect(self, request): if self.factory.debug_traffic: from twisted.internet import reactor def print_traffic(): self.log.info( "Traffic {peer}: {wire_in} / {wire_out} in / out bytes - {ws_in} / {ws_out} in / out msgs", peer=self.peer, wire_in=self.trafficStats.incomingOctetsWireLevel, wire_out=self.trafficStats.outgoingOctetsWireLevel, ws_in=self.trafficStats.incomingWebSocketMessages, ws_out=self.trafficStats.outgoingWebSocketMessages, ) reactor.callLater(1, print_traffic) print_traffic() # if WebSocket client did not set WS subprotocol, assume "wamp.2.json" # self.STRICT_PROTOCOL_NEGOTIATION = self.factory._requireWebSocketSubprotocol # handle WebSocket opening handshake # protocol, headers = websocket.WampWebSocketServerProtocol.onConnect( self, request) try: self._origin = request.origin # transport-level WAMP authentication info # self._authid = None self._authrole = None self._authrealm = None self._authmethod = None self._authprovider = None # cookie tracking and cookie-based authentication # self._cbtid = None if self.factory._cookiestore: # try to parse an already set cookie from HTTP request headers self._cbtid = self.factory._cookiestore.parse(request.headers) # if no cookie is set, create a new one .. if self._cbtid is None: self._cbtid, headers['Set-Cookie'] = self.factory._cookiestore.create() self.log.debug("Setting new cookie: {cookie}", cookie=headers['Set-Cookie']) else: self.log.debug("Cookie already set") # add this WebSocket connection to the set of connections # associated with the same cookie self.factory._cookiestore.addProto(self._cbtid, self) self.log.debug( "Cookie tracking enabled on WebSocket connection {ws}", ws=self) # if cookie-based authentication is enabled, set auth info from cookie store # if 'auth' in self.factory._config and 'cookie' in self.factory._config['auth']: self._authid, self._authrole, self._authmethod, self._authrealm = self.factory._cookiestore.getAuth( self._cbtid) if self._authid: # there is a cookie set, and the cookie was previously successfully authenticated, # so immediately authenticate the client using that information self._authprovider = u'cookie' self.log.debug( "Authenticated client via cookie cbtid={cbtid} as authid={authid}, authrole={authrole}, authmethod={authmethod}, authrealm={authrealm}", cbtid=self._cbtid, authid=self._authid, authrole=self._authrole, authmethod=self._authmethod, authrealm=self._authrealm) else: # there is a cookie set, but it hasn't been authenticated yet via some other auth method self.log.debug( "Cookie-based authentication enabled, but cookie isn't authenticated yet" ) else: self.log.debug("Cookie-based authentication disabled") else: self.log.debug( "Cookie tracking disabled on WebSocket connection {ws}", ws=self) # remember transport level info for later forwarding in # WAMP meta event "wamp.session.on_join" # self._transport_info = { u'type': 'websocket', u'protocol': protocol, u'peer': self.peer, u'http_headers_received': request.headers, u'http_headers_sent': headers, u'cbtid': self._cbtid } # accept the WebSocket connection, speaking subprotocol `protocol` # and setting HTTP headers `headers` # return (protocol, headers) except Exception: traceback.print_exc() def sendServerStatus(self, redirectUrl=None, redirectAfter=0): """ Used to send out
server status/version upon receiving an HTTP GET without a WebSocket upgrade header (and when the option serverStatus is True). """ try: page = self.factory._templates.get_template('cb_ws_status.html') self.sendHtml( page.render(redirectUrl=redirectUrl, redirectAfter=redirectAfter, cbVersion=crossbar.__version__, wsUri=self.factory.url, peer=self.peer, workerPid=os.getpid())) except Exception: self.log.failure( "Error rendering WebSocket status page template: {log_failure.value}" ) def onClose(self, wasClean, code, reason): super(WampWebSocketServerProtocol, self).onClose(wasClean, code, reason) # remove this WebSocket connection from the set of connections # associated with the same cookie if self._cbtid: self.factory._cookiestore.dropProto(self._cbtid, self)
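# ---------------------------------------------------------------------------
# Editor's note: an illustrative (hedged) sketch of the kind of WebSocket
# transport configuration that drives the cookie-tracking branches above,
# expressed as a Python dict. The 'cookie' and 'auth'/'cookie' keys mirror
# what the code above reads from factory._config; the exact field names and
# the 'ticket' block are assumptions - check the Crossbar.io docs for your
# version before using.
# ---------------------------------------------------------------------------
WEBSOCKET_TRANSPORT_CONFIG = {
    u'type': u'websocket',
    u'endpoint': {
        u'type': u'tcp',
        u'port': 8080,
    },
    # enables factory._cookiestore, and hence the _cbtid handling above
    u'cookie': {
        u'store': {
            u'type': u'file',            # or u'memory'
            u'filename': u'cookies.dat',  # hypothetical file name
        },
    },
    u'auth': {
        # lets a cookie that was authenticated once (e.g. via ticket)
        # immediately authenticate subsequent connections
        u'cookie': {},
        u'ticket': {
            u'type': u'static',
            u'principals': {
                u'user1': {
                    u'ticket': u'secret!!!',  # hypothetical credential
                    u'role': u'frontend',
                },
            },
        },
    },
}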
def _run_command_start(options, reactor, personality): """ Subcommand "crossbar start". """ # do not allow running more than one Crossbar.io instance # from the same Crossbar.io node directory # pid_data = _check_is_running(options.cbdir) if pid_data: print("Crossbar.io is already running from node directory {} (PID {}).".format(options.cbdir, pid_data['pid'])) sys.exit(1) else: fp = os.path.join(options.cbdir, _PID_FILENAME) with open(fp, 'wb') as fd: argv = options.argv options_dump = vars(options) pid_data = { 'pid': os.getpid(), 'argv': argv, 'options': {x: y for x, y in options_dump.items() if x not in ["func", "argv"]} } fd.write("{}\n".format( json.dumps(pid_data, sort_keys=False, indent=4, separators=(', ', ': '), ensure_ascii=False)).encode('utf8')) # remove node PID file when reactor exits # def remove_pid_file(): fp = os.path.join(options.cbdir, _PID_FILENAME) if os.path.isfile(fp): os.remove(fp) reactor.addSystemEventTrigger('after', 'shutdown', remove_pid_file) log = make_logger() # represents the running Crossbar.io node # enable_vmprof = False if _HAS_VMPROF: enable_vmprof = options.vmprof node_options = personality.NodeOptions(debug_lifecycle=options.debug_lifecycle, debug_programflow=options.debug_programflow, enable_vmprof=enable_vmprof) node = personality.Node(personality, options.cbdir, reactor=reactor, options=node_options) # print the banner, personality and node directory # for line in personality.BANNER.splitlines(): log.info(hl(line, color='yellow', bold=True)) print() log.info('{note} {func}', note=hl('Booting {} node ..'.format(personality.NAME), color='red', bold=True), func=hltype(_run_command_start)) log.debug('Running on realm="{realm}" from cbdir="{cbdir}"', realm=hlid(node.realm), cbdir=hlid(options.cbdir)) # check and load the node configuration # try: config_source, config_path = node.load_config(options.config) except InvalidConfigException as e: log.failure() log.error("Invalid node configuration") log.error("{e!s}", e=e) sys.exit(1) except: raise else: config_source = node.CONFIG_SOURCE_TO_STR.get(config_source, None) log.info('Node configuration loaded [config_source={config_source}, config_path={config_path}]', config_source=hl(config_source, bold=True, color='green'), config_path=hlid(config_path)) # possibly generate new node key # if not node.is_key_loaded(): node.load_keys(options.cbdir) # if vmprof global profiling is enabled via command line option, this will carry # the file where vmprof writes its profile data if _HAS_VMPROF: _vm_prof = { # need to put this into a dict, since FDs are ints, and python closures can't # write to this otherwise 'outfd': None } # https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IReactorCore.html # Each "system event" in Twisted, such as 'startup', 'shutdown', and 'persist', has 3 phases: # 'before', 'during', and 'after' (in that order, of course). These events will be fired # internally by the Reactor.
def before_reactor_started(): term_print('CROSSBAR:REACTOR_STARTING') def after_reactor_started(): term_print('CROSSBAR:REACTOR_STARTED') if _HAS_VMPROF and options.vmprof: outfn = os.path.join(options.cbdir, '.vmprof-controller-{}.dat'.format(os.getpid())) _vm_prof['outfd'] = os.open(outfn, os.O_RDWR | os.O_CREAT | os.O_TRUNC) vmprof.enable(_vm_prof['outfd'], period=0.01) term_print('CROSSBAR:VMPROF_ENABLED:{}'.format(outfn)) def before_reactor_stopped(): term_print('CROSSBAR:REACTOR_STOPPING') if _HAS_VMPROF and options.vmprof and _vm_prof['outfd']: vmprof.disable() term_print('CROSSBAR:VMPROF_DISABLED') def after_reactor_stopped(): # FIXME: we are indeed reaching this line, however, # the log output does not work (it also doesn't work using # plain old print). Dunno why. # My theory about this issue is: by the time this line # is reached, Twisted has already closed the stdout/stderr # pipes. Hence we do an evil trick: we directly write to # the process' controlling terminal # https://unix.stackexchange.com/a/91716/52500 term_print('CROSSBAR:REACTOR_STOPPED') reactor.addSystemEventTrigger('before', 'startup', before_reactor_started) reactor.addSystemEventTrigger('after', 'startup', after_reactor_started) reactor.addSystemEventTrigger('before', 'shutdown', before_reactor_stopped) reactor.addSystemEventTrigger('after', 'shutdown', after_reactor_stopped) # now actually start the node .. # exit_info = {'was_clean': None} def start_crossbar(): term_print('CROSSBAR:NODE_STARTING') # # ****** main entry point of node ****** # d = node.start() # node started successfully, and later .. def on_startup_success(_shutdown_complete): term_print('CROSSBAR:NODE_STARTED') shutdown_complete = _shutdown_complete['shutdown_complete'] # .. exits, signaling exit status _inside_ the result returned def on_shutdown_success(shutdown_info): exit_info['was_clean'] = shutdown_info['was_clean'] log.info('on_shutdown_success: was_clean={was_clean}', was_clean=shutdown_info['was_clean']) # should not arrive here: def on_shutdown_error(err): exit_info['was_clean'] = False log.error("on_shutdown_error: {tb}", tb=failure_format_traceback(err)) shutdown_complete.addCallbacks(on_shutdown_success, on_shutdown_error) # node could not even start def on_startup_error(err): term_print('CROSSBAR:NODE_STARTUP_FAILED') exit_info['was_clean'] = False log.error("Could not start node: {tb}", tb=failure_format_traceback(err)) if reactor.running: reactor.stop() d.addCallbacks(on_startup_success, on_startup_error) # Call a function when the reactor is running. If the reactor has not started, the callable # will be scheduled to run when it does start. reactor.callWhenRunning(start_crossbar) # Special feature to automatically shutdown the node after this many seconds if options.shutdownafter: @inlineCallbacks def _shutdown(): term_print('CROSSBAR:SHUTDOWN_AFTER_FIRED') shutdown_info = yield node.stop() exit_info['was_clean'] = shutdown_info['was_clean'] term_print('CROSSBAR:SHUTDOWN_AFTER_COMPLETE') reactor.callLater(options.shutdownafter, _shutdown) # now enter event loop ..
# log.info(hl('Entering event reactor ...', color='green', bold=True)) term_print('CROSSBAR:REACTOR_ENTERED') reactor.run() # once the reactor has finally stopped, we get here, and at that point, # exit_info['was_clean'] MUST have been set before - either to True or to False # (otherwise we are missing a code path to handle above) # exit the program with exit code depending on whether the node has been cleanly shut down if exit_info['was_clean'] is True: term_print('CROSSBAR:EXIT_WITH_SUCCESS') sys.exit(0) elif exit_info['was_clean'] is False: term_print('CROSSBAR:EXIT_WITH_ERROR') sys.exit(1) else: term_print('CROSSBAR:EXIT_WITH_INTERNAL_ERROR') sys.exit(1)
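# ---------------------------------------------------------------------------
# Editor's note: the write side of the node PID file is shown above; the
# read side (_check_is_running) lives elsewhere in the codebase. This is an
# illustrative sketch only, assuming the same JSON layout and a hypothetical
# default file name 'node.pid' (the real name comes from _PID_FILENAME).
# ---------------------------------------------------------------------------
import json
import os


def _sketch_check_is_running(cbdir, pid_filename='node.pid'):
    """
    Return the parsed PID file contents if a node appears to be running
    from ``cbdir``, else None. Stale files (dead PIDs) are removed.
    """
    fp = os.path.join(cbdir, pid_filename)
    if not os.path.isfile(fp):
        return None
    with open(fp, 'rb') as fd:
        pid_data = json.loads(fd.read().decode('utf8'))
    try:
        # signal 0 probes for process existence without affecting it
        os.kill(pid_data['pid'], 0)
    except OSError:
        # process is gone: remove the stale PID file
        os.remove(fp)
        return None
    return pid_data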