Example #1
    def __init__(self, options, session):
        """
        Ctor.

        :param options: Options for path service from configuration.
        :type options: dict
        :param session: Instance of `ApplicationSession` to be used for forwarding events.
        :type session: obj
        """
        Resource.__init__(self)
        self._options = options
        self._session = session
        self.log = make_logger()

        self._key = None
        if 'key' in options:
            self._key = options['key'].encode('utf8')

        self._secret = None
        if 'secret' in options:
            self._secret = options['secret'].encode('utf8')

        self._post_body_limit = int(options.get('post_body_limit', 0))
        self._timestamp_delta_limit = int(options.get('timestamp_delta_limit', 300))

        self._require_ip = None
        if 'require_ip' in options:
            self._require_ip = [IPNetwork(net) for net in options['require_ip']]

        self._require_tls = options.get('require_tls', None)
Example #2
    def test_start_router_component_invalid_type(self):
        """
        Trying to start a component with an invalid type fails.
        """
        log_list = []

        r = router.RouterWorkerSession(config=self.config, reactor=reactor)
        r.log = make_logger(observer=log_list.append, log_level="debug")

        # Open the transport
        transport = FakeWAMPTransport(r)
        r.onOpen(transport)

        realm_config = {
            u"name": u"realm1",
            u'roles': []
        }

        r.start_router_realm("realm1", realm_config)

        component_config = {
            "type": u"notathingcrossbarsupports",
            "realm": u"realm1"
        }

        with self.assertRaises(ApplicationError) as e:
            r.start_router_component("newcomponent", component_config)

        self.assertIn(
            ("ERROR: invalid router component configuration (invalid value "
             "'notathingcrossbarsupports' for component type)"),
            str(e.exception.args[0]))

        self.assertEqual(len(r.get_router_components()), 0)
Example #3
    def __init__(self, reactor, options):
        """
        Ctor.

        :param reactor: Reactor to run on.
        :type reactor: obj
        :param options: Options from command line.
        :type options: obj
        """
        self.log = make_logger()

        self.options = options
        # the reactor under which we run
        self._reactor = reactor

        # shortname for reactor to run (when given via explicit option) or None
        self._reactor_shortname = options.reactor

        # node directory
        self._cbdir = options.cbdir

        # the node's name (must be unique within the management realm)
        self._node_id = None

        # the node's management realm
        self._realm = None

        # node controller session (a singleton ApplicationSession embedded
        # in the node's management router)
        self._controller = None
Example #4
    def _cleanup_worker(reactor, worker):
        """
        This is called during reactor shutdown and ensures we wait for our
        subprocesses to shut down nicely.
        """
        log = make_logger()
        try:
            log.info("sending TERM to subprocess {pid}", pid=worker.pid)
            worker.proto.transport.signalProcess('TERM')
            # wait for the subprocess to shutdown; could add a timeout
            # after which we send a KILL maybe?
            d = Deferred()

            def protocol_closed(_):
                log.debug("{pid} exited", pid=worker.pid)
                d.callback(None)

            # await worker's timely demise
            worker.exit.addCallback(protocol_closed)

            def timeout(tried):
                if d.called:
                    return
                log.info("waiting for {pid} to exit...", pid=worker.pid)
                reactor.callLater(1, timeout, tried + 1)
                if tried > 20:  # or just wait forever?
                    log.info("Sending SIGKILL to {pid}", pid=worker.pid)
                    worker.proto.transport.signalProcess('KILL')
                    d.callback(None)  # or recurse more?
            timeout(0)
            return d
        except ProcessExitedAlready:
            pass  # ignore; it's already dead
Example #5
def _appsession_loader(config):
    """
    Load a class or a WAMPlet from C{config}.
    """
    log = make_logger()

    if config['type'] == 'class':

        try:
            klassname = config['classname']

            log.debug("Starting class '{klass}'", klass=klassname)

            c = klassname.split('.')
            module_name, klass_name = '.'.join(c[:-1]), c[-1]
            module = importlib.import_module(module_name)
            component = getattr(module, klass_name)

            if not issubclass(component, ApplicationSession):
                raise ApplicationError(
                    u"crossbar.error.class_import_failed", "session not derived of ApplicationSession"
                )

        except Exception as e:
            emsg = "Failed to import class '{}'\n{}".format(
                klassname, Failure(e).getTraceback())
            log.debug(emsg)
            log.debug("PYTHONPATH: {pythonpath}", pythonpath=sys.path)
            raise ApplicationError(
                u"crossbar.error.class_import_failed",
                emsg,
                pythonpath=sys.path
            )

    elif config['type'] == 'wamplet':

        try:
            dist = config['package']
            name = config['entrypoint']

            log.debug("Starting WAMPlet '{}/{}'".format(dist, name))

            # component is supposed to make instances of ApplicationSession
            component = pkg_resources.load_entry_point(
                dist, 'autobahn.twisted.wamplet', name)

        except Exception as e:
            emsg = "Failed to import wamplet '{}/{}'\n{}".format(
                dist, name, Failure(e).getTraceback())
            log.error(emsg)
            raise ApplicationError(u"crossbar.error.class_import_failed", emsg)

    else:
        raise ApplicationError(
            u"crossbar.error.invalid_configuration",
            "invalid component type '{}'".format(config['type'])
        )

    return component
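
For reference, here are minimal config dicts for the two branches this loader handles; the values are illustrative, only the keys and the entry point group come from the code above:

# Illustrative configs for the two component types accepted above:
class_config = {
    'type': 'class',
    # dotted path to an ApplicationSession subclass (hypothetical name)
    'classname': 'myapp.backend.MySession',
}

wamplet_config = {
    'type': 'wamplet',
    'package': 'myapp',        # setuptools distribution name (hypothetical)
    'entrypoint': 'backend',   # entry point in the 'autobahn.twisted.wamplet' group
}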
Example #6
 def test_set_global_does_not_change_explicit_loggers(self):
     """
     Setting the global log level does not change loggers that have an
     explicit level set.
     """
     log = make_logger("info")
     self.assertEqual(log._log_level, "info")
     _logging.set_global_log_level("warn")
     self.assertEqual(log._log_level, "info")
Example #7
 def test_set_global_changes_loggers(self):
     """
     Setting the global log level changes the level of all loggers that were
     not instantiated with a level.
     """
     log = make_logger()
     self.assertEqual(log._log_level, "info")
     _logging.set_global_log_level("warn")
     self.assertEqual(log._log_level, "warn")
Example #8
    def test_basic(self):

        stream = NativeStringIO()
        observer = _logging.make_stdout_observer(_file=stream)
        log = make_logger(observer=observer)

        log.info("Hi!", log_system="foo")

        result = stream.getvalue()
        self.assertIn(u"[foo]", result)
Example #9
    def test_logger_failure(self):
        """
        The failure method catches the in-flight exception.
        """
        log = make_logger("info", logger=Mock)

        try:
            1 / 0
        except:
            log.failure("Failure happened!")

        log.logger.failure.assert_called_once()
Example #10
    def test_logger_failure_not_called(self):
        """
        The failure method isn't called under 'none'.
        """
        log = make_logger("none", logger=Mock)

        try:
            1 / 0
        except:
            log.failure("Failure happened!")

        self.assertEqual(log.logger.failure.call_count, 0)
Example #11
    def test_logger_failure(self):
        """
        The failure method catches the in-flight exception.
        """
        log = make_logger("info", logger=Mock)

        try:
            1 / 0
        except:
            log.failure("Failure happened!")

        self.assertEqual(log.logger.failure.call_count, 1)
Example #12
    def test_capturer(self):
        """
        The log capturer is a context manager that captures the logs emitted
        inside it.
        """
        log = _logging.make_logger("info")

        with _logging.LogCapturer() as l:
            log.info("Whee!", log_category="CB500", foo="bar")

        self.assertEqual(len(l.get_category("CB500")), 1)
        self.assertEqual(l.get_category("CB500")[0]["foo"], "bar")
Example #13
    def test_logger_emits(self):
        """
        A Logger emits messages through to its child logger.
        """
        log = make_logger("trace", logger=Mock)

        log.error("Foo happened!!!")
        log.logger.error.assert_called_with("Foo happened!!!")

        log.warn("Stuff", foo="bar")
        log.logger.warn.assert_called_with("Stuff", foo="bar")

        log.trace("Stuff that's trace", foo="bar")
        log.logger.debug.assert_called_with("Stuff that's trace", foo="bar", cb_trace=1)
Example #14
    def __init__(self, controller, id, who, keeplog=None):
        """
        Ctor.

        :param controller: The node controller this worker was created by.
        :type controller: instance of NodeControllerSession
        :param id: The ID of the worker.
        :type id: str
        :param who: Who triggered creation of this worker.
        :type who: str
        :param keeplog: If not `None`, buffer received log messages for later
                        retrieval via getlog(). If `0`, keep an unlimited log
                        internally. If `> 0`, keep at most that many log entries
                        in the buffer.
        :type keeplog: int or None
        """
        self._logger = make_logger()

        self._controller = controller

        self.id = id
        self.who = who
        self.pid = None
        self.status = "starting"

        self.created = datetime.utcnow()
        self.connected = None
        self.started = None

        self._log_entries = deque(maxlen=10)
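        # NB: this buffer is fixed at maxlen=10; the keeplog ctor argument
        # documented above is not applied to it in this snippet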

        if platform.isWindows():
            self._log_fds = [2]
        else:
            self._log_fds = [1, 2]
        self._log_lineno = 0
        self._log_topic = 'crossbar.node.{}.worker.{}.on_log'.format(self._controller._node_id, self.id)

        self._log_rich = None  # Does not support rich logs

        # track stats for worker->controller traffic
        self._stats = {}
        self._stats_printer = None

        # A deferred that resolves when the worker is ready.
        self.ready = Deferred()

        # A deferred that resolves when the worker has exited.
        self.exit = Deferred()
        self.exit.addBoth(self._dump_remaining_log)
Example #15
    def test_basic(self):
        """
        We can instantiate a RouterWorkerSession.
        """
        log_list = []

        r = router.RouterWorkerSession(config=self.config, reactor=reactor)
        r.log = make_logger(observer=log_list.append, log_level="debug")

        # Open the transport
        transport = FakeWAMPTransport(r)
        r.onOpen(transport)

        # XXX depends on log-text; perhaps a little flaky...
        self.assertIn("running as", log_list[-1]["log_format"])
Example #16
    def test_basic(self):
        """
        We can instantiate a RouterWorkerSession.
        """
        log_list = []

        r = router.RouterWorkerSession(config=self.config)
        r.log = make_logger(observer=log_list.append, log_level="debug")

        # Open the transport
        transport = FakeWAMPTransport(r)
        r.onOpen(transport)

        # Should have 35 registers, all for the management interface
        self.assertEqual(len(transport._get(Register)), 35)
        self.assertIn("ready", log_list[-1]["log_format"])
Example #17
    def test_basic(self):
        """
        We can instantiate a RouterWorkerSession.
        """
        log_list = []

        r = router.RouterWorkerSession(config=self.config, reactor=reactor)
        r.log = make_logger(observer=log_list.append, log_level="debug")

        # Open the transport
        transport = FakeWAMPTransport(r)
        r.onOpen(transport)

        # Should have 35 registers, all for the management interface
        self.assertEqual(len(transport._get(Register)), 35)
        # XXX depends on log-text; perhaps a little flaky...
        self.assertIn("running as", log_list[-1]["log_format"])
Example #18
    def test_basic(self):
        """
        The JSON observer outputs a stream of log events.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        log.info("Hello")

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 4)
        self.assertEqual(log_entry["level"], u"info")
        self.assertEqual(log_entry["text"], u"Hello")
Example #19
def run_command_init(options, **kwargs):
    """
    Subcommand "crossbar init".
    """
    log = make_logger()

    from crossbar.controller.template import Templates

    templates = Templates()

    if options.template not in templates:
        log.info(
            "Huh, sorry. There is no template named '{options.template}'. Try 'crossbar templates' to list the templates available.",
            options=options,
        )
        sys.exit(1)

    if options.appdir is None:
        options.appdir = "."
    else:
        if os.path.exists(options.appdir):
            raise Exception("app directory '{}' already exists".format(options.appdir))

        try:
            os.mkdir(options.appdir)
        except Exception as e:
            raise Exception("could not create application directory '{}' ({})".format(options.appdir, e))
        else:
            log.info("Crossbar.io application directory '{options.appdir}' created", options=options)

    options.appdir = os.path.abspath(options.appdir)

    log.info("Initializing application template '{options.template}' in directory '{options.appdir}'", options=options)
    get_started_hint = templates.init(options.appdir, options.template)

    log.info("Application template initialized")

    if get_started_hint:
        log.info("\n{}\n".format(get_started_hint))
    else:
        log.info(
            "\nTo start your node, run 'crossbar start --cbdir {cbdir}'\n",
            cbdir=os.path.abspath(os.path.join(options.appdir, ".crossbar")),
        )
Example #20
def run_command_status(options, **kwargs):
    """
    Subcommand "crossbar status".
    """
    log = make_logger()

    # check if there is a Crossbar.io instance currently running from
    # the Crossbar.io node directory at all
    #
    pid_data = check_is_running(options.cbdir)
    if pid_data is None:
        # https://docs.python.org/2/library/os.html#os.EX_UNAVAILABLE
        # https://www.freebsd.org/cgi/man.cgi?query=sysexits&sektion=3
        log.info("No Crossbar.io instance is currently running from node directory {cbdir}.",
                 cbdir=options.cbdir)
        sys.exit(getattr(os, 'EX_UNAVAILABLE', 1))
    else:
        log.info("A Crossbar.io instance is running from node directory {cbdir} (PID {pid}).",
                 cbdir=options.cbdir, pid=pid_data['pid'])
        sys.exit(0)
Example #21
    def test_logger_emits_if_higher(self):
        """
        A Logger that has a log level of a higher severity will not emit
        messages of a lower severity.
        """
        log = make_logger("info", logger=Mock)

        log.error("Error!")
        log.debug("Debug!")
        log.info("Info!")
        log.trace("Trace!")
        log.emit(LogLevel.info, "Infoooo!")

        self.assertEqual(log.logger.failure.call_count, 0)
        self.assertEqual(log.logger.critical.call_count, 0)
        self.assertEqual(log.logger.error.call_count, 1)
        self.assertEqual(log.logger.warn.call_count, 0)
        self.assertEqual(log.logger.info.call_count, 2)
        self.assertEqual(log.logger.debug.call_count, 0)
        self.assertEqual(log.logger.trace.call_count, 0)
Example #22
    def test_failure(self):
        """
        Failures include the stacktrace.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        try:
            1 / 0
        except:
            log.failure("Oh no")

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 4)
        self.assertIn(u"ZeroDivisionError", log_entry["text"])
        self.assertIn(u"Oh no", log_entry["text"])
        self.assertEqual(log_entry["level"], u"critical")
Example #23
    def test_raising_during_encoding(self):
        """
        Non-JSON-serialisable parameters are repr()'d.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        class BadThing(object):
            def __repr__(self):
                raise Exception()

        log.info("hi {obj}", obj=BadThing())

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 3)
        self.assertIn(u"MESSAGE LOST", log_entry["text"])
        self.assertEqual(log_entry["level"], u"error")
Example #24
    def test_failure(self):
        """
        Failures include the stacktrace.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        try:
            1 / 0
        except:
            log.failure("Oh no")

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 4)
        self.assertIn(u"ZeroDivisionError", log_entry["text"])
        self.assertIn(u"Oh no", log_entry["text"])
        self.assertEqual(log_entry["level"], u"critical")
Example #25
class NodeManagementSession(ApplicationSession):
    """
    This session is used for any uplink CDC connection.
    """

    log = make_logger()

    def onConnect(self):
        authid = self.config.extra['authid']
        realm = self.config.realm
        self.log.info("Connected. Joining realm '{}' as '{}' ..".format(
            realm, authid))
        self.join(realm, [u"wampcra"], authid)

    def onChallenge(self, challenge):
        if challenge.method == u"wampcra":
            authkey = self.config.extra['authkey'].encode('utf8')
            signature = auth.compute_wcs(
                authkey, challenge.extra['challenge'].encode('utf8'))
            return signature.decode('ascii')
        else:
            raise Exception(
                "don't know how to compute challenge for authmethod {}".format(
                    challenge.method))

    def onJoin(self, details):
        self.log.info("Joined realm '{realm}' on uplink CDC router",
                      realm=details.realm)
        self.config.extra['onready'].callback(self)

    def onLeave(self, details):
        if details.reason != u"wamp.close.normal":
            self.log.warn("Session detached: {}".format(details))
        else:
            self.log.debug("Session detached: {}".format(details))
        self.disconnect()

    def onDisconnect(self):
        self.log.debug("Disconnected.")
Example #26
    def test_not_json_serialisable(self):
        """
        Non-JSON-serialisable parameters are repr()'d.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        try:
            1 / 0
        except:
            log.failure("Oh no", obj=observer)

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 5)
        self.assertIn(u"ZeroDivisionError", log_entry["text"])
        self.assertIn(u"Oh no", log_entry["text"])
        self.assertIn(u"<function ", log_entry["obj"])
        self.assertEqual(log_entry["level"], u"critical")
Example #27
    def test_repr_formatting(self):
        """
        Non-JSON-serialisable parameters are repr()'d, and any curly brackets
        in the result are escaped.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        class BracketThing(object):
            def __repr__(self):
                return "<BracketThing kwargs={}>"

        log.info("hi {obj}", obj=BracketThing())

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 5)
        self.assertEqual(u"hi <BracketThing kwargs={{}}>", log_entry["text"])
        self.assertEqual(log_entry["level"], u"info")
Example #28
    def test_repr_formatting(self):
        """
        Non-JSON-serialisable parameters are repr()'d, and any curly brackets
        in the result are escaped.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        class BracketThing(object):
            def __repr__(self):
                return "<BracketThing kwargs={}>"

        log.info("hi {obj}", obj=BracketThing())

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 5)
        self.assertEqual(u"hi <BracketThing kwargs={{}}>", log_entry["text"])
        self.assertEqual(log_entry["level"], u"info")
Example #29
    def test_not_json_serialisable(self):
        """
        Non-JSON-serialisable parameters are repr()'d.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        try:
            1 / 0
        except:
            log.failure("Oh no", obj=observer)

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 5)
        self.assertIn(u"ZeroDivisionError", log_entry["text"])
        self.assertIn(u"Oh no", log_entry["text"])
        self.assertIn(u"<function ", log_entry["obj"])
        self.assertEqual(log_entry["level"], u"critical")
Example #30
    def test_raising_during_encoding(self):
        """
        Non-JSON-serialisable parameters are repr()'d, and if that's impossible
        then the message is lost.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        class BadThing(object):
            def __repr__(self):
                raise Exception()

        log.info("hi {obj}", obj=BadThing())

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 3)
        self.assertIn(u"MESSAGE LOST", log_entry["text"])
        self.assertEqual(log_entry["level"], u"error")
Example #31
    def test_start_router_component_fails(self):
        """
        Trying to start a class-based router component whose class fails to
        import raises an error.
        """
        log_list = []

        r = router.RouterWorkerSession(config=self.config, reactor=reactor)
        r.log = make_logger(observer=log_list.append, log_level="debug")

        # Open the transport
        transport = FakeWAMPTransport(r)
        r.onOpen(transport)

        realm_config = {
            u"name": u"realm1",
            u'roles': [{u'name': u'anonymous',
                        u'permissions': [{u'subscribe': True,
                                          u'register': True, u'call': True,
                                          u'uri': u'*', u'publish': True}]}]
        }

        r.start_router_realm("realm1", realm_config)

        component_config = {
            "type": u"class",
            "classname": u"thisisathing.thatdoesnot.exist",
            "realm": u"realm1"
        }

        with self.assertRaises(ApplicationError) as e:
            r.start_router_component("newcomponent", component_config)

        self.assertIn(
            "Failed to import class 'thisisathing.thatdoesnot.exist'",
            str(e.exception.args[0]))

        self.assertEqual(len(r.get_router_components()), 0)
Example #32
def run_command_status(options, **kwargs):
    """
    Subcommand "crossbar status".
    """
    log = make_logger()

    # check if there is a Crossbar.io instance currently running from
    # the Crossbar.io node directory at all
    #
    pid_data = check_is_running(options.cbdir)
    if pid_data is None:
        # https://docs.python.org/2/library/os.html#os.EX_UNAVAILABLE
        # https://www.freebsd.org/cgi/man.cgi?query=sysexits&sektion=3
        log.info(
            "No Crossbar.io instance is currently running from node directory {cbdir}.",
            cbdir=options.cbdir)
        sys.exit(getattr(os, 'EX_UNAVAILABLE', 1))
    else:
        log.info(
            "A Crossbar.io instance is running from node directory {cbdir} (PID {pid}).",
            cbdir=options.cbdir,
            pid=pid_data['pid'])
        sys.exit(0)
Example #33
    def test_unicode_logs(self):
        """
        Unicode is JSON serialised correctly.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        try:
            if PY3:
                raise Exception(u"\u2603")
            else:
                raise Exception(u"\u2603".encode('utf-8'))
        except:
            log.failure("Oh no")

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 4)
        self.assertIn(u"\u2603", log_entry["text"])
        self.assertEqual(log_entry["level"], u"critical")
Example #34
    def test_unicode_logs(self):
        """
        Unicode is JSON serialised correctly.
        """
        stream = StringIO()
        observer = _logging.make_JSON_observer(stream)
        log = make_logger(observer=observer)

        try:
            if PY3:
                raise Exception(u"\u2603")
            else:
                raise Exception(u"\u2603".encode('utf-8'))
        except:
            log.failure("Oh no")

        result = stream.getvalue()
        log_entry = json.loads(result[:-1])

        self.assertEqual(result[-1], _logging.record_separator)
        self.assertEqual(len(log_entry.keys()), 4)
        self.assertIn(u"\u2603", log_entry["text"])
        self.assertEqual(log_entry["level"], u"critical")
Example #35
class WebSocketTesteeServerProtocol(WebSocketServerProtocol):

    log = make_logger()

    def onMessage(self, payload, isBinary):
        self.sendMessage(payload, isBinary)

    def sendServerStatus(self, redirectUrl=None, redirectAfter=0):
        """
        Used to send out the server status/version upon receiving an HTTP GET
        without a WebSocket upgrade header (and when the serverStatus option is True).
        """
        try:
            page = self.factory._templates.get_template(
                'cb_ws_testee_status.html')
            self.sendHtml(
                page.render(redirectUrl=redirectUrl,
                            redirectAfter=redirectAfter,
                            cbVersion=crossbar.__version__,
                            wsUri=self.factory.url))
        except Exception as e:
            self.log.warn(
                "Error rendering WebSocket status page template: {}".format(e))
Example #36
    def _cleanup_worker(reactor, worker):
        """
        This is called during reactor shutdown and ensures we wait for our
        subprocesses to shut down nicely.
        """
        log = make_logger()
        try:
            log.info("sending TERM to subprocess {pid}", pid=worker.pid)
            worker.proto.transport.signalProcess('TERM')
            # wait for the subprocess to shutdown; could add a timeout
            # after which we send a KILL maybe?
            d = Deferred()

            def protocol_closed(_):
                log.debug("{pid} exited", pid=worker.pid)
                d.callback(None)

            # await worker's timely demise
            worker.exit.addCallback(protocol_closed)

            def timeout(tried):
                if d.called:
                    return
                log.info("waiting for {pid} to exit...", pid=worker.pid)
                reactor.callLater(1, timeout, tried + 1)
                if tried > 20:  # or just wait forever?
                    log.info("Sending SIGKILL to {pid}", pid=worker.pid)
                    try:
                        worker.proto.transport.signalProcess('KILL')
                    except ProcessExitedAlready:
                        pass  # ignore; it's already dead
                    d.callback(None)  # or recurse more?

            timeout(0)
            return d
        except ProcessExitedAlready:
            pass  # ignore; it's already dead
Example #37
def run_command_clean_cookies(options, **kwargs):
    """
    Subcommand "crossbar clean-cookies".
    """

    from crossbar.router.cookiestore import CookieFileCleaner, CookieStoreFileBacked

    log = make_logger()

    configfile = os.path.join(options.cbdir, options.config)

    with open(configfile, 'r') as infile:
        config = json.load(infile)  # type: dict

    for worker in config['workers']:
        for transport in worker['transports']:  # type: dict
            if 'cookie' in transport:
                store = transport['cookie']['store']
                if store['type'] == 'file':
                    cookiestore = CookieStoreFileBacked(store['filename'], transport['cookie'])
                    cleaner = CookieFileCleaner(cookiestore)
                    cleaner.clean()

                    log.info('{filename} cookie file cleaned.'.format(filename=store['filename']))
Example #38
class Broker(object):
    """
    Basic WAMP broker.
    """

    log = make_logger()

    def __init__(self, router, options=None):
        """

        :param router: The router this broker is part of.
        :type router: Object that implements :class:`crossbar.router.interfaces.IRouter`.
        :param options: Router options.
        :type options: Instance of :class:`crossbar.router.types.RouterOptions`.
        """
        self._router = router
        self._options = options or RouterOptions()

        # subscription map managed by this broker
        self._subscription_map = UriObservationMap()

        # map: session -> set of subscriptions (needed for detach)
        self._session_to_subscriptions = {}

        # check all topic URIs with strict rules
        self._option_uri_strict = self._options.uri_check == RouterOptions.URI_CHECK_STRICT

        # supported features from "WAMP Advanced Profile"
        self._role_features = role.RoleBrokerFeatures(
            publisher_identification=True,
            pattern_based_subscription=True,
            session_meta_api=True,
            subscription_meta_api=True,
            subscriber_blackwhite_listing=True,
            publisher_exclusion=True,
            subscription_revocation=True,
            payload_transparency=True,
            payload_encryption_cryptobox=True)

        # store for event history
        if self._router._store:
            self._event_store = self._router._store.event_store
        else:
            self._event_store = None

        # if there is a store, let the store attach itself to all the subscriptions
        # it is configured to track
        if self._event_store:
            self._event_store.attach_subscription_map(self._subscription_map)

    def attach(self, session):
        """
        Implements :func:`crossbar.router.interfaces.IBroker.attach`
        """
        if session not in self._session_to_subscriptions:
            self._session_to_subscriptions[session] = set()
        else:
            raise Exception(u"session with ID {} already attached".format(
                session._session_id))

    def detach(self, session):
        """
        Implements :func:`crossbar.router.interfaces.IBroker.detach`
        """
        if session in self._session_to_subscriptions:

            for subscription in self._session_to_subscriptions[session]:

                was_subscribed, was_last_subscriber = self._subscription_map.drop_observer(
                    session, subscription)

                # publish WAMP meta events
                #
                if self._router._realm:
                    service_session = self._router._realm.session
                    if service_session and not subscription.uri.startswith(
                            u'wamp.'):
                        if was_subscribed:
                            service_session.publish(
                                u'wamp.subscription.on_unsubscribe',
                                session._session_id, subscription.id)
                        if was_last_subscriber:
                            service_session.publish(
                                u'wamp.subscription.on_delete',
                                session._session_id, subscription.id)

            del self._session_to_subscriptions[session]

        else:
            raise Exception("session with ID {} not attached".format(
                session._session_id))

    def processPublish(self, session, publish):
        """
        Implements :func:`crossbar.router.interfaces.IBroker.processPublish`
        """
        # check topic URI: for PUBLISH, must be valid URI (either strict or loose), and
        # all URI components must be non-empty
        if self._option_uri_strict:
            uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(publish.topic)
        else:
            uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(publish.topic)

        if not uri_is_valid:
            if publish.acknowledge:
                reply = message.Error(
                    message.Publish.MESSAGE_TYPE, publish.request,
                    ApplicationError.INVALID_URI, [
                        u"publish with invalid topic URI '{0}' (URI strict checking {1})"
                        .format(publish.topic, self._option_uri_strict)
                    ])
                self._router.send(session, reply)
            return

        # disallow publication to topics starting with "wamp." and "crossbar." other than for
        # trusted sessions (that are sessions built into Crossbar.io)
        #
        if session._authrole is not None and session._authrole != u"trusted":
            is_restricted = publish.topic.startswith(
                u"wamp.") or publish.topic.startswith(u"crossbar.")
            if is_restricted:
                if publish.acknowledge:
                    reply = message.Error(
                        message.Publish.MESSAGE_TYPE, publish.request,
                        ApplicationError.INVALID_URI, [
                            u"publish with restricted topic URI '{0}'".format(
                                publish.topic)
                        ])
                    self._router.send(session, reply)
                return

        # get subscriptions active on the topic published to
        #
        subscriptions = self._subscription_map.match_observations(
            publish.topic)

        # check whether the event is to be persisted, by checking if we ourselves
        # are among the observers on _any_ matching subscription: the event store
        # was added to subscription observer lists (based on node configuration)
        # when the broker started up.
        store_event = False
        if self._event_store:
            for subscription in subscriptions:
                if self._event_store in subscription.observers:
                    store_event = True
                    break
        if store_event:
            self.log.debug('Persisting event on topic "{topic}"',
                           topic=publish.topic)

        # go on if (otherwise there isn't anything to do anyway):
        #
        #   - there are any active subscriptions OR
        #   - the publish is to be acknowledged OR
        #   - the event is to be persisted
        #
        if subscriptions or publish.acknowledge or store_event:

            # validate payload
            #
            if publish.payload is None:
                try:
                    self._router.validate('event', publish.topic, publish.args,
                                          publish.kwargs)
                except Exception as e:
                    if publish.acknowledge:
                        reply = message.Error(
                            message.Publish.MESSAGE_TYPE, publish.request,
                            ApplicationError.INVALID_ARGUMENT, [
                                u"publish to topic URI '{0}' with invalid application payload: {1}"
                                .format(publish.topic, e)
                            ])
                        self._router.send(session, reply)
                    return

            # authorize PUBLISH action
            #
            d = self._router.authorize(session, publish.topic, u'publish')

            def on_authorize_success(authorization):

                # the call to authorize the action _itself_ succeeded. now go on depending on whether
                # the action was actually authorized or not ..
                #
                if not authorization[u'allow']:

                    if publish.acknowledge:
                        reply = message.Error(
                            message.Publish.MESSAGE_TYPE, publish.request,
                            ApplicationError.NOT_AUTHORIZED, [
                                u"session not authorized to publish to topic '{0}'"
                                .format(publish.topic)
                            ])
                        self._router.send(session, reply)

                else:

                    # new ID for the publication
                    #
                    publication = util.id()

                    # persist event (this is done only once, regardless of the number of subscriptions
                    # the event matches on)
                    #
                    if store_event:
                        self._event_store.store_event(session._session_id,
                                                      publication,
                                                      publish.topic,
                                                      publish.args,
                                                      publish.kwargs)

                    # send publish acknowledge immediately when requested
                    #
                    if publish.acknowledge:
                        reply = message.Published(publish.request, publication)
                        self._router.send(session, reply)

                    # publisher disclosure
                    #
                    if authorization[u'disclose']:
                        publisher = session._session_id
                        publisher_authid = session._authid
                        publisher_authrole = session._authrole
                    else:
                        publisher = None
                        publisher_authid = None
                        publisher_authrole = None

                    # skip publisher
                    #
                    if publish.exclude_me is None or publish.exclude_me:
                        me_also = False
                    else:
                        me_also = True

                    # iterate over all subscriptions ..
                    #
                    for subscription in subscriptions:

                        # persist event history, but check if it is persisted on the individual subscription!
                        #
                        if store_event and self._event_store in subscription.observers:
                            self._event_store.store_event_history(
                                publication, subscription.id)

                        # initial list of receivers are all subscribers on a subscription ..
                        #
                        receivers = subscription.observers

                        # filter by "eligible" receivers
                        #
                        if publish.eligible:

                            # map eligible session IDs to eligible sessions
                            eligible = []
                            for session_id in publish.eligible:
                                if session_id in self._router._session_id_to_session:
                                    eligible.append(
                                        self._router.
                                        _session_id_to_session[session_id])

                            # filter receivers for eligible sessions
                            receivers = set(eligible) & receivers

                        # remove "excluded" receivers
                        #
                        if publish.exclude:

                            # map excluded session IDs to excluded sessions
                            exclude = []
                            for s in publish.exclude:
                                if s in self._router._session_id_to_session:
                                    exclude.append(
                                        self._router._session_id_to_session[s])

                            # filter receivers for excluded sessions
                            if exclude:
                                receivers = receivers - set(exclude)

                        # if receivers is non-empty, dispatch event ..
                        #
                        receivers_cnt = len(receivers) - (1 if self
                                                          in receivers else 0)
                        if receivers_cnt:

                            # for pattern-based subscriptions, the EVENT must contain
                            # the actual topic being published to
                            #
                            if subscription.match != message.Subscribe.MATCH_EXACT:
                                topic = publish.topic
                            else:
                                topic = None

                            if publish.payload:
                                msg = message.Event(
                                    subscription.id,
                                    publication,
                                    payload=publish.payload,
                                    publisher=publisher,
                                    publisher_authid=publisher_authid,
                                    publisher_authrole=publisher_authrole,
                                    topic=topic,
                                    enc_algo=publish.enc_algo,
                                    enc_key=publish.enc_key,
                                    enc_serializer=publish.enc_serializer)
                            else:
                                msg = message.Event(
                                    subscription.id,
                                    publication,
                                    args=publish.args,
                                    kwargs=publish.kwargs,
                                    publisher=publisher,
                                    publisher_authid=publisher_authid,
                                    publisher_authrole=publisher_authrole,
                                    topic=topic)
                            for receiver in receivers:
                                if (me_also or receiver != session
                                    ) and receiver != self._event_store:
                                    # the receiving subscriber session
                                    # might have no transport, or no
                                    # longer be joined
                                    if receiver._session_id and receiver._transport:
                                        self._router.send(receiver, msg)

            def on_authorize_error(err):
                """
                the call to authorize the action _itself_ failed (note this is
                different from the authorize call succeeding but the
                authorization being denied)
                """
                self.log.failure(err)
                if publish.acknowledge:
                    reply = message.Error(
                        message.Publish.MESSAGE_TYPE, publish.request,
                        ApplicationError.AUTHORIZATION_FAILED, [
                            u"failed to authorize session for publishing to topic URI '{0}': {1}"
                            .format(publish.topic, err.value)
                        ])
                    self._router.send(session, reply)

            txaio.add_callbacks(d, on_authorize_success, on_authorize_error)

    def processSubscribe(self, session, subscribe):
        """
        Implements :func:`crossbar.router.interfaces.IBroker.processSubscribe`
        """
        # check topic URI: for SUBSCRIBE, must be valid URI (either strict or loose), and all
        # URI components must be non-empty for normal subscriptions, may be empty for
        # wildcard subscriptions and must be non-empty for all but the last component for
        # prefix subscriptions
        #
        if self._option_uri_strict:
            if subscribe.match == u"wildcard":
                uri_is_valid = _URI_PAT_STRICT_EMPTY.match(subscribe.topic)
            elif subscribe.match == u"prefix":
                uri_is_valid = _URI_PAT_STRICT_LAST_EMPTY.match(
                    subscribe.topic)
            else:
                uri_is_valid = _URI_PAT_STRICT_NON_EMPTY.match(subscribe.topic)
        else:
            if subscribe.match == u"wildcard":
                uri_is_valid = _URI_PAT_LOOSE_EMPTY.match(subscribe.topic)
            elif subscribe.match == u"prefix":
                uri_is_valid = _URI_PAT_LOOSE_LAST_EMPTY.match(subscribe.topic)
            else:
                uri_is_valid = _URI_PAT_LOOSE_NON_EMPTY.match(subscribe.topic)

        if not uri_is_valid:
            reply = message.Error(
                message.Subscribe.MESSAGE_TYPE, subscribe.request,
                ApplicationError.INVALID_URI, [
                    u"subscribe for invalid topic URI '{0}'".format(
                        subscribe.topic)
                ])
            self._router.send(session, reply)
            return

        # authorize SUBSCRIBE action
        #
        d = self._router.authorize(session, subscribe.topic, u'subscribe')

        def on_authorize_success(authorization):
            if not authorization[u'allow']:
                # error reply since session is not authorized to subscribe
                #
                reply = message.Error(
                    message.Subscribe.MESSAGE_TYPE, subscribe.request,
                    ApplicationError.NOT_AUTHORIZED, [
                        u"session is not authorized to subscribe to topic '{0}'"
                        .format(subscribe.topic)
                    ])

            else:
                # ok, session authorized to subscribe. now get the subscription
                #
                subscription, was_already_subscribed, is_first_subscriber = self._subscription_map.add_observer(
                    session, subscribe.topic, subscribe.match)

                if not was_already_subscribed:
                    self._session_to_subscriptions[session].add(subscription)

                # publish WAMP meta events
                #
                if self._router._realm:
                    service_session = self._router._realm.session
                    if service_session and not subscription.uri.startswith(
                            u'wamp.'):
                        if is_first_subscriber:
                            subscription_details = {
                                u'id': subscription.id,
                                u'created': subscription.created,
                                u'uri': subscription.uri,
                                u'match': subscription.match,
                            }
                            service_session.publish(
                                u'wamp.subscription.on_create',
                                session._session_id, subscription_details)
                        if not was_already_subscribed:
                            service_session.publish(
                                u'wamp.subscription.on_subscribe',
                                session._session_id, subscription.id)

                # acknowledge subscribe with subscription ID
                #
                reply = message.Subscribed(subscribe.request, subscription.id)

            # send out reply to subscribe requestor
            #
            self._router.send(session, reply)

        def on_authorize_error(err):
            """
            the call to authorize the action _itself_ failed (note this is
            different from the authorize call succeeding but the
            authorization being denied)
            """
            # XXX same as another code-block, can we collapse?
            self.log.failure(err)
            reply = message.Error(
                message.Subscribe.MESSAGE_TYPE, subscribe.request,
                ApplicationError.AUTHORIZATION_FAILED, [
                    u"failed to authorize session for subscribing to topic URI '{0}': {1}"
                    .format(subscribe.topic, err.value)
                ])
            self._router.send(session, reply)

        txaio.add_callbacks(d, on_authorize_success, on_authorize_error)

    def processUnsubscribe(self, session, unsubscribe):
        """
        Implements :func:`crossbar.router.interfaces.IBroker.processUnsubscribe`
        """
        # get subscription by subscription ID or None (if it doesn't exist on this broker)
        #
        subscription = self._subscription_map.get_observation_by_id(
            unsubscribe.subscription)

        if subscription:

            if session in subscription.observers:

                was_subscribed, was_last_subscriber = self._unsubscribe(
                    subscription, session)

                reply = message.Unsubscribed(unsubscribe.request)
            else:
                # subscription exists on this broker, but the session that wanted to unsubscribe wasn't subscribed
                #
                reply = message.Error(message.Unsubscribe.MESSAGE_TYPE,
                                      unsubscribe.request,
                                      ApplicationError.NO_SUCH_SUBSCRIPTION)

        else:
            # subscription doesn't even exist on this broker
            #
            reply = message.Error(message.Unsubscribe.MESSAGE_TYPE,
                                  unsubscribe.request,
                                  ApplicationError.NO_SUCH_SUBSCRIPTION)

        self._router.send(session, reply)

    def _unsubscribe(self, subscription, session):

        # drop session from subscription observers
        #
        was_subscribed, was_last_subscriber = self._subscription_map.drop_observer(
            session, subscription)

        # remove subscription from session->subscriptions map
        #
        if was_subscribed:
            self._session_to_subscriptions[session].discard(subscription)

        # publish WAMP meta events
        #
        if self._router._realm:
            service_session = self._router._realm.session
            if service_session and not subscription.uri.startswith(u'wamp.'):
                if was_subscribed:
                    service_session.publish(
                        u'wamp.subscription.on_unsubscribe',
                        session._session_id, subscription.id)
                if was_last_subscriber:
                    service_session.publish(u'wamp.subscription.on_delete',
                                            session._session_id,
                                            subscription.id)

        return was_subscribed, was_last_subscriber

    def removeSubscriber(self, subscription, session, reason=None):
        """
        Actively unsubscribe a subscriber session from a subscription.
        """
        was_subscribed, was_last_subscriber = self._unsubscribe(
            subscription, session)

        if 'subscriber' in session._session_roles and session._session_roles[
                'subscriber'] and session._session_roles[
                    'subscriber'].subscription_revocation:
            reply = message.Unsubscribed(0,
                                         subscription=subscription.id,
                                         reason=reason)
            self._router.send(session, reply)

        return was_subscribed, was_last_subscriber
Example #39
class TlsServerContextFactory(DefaultOpenSSLContextFactory):
    """
    TLS context factory for use with Twisted.

    Like the default

       http://twistedmatrix.com/trac/browser/tags/releases/twisted-11.1.0/twisted/internet/ssl.py#L42

    but loads the key/cert from strings rather than files, and supports chained certificates.

    See also:

       http://pyopenssl.sourceforge.net/pyOpenSSL.html/openssl-context.html
       http://www.openssl.org/docs/ssl/SSL_CTX_use_certificate.html

    Chained certificates:
       The certificates must be in PEM format and must be sorted starting with
       the subject's certificate (actual client or server certificate), followed
       by intermediate CA certificates if applicable, and ending at the
       highest level (root) CA.

    Hardening:
       http://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
       https://www.ssllabs.com/ssltest/analyze.html?d=www.example.com
    """

    log = make_logger()

    def __init__(self,
                 privateKeyString,
                 certificateString,
                 chainedCertificate=True,
                 dhParamFilename=None,
                 ciphers=None):
        self._privateKeyString = str(privateKeyString).encode('utf8')
        self._certificateString = str(certificateString).encode('utf8')
        self._chainedCertificate = chainedCertificate
        self._dhParamFilename = str(
            dhParamFilename) if dhParamFilename else None
        self._ciphers = str(ciphers) if ciphers else None

        # do an SSLv2-compatible handshake even for TLS
        #
        self.sslmethod = SSL.SSLv23_METHOD

        self._contextFactory = SSL.Context
        self.cacheContext()

    def cacheContext(self):
        if self._context is None:
            ctx = self._contextFactory(self.sslmethod)

            # SSL hardening
            #
            ctx.set_options(SSL_DEFAULT_OPTIONS)

            if self._ciphers:
                ctx.set_cipher_list(self._ciphers)
                self.log.info("Using explicit cipher list.")
            else:
                ctx.set_cipher_list(SSL_DEFAULT_CIPHERS)
                self.log.info("Using default cipher list.")

            # Activate DH(E)
            #
            # http://linux.die.net/man/3/ssl_ctx_set_tmp_dh
            # http://linux.die.net/man/1/dhparam
            #
            if self._dhParamFilename:
                try:
                    ctx.load_tmp_dh(self._dhParamFilename)
                except Exception:
                    self.log.failure(
                        "Error: OpenSSL DH modes not active - failed to load DH parameter file [{log_failure}]"
                    )
                else:
                    self.log.info(
                        "Ok, OpenSSL Diffie-Hellman ciphers parameter file loaded."
                    )
            else:
                self.log.warn(
                    "OpenSSL DH modes not active - missing DH param file")

            # Activate ECDH(E)
            #
            # This needs pyOpenSSL 0.15
            #
            try:
                # without setting a curve, ECDH won't be available even if listed
                # in SSL_DEFAULT_CIPHERS!
                # curve must be one of OpenSSL.crypto.get_elliptic_curves()
                #
                curve = crypto.get_elliptic_curve(ECDH_DEFAULT_CURVE_NAME)
                ctx.set_tmp_ecdh(curve)
            except Exception:
                self.log.failure(
                    "Warning: OpenSSL failed to set ECDH default curve [{log_failure}]"
                )
            else:
                self.log.info(
                    "Ok, OpenSSL is using ECDH elliptic curve {curve}",
                    curve=ECDH_DEFAULT_CURVE_NAME)

            # load certificate (chain) into context
            #
            if not self._chainedCertificate:
                cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                               self._certificateString)
                ctx.use_certificate(cert)
            else:
                # http://pyopenssl.sourceforge.net/pyOpenSSL.html/openssl-context.html
                # there is no "use_certificate_chain" API taking a string, so
                # we write the certificate chain to a temporary file, load it
                # from there, and clean the file up afterwards
                f = tempfile.NamedTemporaryFile(delete=False)
                try:
                    f.write(self._certificateString)
                    f.close()
                    ctx.use_certificate_chain_file(f.name)
                finally:
                    os.unlink(f.name)

            # load private key into context
            #
            key = crypto.load_privatekey(crypto.FILETYPE_PEM,
                                         self._privateKeyString)
            ctx.use_privatekey(key)
            ctx.check_privatekey()

            # set cached context
            #
            self._context = ctx
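

# Hedged usage sketch (not part of the original source): build the context
# factory from PEM strings and serve a trivial site over TLS via Twisted's
# listenSSL(). File names and the port are illustrative.
if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.web.server import Site
    from twisted.web.static import Data

    with open('server.key') as f:
        key_pem = f.read()
    with open('server.crt') as f:
        cert_pem = f.read()

    ctx_factory = TlsServerContextFactory(key_pem, cert_pem,
                                          chainedCertificate=True)
    site = Site(Data(b'Hello over TLS\n', 'text/plain'))
    reactor.listenSSL(8443, site, ctx_factory)
    reactor.run()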
Ejemplo n.º 40
0
def check_is_running(cbdir):
    """
    Check whether a Crossbar.io node is already running from the given node directory.

    :param cbdir: The Crossbar.io node directory to check.
    :type cbdir: str

    :returns: The PID file contents (a dict with at least a ``pid`` key) of the
        running Crossbar.io controller process, or ``None`` if no node is running.
    :rtype: dict or None
    """
    log = make_logger()

    remove_PID_type = None
    remove_PID_reason = None

    fp = os.path.join(cbdir, _PID_FILENAME)

    if os.path.isfile(fp):
        with open(fp) as fd:
            pid_data_str = fd.read()
            try:
                pid_data = json.loads(pid_data_str)
                pid = int(pid_data['pid'])
            except ValueError:
                remove_PID_type = "corrupt"
                remove_PID_reason = "corrupt .pid file"
            else:
                if sys.platform == 'win32' and not _HAS_PSUTIL:
                    # when on Windows, and we can't actually determine if the PID exists,
                    # just assume it exists
                    return pid_data
                else:
                    pid_exists = check_pid_exists(pid)
                    if pid_exists:
                        if _HAS_PSUTIL:
                            # additionally check this is actually a crossbar process
                            p = psutil.Process(pid)
                            cmdline = p.cmdline()
                            if not _is_crossbar_process(cmdline):
                                nicecmdline = ' '.join(cmdline)
                                if len(nicecmdline) > 76:
                                    nicecmdline = nicecmdline[:38] + ' ... ' + nicecmdline[-38:]
                                log.info(
                                    '"{}" points to PID {} which is not a crossbar process:'
                                    .format(fp, pid))
                                log.info('  ' + nicecmdline)
                                log.info(
                                    'Verify manually and either kill {} or delete {}'
                                    .format(pid, fp))
                                return None
                        return pid_data
                    else:
                        remove_PID_type = "stale"
                        remove_PID_reason = "pointing to non-existing process with PID {}".format(
                            pid)

    if remove_PID_type:
        # If we have to remove a PID, do it here.
        try:
            os.remove(fp)
        except OSError:
            log.failure(("Could not remove {pidtype} Crossbar.io PID file "
                         "({reason}) {fp} - {log_failure}"),
                        pidtype=remove_PID_type,
                        reason=remove_PID_reason,
                        fp=fp)
        else:
            log.info("{pidtype} Crossbar.io PID file ({reason}) {fp} removed",
                     pidtype=remove_PID_type.title(),
                     reason=remove_PID_reason,
                     fp=fp)

    return None
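

# Hedged usage sketch (not part of the original source): guard against
# starting a second node from the same node directory ('.crossbar' is the
# conventional default, used here illustratively).
if __name__ == '__main__':
    pid_data = check_is_running('.crossbar')
    if pid_data:
        print("Crossbar.io already running with PID {}".format(pid_data['pid']))
    else:
        print("No running Crossbar.io node found - safe to start.")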
Ejemplo n.º 41
0
def run_command_version(options, reactor=None, **kwargs):
    """
    Subcommand "crossbar version".
    """
    log = make_logger()
    # verbose = True

    # Python
    py_ver = '.'.join(str(x) for x in sys.version_info[:3])
    py_ver_string = "[%s]" % sys.version.replace('\n', ' ')

    if 'pypy_version_info' in sys.__dict__:
        py_ver_detail = "{}-{}".format(
            platform.python_implementation(),
            '.'.join(str(x) for x in sys.pypy_version_info[:3]))
    else:
        py_ver_detail = platform.python_implementation()

    # Twisted / Reactor
    tx_ver = "%s-%s" % (pkg_resources.require("Twisted")[0].version,
                        reactor.__class__.__name__)
    tx_loc = "[%s]" % qual(reactor.__class__)

    # txaio
    txaio_ver = 'txaio-%s' % pkg_resources.require("txaio")[0].version

    # Autobahn
    from autobahn.websocket.protocol import WebSocketProtocol
    ab_ver = pkg_resources.require("autobahn")[0].version
    ab_loc = "[%s]" % qual(WebSocketProtocol)

    # UTF8 Validator
    from autobahn.websocket.utf8validator import Utf8Validator
    s = qual(Utf8Validator)
    if 'wsaccel' in s:
        utf8_ver = 'wsaccel-%s' % pkg_resources.require('wsaccel')[0].version
    elif s.startswith('autobahn'):
        utf8_ver = 'autobahn'
    else:
        # could not detect UTF8 validator type/version
        utf8_ver = '?'
    utf8_loc = "[%s]" % qual(Utf8Validator)

    # XOR Masker
    from autobahn.websocket.xormasker import XorMaskerNull
    s = qual(XorMaskerNull)
    if 'wsaccel' in s:
        xor_ver = 'wsaccel-%s' % pkg_resources.require('wsaccel')[0].version
    elif s.startswith('autobahn'):
        xor_ver = 'autobahn'
    else:
        # could not detect XOR masker type/version
        xor_ver = '?'
    xor_loc = "[%s]" % qual(XorMaskerNull)

    # JSON Serializer
    supported_serializers = ['JSON']
    from autobahn.wamp.serializer import JsonObjectSerializer
    s = str(JsonObjectSerializer.JSON_MODULE)
    if 'ujson' in s:
        json_ver = 'ujson-%s' % pkg_resources.require('ujson')[0].version
    else:
        json_ver = 'stdlib'

    # MsgPack Serializer
    try:
        import msgpack  # noqa
        msgpack_ver = 'msgpack-python-%s' % pkg_resources.require('msgpack-python')[0].version
        supported_serializers.append('MessagePack')
    except ImportError:
        msgpack_ver = '-'

    # CBOR Serializer
    try:
        import cbor  # noqa
        cbor_ver = 'cbor-%s' % pkg_resources.require('cbor')[0].version
        supported_serializers.append('CBOR')
    except ImportError:
        cbor_ver = '-'

    # LMDB
    try:
        import lmdb  # noqa
        lmdb_lib_ver = '.'.join([str(x) for x in lmdb.version()])
        lmdb_ver = '{}/lmdb-{}'.format(
            pkg_resources.require('lmdb')[0].version, lmdb_lib_ver)
    except ImportError:
        lmdb_ver = '-'

    def decorate(text):
        return click.style(text, fg='yellow', bold=True)

    for line in BANNER.splitlines():
        log.info(decorate("{:>40}".format(line)))

    pad = " " * 22

    log.info(" Crossbar.io        : {ver}", ver=decorate(crossbar.__version__))
    log.info("   Autobahn         : {ver} (with {serializers})",
             ver=decorate(ab_ver),
             serializers=', '.join(supported_serializers))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(ab_loc))
    log.debug("     txaio          : {ver}", ver=decorate(txaio_ver))
    log.debug("     UTF8 Validator : {ver}", ver=decorate(utf8_ver))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(utf8_loc))
    log.debug("     XOR Masker     : {ver}", ver=decorate(xor_ver))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(xor_loc))
    log.debug("     JSON Codec     : {ver}", ver=decorate(json_ver))
    log.debug("     MsgPack Codec  : {ver}", ver=decorate(msgpack_ver))
    log.debug("     CBOR Codec     : {ver}", ver=decorate(cbor_ver))
    log.info("   Twisted          : {ver}", ver=decorate(tx_ver))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(tx_loc))
    log.info("   LMDB             : {ver}", ver=decorate(lmdb_ver))
    log.info("   Python           : {ver}/{impl}",
             ver=decorate(py_ver),
             impl=decorate(py_ver_detail))
    log.trace("{pad}{debuginfo}", pad=pad, debuginfo=decorate(py_ver_string))
    log.info(" OS                 : {ver}", ver=decorate(platform.platform()))
    log.info(" Machine            : {ver}", ver=decorate(platform.machine()))
    log.info("")
Ejemplo n.º 42
0
class ListeningEndpointTests(TestCase):

    log = make_logger()

    def setUp(self):
        self.cbdir = self.mktemp()
        FilePath(self.cbdir).makedirs()
        return super(ListeningEndpointTests, self).setUp()

    def test_unix(self):
        """
        A config with type = "unix" will create an endpoint for a UNIX socket
        at the given path.
        """
        path = FilePath("/tmp").child(uuid4().hex).path
        self.addCleanup(os.remove, path)

        reactor = SelectReactor()
        config = {"type": "unix", "path": path}

        endpoint = create_listening_endpoint_from_config(
            config, self.cbdir, reactor, self.log)
        self.assertTrue(isinstance(endpoint, UNIXServerEndpoint))

        factory = Factory.forProtocol(Echo)
        endpoint.listen(factory)

        self.assertIn(
            factory,
            [getattr(x, "factory", None) for x in reactor.getReaders()])

    def test_unix_already_listening(self):
        """
        A config with type = "unix" will create an endpoint for a UNIX socket
        at the given path, and delete it if required.
        """
        path = FilePath("/tmp").child(uuid4().hex).path
        self.addCleanup(os.remove, path)

        # Something is already there
        FilePath(path).setContent(b"")

        reactor = SelectReactor()
        config = {"type": "unix", "path": path}

        endpoint = create_listening_endpoint_from_config(
            config, self.cbdir, reactor, self.log)
        self.assertTrue(isinstance(endpoint, UNIXServerEndpoint))

        factory = Factory.forProtocol(Echo)
        endpoint.listen(factory)

        self.assertIn(
            factory,
            [getattr(x, "factory", None) for x in reactor.getReaders()])

    def test_unix_already_listening_cant_delete(self):
        """
        A config with type = "unix" will create an endpoint for a UNIX socket
        at the given path, and delete it if required. If it can't delete it, it
        will raise an exception.
        """
        parent_fp = FilePath("/tmp").child(uuid4().hex)
        parent_fp.makedirs()
        fp = parent_fp.child(uuid4().hex)

        # Something is already there
        fp.setContent(b"")
        fp.chmod(0o544)
        parent_fp.chmod(0o544)

        reactor = SelectReactor()
        config = {"type": "unix", "path": fp.path}

        with self.assertRaises(OSError) as e:
            create_listening_endpoint_from_config(config, self.cbdir, reactor,
                                                  self.log)
        self.assertEqual(e.exception.errno, 13)  # Permission Denied

        parent_fp.chmod(0o777)
        parent_fp.remove()

    if platform.isWindows():
        _ = "Windows does not have UNIX sockets"
        test_unix.skip = _
        test_unix_already_listening.skip = _
        test_unix_already_listening_cant_delete.skip = _
        del _
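
    # Hedged sketch (not part of the original source): the same helper also
    # accepts a TCP listening configuration following the Crossbar transport
    # config conventions, e.g.:
    #
    #   config = {"type": "tcp", "port": 8080}
    #   endpoint = create_listening_endpoint_from_config(
    #       config, cbdir, reactor, log)
    #
    # which should yield a TCP server endpoint instead of a UNIX one.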
Ejemplo n.º 43
0
from __future__ import absolute_import

import os

from twisted.python.failure import Failure

from autobahn.twisted import websocket
from autobahn.twisted import rawsocket
from autobahn.websocket.compress import *  # noqa

import crossbar

from crossbar.router.cookiestore import CookieStoreMemoryBacked, CookieStoreFileBacked
from crossbar._logging import make_logger

log = make_logger()

__all__ = (
    'WampWebSocketServerFactory',
    'WampRawSocketServerFactory',
    'WampWebSocketServerProtocol',
    'WampRawSocketServerProtocol',
    'WampWebSocketClientFactory',
    'WampRawSocketClientFactory',
    'WampWebSocketClientProtocol',
    'WampRawSocketClientProtocol',
)


def set_websocket_options(factory, options):
    """
Ejemplo n.º 44
0
def run():
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.

    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # create the top-level parser
    #
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--reactor',
                        default=None,
                        choices=['select', 'poll', 'epoll', 'kqueue', 'iocp'],
                        help='Explicit Twisted reactor selection (optional).')

    parser.add_argument('--loglevel',
                        default="info",
                        choices=['none', 'error', 'warn', 'info', 'debug', 'trace'],
                        help='Initial log level.')

    parser.add_argument('-c',
                        '--cbdir',
                        type=str,
                        help="Crossbar.io node directory (required).")

    parser.add_argument('-n',
                        '--node',
                        type=str,
                        help='Crossbar.io node ID (required).')

    parser.add_argument('-w',
                        '--worker',
                        type=str,
                        help='Crossbar.io worker ID (required).')

    parser.add_argument('-r',
                        '--realm',
                        type=str,
                        help='Crossbar.io node (management) realm (required).')

    parser.add_argument('-t',
                        '--type',
                        choices=['router', 'container'],
                        help='Worker type (required).')

    parser.add_argument('--title',
                        type=str,
                        default=None,
                        help='Worker process title to set (optional).')

    options = parser.parse_args()

    # make sure logging to something else than stdio is setup _first_
    #
    from crossbar._logging import make_JSON_observer, cb_logging_aware, _stderr
    from crossbar._logging import make_logger, log_publisher, start_logging
    from crossbar._logging import set_global_log_level

    # Set the global log level
    set_global_log_level(options.loglevel)

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=_stderr)
    _stderr.flush()

    flo = make_JSON_observer(_stderr)
    log_publisher.addObserver(flo)
    start_logging()

    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        # set process title if requested to
        #
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            WORKER_TYPE_TO_TITLE = {
                'router': 'crossbar-worker [router]',
                'container': 'crossbar-worker [container]'
            }
            setproctitle.setproctitle(WORKER_TYPE_TO_TITLE[options.type].strip())

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    from twisted.python.reflect import qual
    from twisted.internet.error import ReactorNotRunning
    log.info("Worker running under {python}-{reactor}",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))

    from crossbar.worker.router import RouterWorkerSession
    from crossbar.worker.container import ContainerWorkerSession

    WORKER_TYPE_TO_CLASS = {
        'router': RouterWorkerSession,
        'container': ContainerWorkerSession
    }

    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                log.warn("Connection to node controller lost.")
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except Exception:
                pass
            finally:
                # losing the connection to the node controller is fatal:
                # stop the reactor and exit with error
                log.info("No more controller connection; shutting down.")
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # create a WAMP application session factory
        #
        from autobahn.twisted.wamp import ApplicationSessionFactory
        from autobahn.wamp.types import ComponentConfig

        session_config = ComponentConfig(realm=options.realm, extra=options)
        session_factory = ApplicationSessionFactory(session_config)
        session_factory.session = WORKER_TYPE_TO_CLASS[options.type]

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(session_factory, "ws://localhost", debug=False, debug_wamp=False)
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)

        # now start reactor loop
        #
        # flip this to True to profile the worker under vmprof (dev only)
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info("Entering event loop...")
            reactor.run()

            vmprof.disable()
        else:
            log.debug("Entering event loop...")
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {}".format(e))
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
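

# Hedged aside (not part of the original source): run() is normally invoked
# by the node controller, which spawns the worker process with arguments
# matching the parser above, e.g. (values illustrative):
#
#   --cbdir /mynode/.crossbar --node mynode --worker worker1 \
#       --realm crossbar --type router --title 'crossbar-worker [router]'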
Ejemplo n.º 45
0
class Router(object):
    """
    Crossbar.io core router class.
    """
    log = make_logger()

    RESERVED_ROLES = [u'trusted']
    """
    Roles with these URIs are built-in and cannot be added/dropped.
    """

    broker = Broker
    """
    The broker class this router will use.
    """

    dealer = Dealer
    """
    The dealer class this router will use.
    """
    def __init__(self, factory, realm, options=None, store=None):
        """

        :param factory: The router factory this router was created by.
        :type factory: Object that implements :class:`autobahn.wamp.interfaces.IRouterFactory`..
        :param realm: The realm this router is working for.
        :type realm: str
        :param options: Router options.
        :type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
        """
        self._factory = factory
        self._options = options or RouterOptions()
        self._store = store
        self._realm = realm
        self.realm = realm.config[u'name']

        self._trace_traffic = False
        self._trace_traffic_roles_include = None
        self._trace_traffic_roles_exclude = [u'trusted']

        # map: session_id -> session
        self._session_id_to_session = {}

        self._broker = self.broker(self, self._options)
        self._dealer = self.dealer(self, self._options)
        self._attached = 0

        self._roles = {u'trusted': RouterTrustedRole(self, u'trusted')}

    def attach(self, session):
        """
        Implements :func:`autobahn.wamp.interfaces.IRouter.attach`
        """
        if session._session_id not in self._session_id_to_session:
            if _is_client_session(session):
                self._session_id_to_session[session._session_id] = session
            else:
                self.log.debug("attaching non-client session {session}",
                               session=session)
        else:
            raise Exception("session with ID {} already attached".format(
                session._session_id))

        self._broker.attach(session)
        self._dealer.attach(session)

        self._attached += 1

        return {
            u'broker': self._broker._role_features,
            u'dealer': self._dealer._role_features
        }

    def detach(self, session):
        """
        Implements :func:`autobahn.wamp.interfaces.IRouter.detach`
        """
        self._broker.detach(session)
        self._dealer.detach(session)

        if session._session_id in self._session_id_to_session:
            del self._session_id_to_session[session._session_id]
        else:
            if _is_client_session(session):
                raise Exception("session with ID {} not attached".format(
                    session._session_id))

        self._attached -= 1
        if not self._attached:
            self._factory.onLastDetach(self)

    def _check_trace(self, session, msg):
        if not self._trace_traffic:
            return False
        if self._trace_traffic_roles_include and session._authrole not in self._trace_traffic_roles_include:
            return False
        if self._trace_traffic_roles_exclude and session._authrole in self._trace_traffic_roles_exclude:
            return False
        return True

    def send(self, session, msg):
        if self._check_trace(session, msg):
            self.log.info("<<TX<< {msg}", msg=msg)
        session._transport.send(msg)

    def process(self, session, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.IRouter.process`
        """
        if self._check_trace(session, msg):
            self.log.info(">>RX>> {msg}", msg=msg)

        # Broker
        #
        if isinstance(msg, message.Publish):
            self._broker.processPublish(session, msg)

        elif isinstance(msg, message.Subscribe):
            self._broker.processSubscribe(session, msg)

        elif isinstance(msg, message.Unsubscribe):
            self._broker.processUnsubscribe(session, msg)

        # Dealer
        #
        elif isinstance(msg, message.Register):
            self._dealer.processRegister(session, msg)

        elif isinstance(msg, message.Unregister):
            self._dealer.processUnregister(session, msg)

        elif isinstance(msg, message.Call):
            self._dealer.processCall(session, msg)

        elif isinstance(msg, message.Cancel):
            self._dealer.processCancel(session, msg)

        elif isinstance(msg, message.Yield):
            self._dealer.processYield(session, msg)

        elif isinstance(msg, message.Error) and \
                msg.request_type == message.Invocation.MESSAGE_TYPE:
            self._dealer.processInvocationError(session, msg)

        else:
            raise ProtocolError("Unexpected message {0}".format(msg.__class__))

    def has_role(self, uri):
        """
        Check if a role with given URI exists on this router.

        :returns: bool - `True` if a role under the given URI exists on this router.
        """
        return uri in self._roles

    def add_role(self, role):
        """
        Adds a role to this router.

        :param role: The role to add.
        :type role: An instance of :class:`crossbar.router.session.CrossbarRouterRole`.

        :returns: bool -- `True` if a role under the given URI actually existed before and was overwritten.
        """
        self.log.debug("CrossbarRouter.add_role({role})", role=role)

        if role.uri in self.RESERVED_ROLES:
            raise Exception("cannot add reserved role '{}'".format(role.uri))

        overwritten = role.uri in self._roles

        self._roles[role.uri] = role

        return overwritten

    def drop_role(self, role):
        """
        Drops a role from this router.

        :param role: The role to drop.
        :type role: An instance of :class:`crossbar.router.session.CrossbarRouterRole`.

        :returns: bool -- `True` if a role under the given URI actually existed and was removed.
        """
        self.log.debug("CrossbarRouter.drop_role({role})", role=role)

        if role.uri in self.RESERVED_ROLES:
            raise Exception("cannot drop reserved role '{}'".format(role.uri))

        if role.uri in self._roles:
            del self._roles[role.uri]
            return True
        else:
            return False

    def authorize(self, session, uri, action):
        """
        Authorizes a session for an action on a URI.

        Implements :func:`autobahn.wamp.interfaces.IRouter.authorize`
        """
        assert (type(uri) == six.text_type)
        assert (action in [u'call', u'register', u'publish', u'subscribe'])

        # the role under which the session that wishes to perform the given action on
        # the given URI was authenticated under
        role = session._authrole

        if role in self._roles:
            # the authorizer procedure of the role which we will call ..
            authorize = self._roles[role].authorize
            d = txaio.as_future(authorize, session, uri, action)
        else:
            # normally, the role should exist on the router (and hence we should not arrive
            # here), but the role might have been dynamically removed - and anyway, safety first!
            d = txaio.create_future_success(False)

        def got_authorization(authorization):
            # backward compatibility
            if type(authorization) == bool:
                authorization = {u'allow': authorization, u'cache': False}
                if action in [u'call', u'publish']:
                    authorization[u'disclose'] = False

            self.log.info(
                "Authorized action '{action}' for URI '{uri}' by session {session_id} with authid '{authid}' and authrole '{authrole}' -> authorization: {authorization}",
                session_id=session._session_id,
                uri=uri,
                action=action,
                authid=session._authid,
                authrole=session._authrole,
                authorization=authorization)

            return authorization

        d.addCallback(got_authorization)
        return d

    def validate(self, payload_type, uri, args, kwargs):
        """
        Implements :func:`autobahn.wamp.interfaces.IRouter.validate`
        """
        self.log.debug("Validate '{payload_type}' for '{uri}'",
                       payload_type=payload_type,
                       uri=uri,
                       cb_level="trace")
Ejemplo n.º 46
0
class NodeManagementBridgeSession(ApplicationSession):

    """
    The management bridge is a WAMP session that lives on the local management router,
    but has access to a 2nd WAMP session that lives on the uplink CDC router.

    The bridge is responsible for forwarding calls from CDC into the local node,
    and for forwarding events from the local node to CDC.
    """

    log = make_logger()

    def __init__(self, config, management_session):
        """

        :param config: Session configuration.
        :type config: instance of `autobahn.wamp.types.ComponentConfig`
        :param management_session: uplink session.
        :type management_session: instance of `autobahn.wamp.protocol.ApplicationSession`
        """
        ApplicationSession.__init__(self, config)
        self._management_session = management_session
        self._regs = {}

    @inlineCallbacks
    def onJoin(self, details):

        self.log.debug("Joined realm '{realm}' on node management router", realm=details.realm)

        @inlineCallbacks
        def on_event(*args, **kwargs):
            details = kwargs.pop('details')
            topic = u"cdc." + details.topic
            try:
                yield self._management_session.publish(topic, *args, options=PublishOptions(acknowledge=True), **kwargs)
            except Exception as e:
                self.log.error(e)
            else:
                self.log.debug("Forwarded event on topic '{topic}'", topic=topic)

        yield self.subscribe(on_event, u"crossbar.node", options=SubscribeOptions(match=u"prefix", details_arg="details"))

        # we use the WAMP meta API implemented by CB to get notified whenever a procedure is
        # registered/unregister on the node management router, setup a forwarding procedure
        # and register that on the uplink CDC router
        #
        @inlineCallbacks
        def on_registration_create(session_id, registration):
            uri = registration['uri']

            def forward_call(*args, **kwargs):
                return self.call(uri, *args, **kwargs)

            reg = yield self._management_session.register(forward_call, uri)
            self._regs[registration['id']] = reg

            self.log.debug("Forwarding procedure: {procedure}", procedure=reg.procedure)

        yield self.subscribe(on_registration_create, u'wamp.registration.on_create')

        @inlineCallbacks
        def on_registration_delete(session_id, registration_id):
            reg = self._regs.pop(registration_id, None)

            if reg:
                yield reg.unregister()
                self.log.debug("Removed forwarding of procedure {procedure}", procedure=reg.procedure)
            else:
                self.log.warn("Could not remove forwarding for unmapped registration_id {reg_id}", reg_id=registration_id)

        yield self.subscribe(on_registration_delete, u'wamp.registration.on_delete')

        self.log.info("Management bridge ready")
Ejemplo n.º 47
0
class FileUploadResource(Resource):
    """
    Twisted Web resource that handles file uploads over `HTTP/POST` requests.
    """

    log = make_logger()

    def __init__(self,
                 upload_directory,
                 temp_directory,
                 form_fields,
                 upload_session,
                 options=None):
        """

        :param upload_directory: The target directory where uploaded files will be stored.
        :type upload_directory: str
        :param temp_directory: A temporary directory where chunks of a file being uploaded are stored.
        :type temp_directory: str
        :param form_fields: Names of HTML form fields used for uploading.
        :type form_fields: dict
        :param upload_session: An instance of `ApplicationSession` used for publishing progress events.
        :type upload_session: obj
        :param options: Options for file upload.
        :type options: dict or None
        """

        Resource.__init__(self)
        self._dir = upload_directory
        self._tempDir = temp_directory
        self._form_fields = form_fields
        self._fileupload_session = upload_session
        self._options = options or {}
        self._max_file_size = self._options.get('max_file_size',
                                                10 * 1024 * 1024)
        self._fileTypes = self._options.get('file_types', None)
        self._file_permissions = self._options.get('file_permissions', None)

        # track uploaded files / chunks
        self._uploads = {}

        # scan the temp dir for uploaded chunks and fill the _uploads dict with it
        # so existing uploads can be resumed
        for fileTempDir in os.listdir(self._tempDir):
            ft = os.path.join(self._tempDir, fileTempDir)
            if os.path.isdir(ft):
                self._uploads[fileTempDir] = {
                    'chunk_list': {},
                    'origin': 'startup'
                }
                for chunk in os.listdir(ft):
                    if chunk[:6] == 'chunk_':
                        chunk_no = int(chunk[6:])
                        self._uploads[fileTempDir]['chunk_list'][chunk_no] = True

        self.log.debug("Scanned pending uploads: {uploads}",
                       uploads=self._uploads)

    def render_POST(self, request):
        headers = request.getAllHeaders()

        origin = headers['host']

        content = cgi.FieldStorage(fp=request.content,
                                   headers=headers,
                                   environ={
                                       'REQUEST_METHOD': 'POST',
                                       'CONTENT_TYPE': headers['content-type']
                                   })

        f = self._form_fields
        filename = content[f['file_name']].value
        totalSize = int(content[f['total_size']].value)
        totalChunks = int(content[f['total_chunks']].value)
        chunkSize = int(content[f['chunk_size']].value)
        chunkNumber = int(content[f['chunk_number']].value)
        fileContent = content[f['content']].value

        fileId = filename

        # # prepare user specific upload areas
        # # NOT YET IMPLEMENTED
        # #
        # if 'auth_id' in f and f['auth_id'] in content:
        #     auth_id = content[f['auth_id']].value
        #     mydir = os.path.join(self._dir, auth_id)
        #     my_temp_dir = os.path.join(self._tempDir, auth_id)
        #
        #     # check if auth_id is a valid directory_name
        #     #
        #     if auth_id != auth_id.encode('ascii', 'ignore'):
        #         msg = "The requestor auth_id must be an ascii string."
        #         if self._debug:
        #             log.msg(msg)
        #         # 415 Unsupported Media Type
        #         request.setResponseCode(415, msg)
        #         return msg
        # else:
        #     auth_id = 'anonymous'

        # create user specific folder

        # mydir = self._dir
        # my_temp_dir = self._tempDir

        # if not os.path.exists(mydir):
        #     os.makedirs(mydir)
        # if not os.path.exists(my_temp_dir):
        #     os.makedirs(my_temp_dir)

        if 'on_progress' in f and f['on_progress'] in content \
                and self._fileupload_session != {}:
            topic = content[f['on_progress']].value

            if 'session' in f and f['session'] in content:
                session = int(content[f['session']].value)
                publish_options = PublishOptions(eligible=[session])
            else:
                publish_options = None

            def fileupload_publish(payload):
                self._fileupload_session.publish(topic,
                                                 payload,
                                                 options=publish_options)
        else:

            def fileupload_publish(payload):
                pass

        # Register upload right at the start to avoid overlapping upload conflicts
        if fileId not in self._uploads:
            self._uploads[fileId] = {'chunk_list': {}, 'origin': origin}
            chunk_is_first = True
        else:
            chunk_is_first = False

        self.log.debug(
            'Started upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
            file_name=fileId,
            total_size=totalSize,
            total_chunks=totalChunks,
            chunk_size=chunkSize,
            chunk_number=chunkNumber)

        # check file size
        #
        if totalSize > self._max_file_size:
            msg = "Size {} of file to be uploaded exceeds maximum {}".format(
                totalSize, self._max_file_size)
            self.log.debug(msg)
            # 413 Request Entity Too Large
            request.setResponseCode(413, msg)
            return msg

        # check file extensions
        #
        extension = os.path.splitext(filename)[1]
        if self._fileTypes and extension not in self._fileTypes:
            msg = "Type '{}' of file to be uploaded is in allowed types {}".format(
                extension, self._fileTypes)
            self.log.debug(msg)
            # 415 Unsupported Media Type
            request.setResponseCode(415, msg)
            return msg

        # check if another session is uploading this file already
        # (chunks found at crossbar startup have origin 'startup', so any
        # client may resume such a pending upload!)
        #
        try:
            upl = self._uploads[fileId]
            if upl['origin'] != origin and upl['origin'] != 'startup':
                msg = "File being uploaded is already uploaded in a different session"
                self.log.debug(msg)
                # 409 Conflict
                request.setResponseCode(409, msg)
                return msg
        except Exception:
            pass

        # TODO: check mime type

        fileTempDir = os.path.join(self._tempDir, fileId)
        chunkName = os.path.join(fileTempDir, 'chunk_' + str(chunkNumber))
        _chunkName = os.path.join(
            fileTempDir, '#kfhfkzuru578e38viokbjhfvz4w__chunk_' + str(chunkNumber))

        if chunk_is_first:
            # first chunk of file

            # clean the temp dir once per file upload
            self._remove_stale_uploads()

            # publish file upload start
            #
            fileupload_publish({
                "id": fileId,
                "chunk": chunkNumber,
                "name": filename,
                "total": totalSize,
                "remaining": totalSize,
                "status": "started",
                "progress": 0.
            })

            if totalChunks == 1:
                # only one chunk overall -> write file directly
                finalFileName = os.path.join(self._dir, fileId)
                _finalFileName = os.path.join(
                    self._dir, '#kfhfkzuru578e38viokbjhfvz4w__' + fileId)

                with open(_finalFileName, 'wb') as finalFile:
                    finalFile.write(fileContent)
                os.rename(_finalFileName, finalFileName)

                self._uploads[fileId]['chunk_list'][chunkNumber] = True

                if self._file_permissions:
                    perm = int(self._file_permissions, 8)
                    try:
                        os.chmod(finalFileName, perm)
                    except Exception as e:
                        os.remove(finalFileName)
                        msg = "Could not change file permissions of uploaded file"
                        self.log.debug(msg)
                        self.log.debug(e)
                        request.setResponseCode(500, msg)
                        return msg
                    else:
                        self.log.debug(
                            "Changed permissions on {file_name} to {permissions}",
                            file_name=finalFileName,
                            permissions=self._file_permissions)

                self._uploads.pop(fileId, None)

                # publish file upload progress to file_progress_URI
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": 0,
                    "status": "finished",
                    "progress": 1.
                })
            else:
                # first of more chunks
                os.makedirs(fileTempDir)
                with open(_chunkName, 'wb') as chunk:
                    chunk.write(fileContent)
                os.rename(_chunkName, chunkName)

                self._uploads[fileId]['chunk_list'][chunkNumber] = True

                # publish file upload progress
                #
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": totalSize - chunkSize,
                    "status": "progress",
                    "progress": round(float(chunkSize) / float(totalSize), 3)
                })

        else:
            # intermediate chunk
            with open(_chunkName, 'wb') as chunk:
                chunk.write(fileContent)
            os.rename(_chunkName, chunkName)

            self._uploads[fileId]['chunk_list'][chunkNumber] = True

            received = sum(
                os.path.getsize(os.path.join(fileTempDir, f))
                for f in os.listdir(fileTempDir))

            fileupload_publish({
                "id": fileId,
                "chunk": chunkNumber,
                "name": filename,
                "total": totalSize,
                "remaining": totalSize - received,
                "status": "progress",
                "progress": round(float(received) / float(totalSize), 3)
            })

        # every chunk has to check if it is the last chunk written, except in a single chunk scenario
        if totalChunks > 1 and \
                len(self._uploads[fileId]['chunk_list']) == totalChunks:
            # last chunk
            self.log.debug('Finished file upload after chunk {chunk_number}',
                           chunk_number=chunkNumber)

            # Merge all files into one file and remove the temp files
            # TODO: How to avoid the extra file IO ?
            finalFileName = os.path.join(self._dir, fileId)
            _finalFileName = os.path.join(
                self._dir, '#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)
            with open(_finalFileName, 'wb') as finalFile:
                # merge the chunks in ascending chunk order (plain listdir()
                # ordering is arbitrary) and read them in binary mode
                for chunk_no in sorted(self._uploads[fileId]['chunk_list']):
                    tfileName = 'chunk_' + str(chunk_no)
                    with open(os.path.join(fileTempDir, tfileName), 'rb') as tfile:
                        finalFile.write(tfile.read())
            os.rename(_finalFileName, finalFileName)

            if self._file_permissions:
                perm = int(self._file_permissions, 8)
                try:
                    os.chmod(finalFileName, perm)
                except Exception as e:
                    msg = "file upload resource - could not change file permissions of uploaded file"
                    self.log.debug(msg)
                    self.log.debug(e)
                    request.setResponseCode(500, msg)
                    return msg
                else:
                    self.log.debug(
                        "Changed permissions on {file_name} to {permissions}",
                        file_name=finalFileName,
                        permissions=self._file_permissions)

            # publish file upload progress to file_progress_URI
            fileupload_publish({
                "id": fileId,
                "chunk": chunkNumber,
                "name": filename,
                "total": totalSize,
                "remaining": 0,
                "status": "finished",
                "progress": 1.
            })

            # remove the file temp folder
            self._remove_temp_dir(fileTempDir)

            self._uploads.pop(fileId, None)

        request.setResponseCode(200)
        return ''

    def _remove_temp_dir(self, fileTempDir):
        for tfileName in os.listdir(fileTempDir):
            os.remove(os.path.join(fileTempDir, tfileName))

        os.rmdir(fileTempDir)

    def _remove_stale_uploads(self):
        """
        This only works if there is a temp folder exclusive for crossbar file uploads
        if the system temp folder is used then crossbar creates a "crossbar-uploads" there and
        uses that as the temp folder for uploads
        If you don't clean up regularly an attacker could fill up the OS file system
        """
        for _dir in os.listdir(self._tempDir):
            fileTempDir = os.path.join(self._tempDir, _dir)
            if os.path.isdir(fileTempDir) and _dir not in self._uploads:
                self._remove_temp_dir(fileTempDir)

    def render_GET(self, request):
        """
        This method can be used to check whether a chunk has been uploaded already.
        It returns with HTTP status code `200` if yes and `404` if not.
        The request needs to contain the file identifier and the chunk number to check for.
        """
        for param in ['file_name', 'chunk_number']:
            if not self._form_fields[param] in request.args:
                msg = "file upload resource - missing request query parameter '{}', configured from '{}'".format(
                    self._form_fields[param], param)
                self.log.debug(msg)
                # 400 Bad Request
                request.setResponseCode(400, msg)
                return msg

        file_name = request.args[self._form_fields['file_name']][0]
        chunk_number = int(request.args[self._form_fields['chunk_number']][0])

        # a complete upload will be repeated; an incomplete upload will be resumed
        if file_name in self._uploads and \
                chunk_number in self._uploads[file_name]['chunk_list']:
            self.log.debug(
                "Skipping chunk upload {file_name} of chunk {chunk_number}",
                file_name=file_name,
                chunk_number=chunk_number)
            msg = "chunk of file already uploaded"
            request.setResponseCode(200, msg)
            return msg
        else:
            msg = "chunk of file not yet uploaded"
            request.setResponseCode(404, msg)
            return msg
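

# Hedged usage sketch (not part of the original source): mount the upload
# resource in a Twisted Web tree. Directory names, port and form field names
# are illustrative - the field names must match what the uploading client
# (e.g. a resumable.js-style uploader) actually sends.
if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.web.server import Site
    from twisted.web.resource import Resource as RootResource

    for d in ('uploads', 'uploads_tmp'):
        if not os.path.isdir(d):
            os.makedirs(d)

    form_fields = {
        'file_name': 'resumableFilename',
        'total_size': 'resumableTotalSize',
        'total_chunks': 'resumableTotalChunks',
        'chunk_size': 'resumableChunkSize',
        'chunk_number': 'resumableChunkNumber',
        'content': 'file',
    }

    root = RootResource()
    # passing {} as upload_session disables progress publishing (see the
    # `self._fileupload_session != {}` check in render_POST above)
    root.putChild(b'upload', FileUploadResource(
        'uploads', 'uploads_tmp', form_fields, {}))
    reactor.listenTCP(8080, Site(root))
    reactor.run()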
Ejemplo n.º 48
0
class WampLongPollResourceSessionReceive(Resource):
    """
    A Web resource for receiving via XHR that is part of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
    """

    log = make_logger()

    def __init__(self, parent):
        """

        :param parent: The Web parent resource for the WAMP session.
        :type parent: Instance of :class:`autobahn.twisted.longpoll.WampLongPollResourceSession`.
        """
        Resource.__init__(self)
        self._parent = parent
        self.reactor = self._parent._parent.reactor

        self._queue = deque()
        self._request = None
        self._killed = False

        # FIXME: can we read the loglevel from self.log currently set?
        # flip this to True to periodically log the queue state (dev only)
        if False:

            def logqueue():
                if not self._killed:
                    self.log.debug(
                        "WampLongPoll: transport '{0}' - currently polled {1}, pending messages {2}"
                        .format(self._parent._transport_id, self._request
                                is not None, len(self._queue)))
                    self.reactor.callLater(1, logqueue)

            logqueue()

    def queue(self, data):
        """
        Enqueue data to be received by client.

        :param data: The data to be received by the client.
        :type data: bytes
        """
        self._queue.append(data)
        self._trigger()

    def _kill(self):
        """
        Kill any outstanding request.
        """
        if self._request:
            self._request.finish()
            self._request = None
        self._killed = True

    def _trigger(self):
        """
        Trigger batched sending of queued messages.
        """
        if self._request and len(self._queue):

            if self._parent._serializer._serializer._batched:
                # in batched mode, write all pending messages
                while len(self._queue) > 0:
                    msg = self._queue.popleft()
                    self._request.write(msg)
            else:
                # in unbatched mode, only write 1 pending message
                msg = self._queue.popleft()
                if type(msg) == six.binary_type:
                    self._request.write(msg)
                else:
                    self.log.error(
                        "internal error: cannot write data of type {type_} - {msg}",
                        type_=type(msg),
                        msg=msg,
                    )

            self._request.finish()
            self._request = None

    def render_POST(self, request):
        """
        A client receives WAMP messages by issuing a HTTP/POST to this
        Web resource. The request will immediately return when there are
        messages pending to be received. When there are no such messages
        pending, the request will "just hang", until either a message
        arrives to be received or a timeout occurs.
        """
        # remember request, which marks the session as being polled
        self._request = request

        self._parent._parent._setStandardHeaders(request)
        mime_type = self._parent._serializer.MIME_TYPE
        if type(mime_type) == six.text_type:
            mime_type = mime_type.encode('utf8')
        request.setHeader(b'content-type', mime_type)

        def cancel(_):
            self.log.debug(
                "WampLongPoll: poll request for transport '{0}' has gone away".
                format(self._parent._transport_id))
            self._request = None

        request.notifyFinish().addErrback(cancel)

        self._parent._isalive = True
        self._trigger()

        return NOT_DONE_YET
Ejemplo n.º 49
0
class WampLongPollResource(Resource):
    """
    A WAMP-over-Longpoll resource for use with Twisted Web Resource trees.

    This class provides an implementation of the
    `WAMP-over-Longpoll Transport <https://github.com/tavendo/WAMP/blob/master/spec/advanced.md#long-poll-transport>`_
    for WAMP.

    The Resource exposes the following paths (child resources).

    Opening a new WAMP session:

       * ``<base-url>/open``

    Once a transport is created and the session is opened:

       * ``<base-url>/<transport-id>/send``
       * ``<base-url>/<transport-id>/receive``
       * ``<base-url>/<transport-id>/close``
    """

    log = make_logger()

    protocol = WampLongPollResourceSession

    def __init__(self,
                 factory,
                 serializers=None,
                 timeout=10,
                 killAfter=30,
                 queueLimitBytes=128 * 1024,
                 queueLimitMessages=100,
                 debug_transport_id=None,
                 reactor=None):
        """
        Create new HTTP WAMP Web resource.

        :param factory: A (router) session factory.
        :type factory: Instance of :class:`autobahn.twisted.wamp.RouterSessionFactory`.
        :param serializers: List of WAMP serializers.
        :type serializers: list of obj (which implement :class:`autobahn.wamp.interfaces.ISerializer`)
        :param timeout: XHR polling timeout in seconds.
        :type timeout: int
        :param killAfter: Kill WAMP session after inactivity in seconds.
        :type killAfter: int
        :param queueLimitBytes: Kill WAMP session after accumulation of this many bytes in send queue (XHR poll).
        :type queueLimitBytes: int
        :param queueLimitMessages: Kill WAMP session after accumulation of this many message in send queue (XHR poll).
        :type queueLimitMessages: int
        :param debug_transport_id: If given, use this fixed transport ID.
        :type debug_transport_id: str
        :param reactor: The Twisted reactor to run under.
        :type reactor: obj
        """
        Resource.__init__(self)

        # RouterSessionFactory
        self._factory = factory

        # lazy import to avoid reactor install upon module import
        if reactor is None:
            from twisted.internet import reactor
        self.reactor = reactor

        self._debug_transport_id = debug_transport_id
        self._timeout = timeout
        self._killAfter = killAfter
        self._queueLimitBytes = queueLimitBytes
        self._queueLimitMessages = queueLimitMessages

        if serializers is None:
            serializers = []

            # try MsgPack WAMP serializer
            try:
                from autobahn.wamp.serializer import MsgPackSerializer
                serializers.append(MsgPackSerializer(batched=True))
                serializers.append(MsgPackSerializer())
            except ImportError:
                pass

            # try JSON WAMP serializer
            try:
                from autobahn.wamp.serializer import JsonSerializer
                serializers.append(JsonSerializer(batched=True))
                serializers.append(JsonSerializer())
            except ImportError:
                pass

            if not serializers:
                raise Exception("could not import any WAMP serializers")

        self._serializers = {}
        for ser in serializers:
            self._serializers[ser.SERIALIZER_ID] = ser

        self._transports = {}

        # <Base URL>/open
        #
        self.putChild(b"open", WampLongPollResourceOpen(self))

        self.log.debug("WampLongPollResource initialized")

    def render_GET(self, request):
        request.setHeader(b'content-type', b'text/html; charset=UTF-8')
        peer = b"{0}:{1}".format(request.client.host, request.client.port)
        return self.getNotice(peer=peer)

    def getChild(self, name, request):
        """
        Returns send/receive/close resource for transport.

        .. seealso::

           * :class:`twisted.web.resource.Resource`
        """
        if name not in self._transports:
            if name == b'':
                return self
            else:
                return NoResource("no WAMP transport '{0}'".format(name))

        if len(request.postpath) != 1 or request.postpath[0] not in [
                b'send', b'receive', b'close'
        ]:
            return NoResource("invalid WAMP transport operation '{0}'".format(
                request.postpath))

        return self._transports[name]

    def _setStandardHeaders(self, request):
        """
        Set standard HTTP response headers.
        """
        origin = request.getHeader(b"origin")
        if origin is None or origin == b"null":
            origin = b"*"
        request.setHeader(b'access-control-allow-origin', origin)
        request.setHeader(b'access-control-allow-credentials', b'true')
        request.setHeader(b'cache-control',
                          b'no-store, no-cache, must-revalidate, max-age=0')

        headers = request.getHeader(b'access-control-request-headers')
        if headers is not None:
            request.setHeader(b'access-control-allow-headers', headers)

    def _failRequest(self, request, msg):
        """
        Fails a request to the long-poll service.
        """
        self._setStandardHeaders(request)
        request.setHeader(b'content-type', b'text/plain; charset=UTF-8')
        request.setResponseCode(http.BAD_REQUEST)
        return msg

    def getNotice(self, peer, redirectUrl=None, redirectAfter=0):
        """
        Render a user notice (HTML page) when the Long-Poll root resource
        is accessed via HTTP/GET (by a user).

        :param peer: The peer (``host:port``) requesting the notice, as bytes.
        :type peer: bytes
        :param redirectUrl: Optional URL to redirect the user to.
        :type redirectUrl: str
        :param redirectAfter: When ``redirectUrl`` is provided, redirect after this time (seconds).
        :type redirectAfter: int
        """
        from autobahn import __version__

        if redirectUrl:
            redirect = b"""<meta http-equiv="refresh" content="%d;URL='%s'">""" % (
                redirectAfter, redirectUrl.encode('utf8'))
        else:
            redirect = b""
        html = b"""
<!DOCTYPE html>
<html>
   <head>
      %s
      <style>
         body {
            color: #fff;
            background-color: #027eae;
            font-family: "Segoe UI", "Lucida Grande", "Helvetica Neue", Helvetica, Arial, sans-serif;
            font-size: 16px;
         }

         a, a:visited, a:hover {
            color: #fff;
         }
      </style>
   </head>
   <body>
      <h1>AutobahnPython %s</h1>
      <p>
         I am not a Web server, but a <b>WAMP-over-LongPoll</b> transport endpoint.
      </p>
      <p>
         You can talk to me using the <a href="https://github.com/tavendo/WAMP/blob/master/spec/advanced.md#long-poll-transport">WAMP-over-LongPoll</a> protocol.
      </p>
      <p>
         For more information, please see:
         <ul>
            <li><a href="http://wamp.ws/">WAMP</a></li>
            <li><a href="http://autobahn.ws/python">AutobahnPython</a></li>
         </ul>
      </p>
   </body>
</html>
""" % (redirect, __version__)
        return html
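
A minimal usage sketch (not part of the original source): mounting the long-poll endpoint under a Twisted Web site. The session factory passed in is an assumption; any WAMP session factory accepted by WampLongPollResource would do.

# Hedged usage sketch: serve the WAMP-over-LongPoll transport under /lp.
# `session_factory` is assumed -- e.g. a RouterSessionFactory instance.
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.resource import Resource
from autobahn.twisted.longpoll import WampLongPollResource

def make_site(session_factory):
    root = Resource()
    # clients then open transports via HTTP/POST to /lp/open
    root.putChild(b"lp", WampLongPollResource(session_factory))
    return Site(root)

# reactor.listenTCP(8080, make_site(session_factory))
# reactor.run()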
Ejemplo n.º 50
0
class WampLongPollResourceSession(Resource):
    """
    A Web resource representing an open WAMP session.
    """

    log = make_logger()

    def __init__(self, parent, transport_details):
        """
        Create a new Web resource representing a WAMP session.

        :param parent: The parent Web resource.
        :type parent: Instance of :class:`autobahn.twisted.longpoll.WampLongPollResource`.
        :param transport_details: Details on the WAMP-over-Longpoll transport session.
        :type transport_details: dict
        """
        Resource.__init__(self)

        self._parent = parent
        self.reactor = self._parent.reactor

        self._transport_id = transport_details['transport']
        self._serializer = transport_details['serializer']
        self._session = None

        # session authentication information
        #
        self._authid = None
        self._authrole = None
        self._authmethod = None
        self._authprovider = None

        self._send = WampLongPollResourceSessionSend(self)
        self._receive = WampLongPollResourceSessionReceive(self)
        self._close = WampLongPollResourceSessionClose(self)

        self.putChild(b"send", self._send)
        self.putChild(b"receive", self._receive)
        self.putChild(b"close", self._close)

        self._isalive = False

        # kill inactive sessions after this timeout
        #
        killAfter = self._parent._killAfter
        if killAfter > 0:

            def killIfDead():
                if not self._isalive:
                    self.log.debug(
                        "WampLongPoll: killing inactive WAMP session with transport '{0}'"
                        .format(self._transport_id))

                    self.onClose(False, 5000, "session inactive")
                    self._receive._kill()
                    if self._transport_id in self._parent._transports:
                        del self._parent._transports[self._transport_id]
                else:
                    self.log.debug(
                        "WampLongPoll: transport '{0}' is still alive".format(
                            self._transport_id))

                    self._isalive = False
                    self.reactor.callLater(killAfter, killIfDead)

            self.reactor.callLater(killAfter, killIfDead)
        else:
            self.log.debug(
                "WampLongPoll: transport '{0}' automatic killing of inactive session disabled"
                .format(self._transport_id))

        self.log.debug(
            "WampLongPoll: session resource for transport '{0}' initialized".
            format(self._transport_id))

        self.onOpen()

    def close(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.close`
        """
        if self.isOpen():
            self.onClose(True, 1000, u"session closed")
            self._receive._kill()
            del self._parent._transports[self._transport_id]
        else:
            raise TransportLost()

    def abort(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
        """
        if self.isOpen():
            self.onClose(True, 1000, u"session aborted")
            self._receive._kill()
            del self._parent._transports[self._transport_id]
        else:
            raise TransportLost()

    # noinspection PyUnusedLocal
    def onClose(self, wasClean, code, reason):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onClose`
        """
        if self._session:
            try:
                self._session.onClose(wasClean)
            except Exception:
                # ignore exceptions raised here, but log ..
                self.log.failure("invoking session's onClose failed")
            self._session = None

    def onOpen(self):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onOpen`
        """
        self._session = self._parent._factory()
        # noinspection PyBroadException
        try:
            self._session.onOpen(self)
        except Exception:
            # ignore exceptions raised here, but log ..
            self.log.failure("Invoking session's onOpen failed")

    def onMessage(self, payload, isBinary):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessage`
        """
        for msg in self._serializer.unserialize(payload, isBinary):
            self.log.debug("WampLongPoll: RX {0}".format(msg))
            self._session.onMessage(msg)

    def send(self, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.send`
        """
        if self.isOpen():
            try:
                self.log.debug("WampLongPoll: TX {0}".format(msg))
                payload, isBinary = self._serializer.serialize(msg)
            except Exception as e:
                # all exceptions raised from above should be serialization errors ..
                raise SerializationError(
                    "unable to serialize WAMP application payload ({0})".
                    format(e))
            else:
                self._receive.queue(payload)
        else:
            raise TransportLost()

    def isOpen(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`
        """
        return self._session is not None
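
The send()/onMessage() pair above is essentially a serializer round-trip. A small sketch of that round-trip in isolation, assuming autobahn is installed:

# Hedged sketch of the serializer round-trip used by send() and onMessage().
from autobahn.wamp import message
from autobahn.wamp.serializer import JsonSerializer

ser = JsonSerializer()
msg = message.Abort(u"wamp.error.system_shutdown", u"shutting down")

payload, is_binary = ser.serialize(msg)        # bytes queued for XHR receive
for m in ser.unserialize(payload, is_binary):  # a payload may hold a batch
    print(m)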
Ejemplo n.º 51
0
class RouterServiceSession(ApplicationSession):
    """
    Router service session which is used internally by a router to
    issue WAMP calls or publish events, and which provides WAMP meta API
    procedures.
    """

    log = make_logger()

    def __init__(self, config, router, schemas=None):
        """
        Ctor.

        :param config: WAMP application component configuration.
        :type config: Instance of :class:`autobahn.wamp.types.ComponentConfig`.
        :param router: The router this service session is running for.
        :type router: instance of :class:`crossbar.router.session.CrossbarRouter`
        :param schemas: An (optional) initial schema dictionary to load.
        :type schemas: dict
        """
        ApplicationSession.__init__(self, config)
        self._router = router
        self._schemas = {}
        if schemas:
            self._schemas.update(schemas)
            self.log.info("initialized schemas cache with {} entries".format(
                len(self._schemas)))

    @inlineCallbacks
    def onJoin(self, details):
        self.log.debug("Router service session attached: {}".format(details))
        regs = yield self.register(self)
        self.log.debug("Registered {} procedures".format(len(regs)))

    @wamp.register(u'wamp.session.list')
    def session_list(self, filter_authroles=None):
        """
        Get list of session IDs of sessions currently joined on the router.

        :param filter_authroles: If provided, only return sessions with an authrole from this list.
        :type filter_authroles: None or list

        :returns: List of WAMP session IDs (order undefined).
        :rtype: list
        """
        assert (filter_authroles is None or type(filter_authroles) == list)
        session_ids = []
        for session in self._router._session_id_to_session.values():
            if not is_restricted_session(session):
                if filter_authroles is None or session._session_details[
                        'authrole'] in filter_authroles:
                    session_ids.append(session._session_id)
        return session_ids

    @wamp.register(u'wamp.session.count')
    def session_count(self, filter_authroles=None):
        """
        Count sessions currently joined on the router.

        :param filter_authroles: If provided, only count sessions with an authrole from this list.
        :type filter_authroles: None or list

        :returns: Count of joined sessions.
        :rtype: int
        """
        assert (filter_authroles is None or type(filter_authroles) == list)
        session_count = 0
        for session in self._router._session_id_to_session.values():
            if not is_restricted_session(session):
                if filter_authroles is None or session._session_details[
                        'authrole'] in filter_authroles:
                    session_count += 1
        return session_count

    @wamp.register(u'wamp.session.get')
    def session_get(self, session_id):
        """
        Get details for given session.

        :param session_id: The WAMP session ID to retrieve details for.
        :type session_id: int

        :returns: WAMP session details.
        :rtype: dict or None
        """
        if session_id in self._router._session_id_to_session:
            session = self._router._session_id_to_session[session_id]
            if not is_restricted_session(session):
                return session._session_details
        raise ApplicationError(
            ApplicationError.NO_SUCH_SESSION,
            message="no session with ID {} exists on this router".format(
                session_id))

    @wamp.register(u'wamp.session.kill')
    def session_kill(self, session_id, reason=None, message=None):
        """
        Forcefully kill a session.

        :param session_id: The WAMP session ID of the session to kill.
        :type session_id: int
        :param reason: A reason URI provided to the killed session.
        :type reason: unicode or None
        """
        if session_id in self._router._session_id_to_session:
            session = self._router._session_id_to_session[session_id]
            if not is_restricted_session(session):
                session.leave(reason=reason, message=message)
                return
        raise ApplicationError(
            ApplicationError.NO_SUCH_SESSION,
            message="no session with ID {} exists on this router".format(
                session_id))

    @wamp.register(u'wamp.registration.remove_callee')
    def registration_remove_callee(self,
                                   registration_id,
                                   callee_id,
                                   reason=None):
        """
        Forcefully remove callee from registration.

        :param registration_id: The ID of the registration to remove the callee from.
        :type registration_id: int
        :param callee_id: The WAMP session ID of the callee to remove.
        :type callee_id: int
        """
        callee = self._router._session_id_to_session.get(callee_id, None)

        if not callee:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SESSION,
                message="no session with ID {} exists on this router".format(
                    callee_id))

        registration = self._router._dealer._registration_map.get_observation_by_id(
            registration_id)
        if registration and not is_protected_uri(registration.uri):

            if callee not in registration.observers:
                raise ApplicationError(
                    ApplicationError.NO_SUCH_REGISTRATION,
                    message=
                    "session {} is not registered on registration {} on this dealer"
                    .format(callee_id, registration_id))

            self._router._dealer.removeCallee(registration,
                                              callee,
                                              reason=reason)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_REGISTRATION,
                message="no registration with ID {} exists on this dealer".
                format(registration_id))

    @wamp.register(u'wamp.subscription.remove_subscriber')
    def subscription_remove_subscriber(self,
                                       subscription_id,
                                       subscriber_id,
                                       reason=None):
        """
        Forcefully remove subscriber from subscription.

        :param subscription_id: The ID of the subscription to remove the subscriber from.
        :type subscription_id: int
        :param subscriber_id: The WAMP session ID of the subscriber to remove.
        :type subscriber_id: int
        """
        subscriber = self._router._session_id_to_session.get(
            subscriber_id, None)

        if not subscriber:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SESSION,
                message="no session with ID {} exists on this router".format(
                    subscriber_id))

        subscription = self._router._broker._subscription_map.get_observation_by_id(
            subscription_id)
        if subscription and not is_protected_uri(subscription.uri):

            if subscriber not in subscription.observers:
                raise ApplicationError(
                    ApplicationError.NO_SUCH_SUBSCRIPTION,
                    message=
                    "session {} is not subscribed on subscription {} on this broker"
                    .format(subscriber_id, subscription_id))

            self._router._broker.removeSubscriber(subscription,
                                                  subscriber,
                                                  reason=reason)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                message="no subscription with ID {} exists on this broker".
                format(subscription_id))

    @wamp.register(u'wamp.registration.get')
    def registration_get(self, registration_id):
        """
        Get registration details.

        :param registration_id: The ID of the registration to retrieve.
        :type registration_id: int

        :returns: The registration details.
        :rtype: dict
        """
        registration = self._router._dealer._registration_map.get_observation_by_id(
            registration_id)
        if registration and not is_protected_uri(registration.uri):
            registration_details = {
                'id': registration.id,
                'created': registration.created,
                'uri': registration.uri,
                'match': registration.match,
                'invoke': registration.extra.invoke,
            }
            return registration_details
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_REGISTRATION,
                message="no registration with ID {} exists on this dealer".
                format(registration_id))

    @wamp.register(u'wamp.subscription.get')
    def subscription_get(self, subscription_id):
        """
        Get subscription details.

        :param subscription_id: The ID of the subscription to retrieve.
        :type subscription_id: int

        :returns: The subscription details.
        :rtype: dict
        """
        subscription = self._router._broker._subscription_map.get_observation_by_id(
            subscription_id)
        if subscription and not is_protected_uri(subscription.uri):
            subscription_details = {
                'id': subscription.id,
                'created': subscription.created,
                'uri': subscription.uri,
                'match': subscription.match,
            }
            return subscription_details
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                message="no subscription with ID {} exists on this broker".
                format(subscription_id))

    @wamp.register(u'wamp.registration.list')
    def registration_list(self):
        """
        List current registrations.

        :returns: A dictionary with three entries for the match policies 'exact', 'prefix'
            and 'wildcard', with a list of registration IDs for each.
        :rtype: dict
        """
        registration_map = self._router._dealer._registration_map

        registrations_exact = []
        for registration in registration_map._observations_exact.values():
            if not is_protected_uri(registration.uri):
                registrations_exact.append(registration.id)

        registrations_prefix = []
        for registration in registration_map._observations_prefix.values():
            if not is_protected_uri(registration.uri):
                registrations_prefix.append(registration.id)

        registrations_wildcard = []
        for registration in registration_map._observations_wildcard.values():
            if not is_protected_uri(registration.uri):
                registrations_wildcard.append(registration.id)

        return {
            'exact': registrations_exact,
            'prefix': registrations_prefix,
            'wildcard': registrations_wildcard,
        }

    @wamp.register(u'wamp.subscription.list')
    def subscription_list(self):
        """
        List current subscriptions.

        :returns: A dictionary with three entries for the match policies 'exact', 'prefix'
            and 'wildcard', with a list of subscription IDs for each.
        :rtype: dict
        """
        subscription_map = self._router._broker._subscription_map

        subscriptions_exact = []
        for subscription in subscription_map._observations_exact.values():
            if not is_protected_uri(subscription.uri):
                subscriptions_exact.append(subscription.id)

        subscriptions_prefix = []
        for subscription in subscription_map._observations_prefix.values():
            if not is_protected_uri(subscription.uri):
                subscriptions_prefix.append(subscription.id)

        subscriptions_wildcard = []
        for subscription in subscription_map._observations_wildcard.values():
            if not is_protected_uri(subscription.uri):
                subscriptions_wildcard.append(subscription.id)

        return {
            'exact': subscriptions_exact,
            'prefix': subscriptions_prefix,
            'wildcard': subscriptions_wildcard,
        }

    @wamp.register(u'wamp.registration.match')
    def registration_match(self, procedure):
        """
        Given a procedure URI, return the registration best matching the procedure.

        This essentially models what a dealer does for dispatching an incoming call.

        :param procedure: The procedure to match.
        :type procedure: unicode

        :returns: The best matching registration or ``None``.
        :rtype: obj or None
        """
        registration = self._router._dealer._registration_map.best_matching_observation(
            procedure)
        if registration and not is_protected_uri(registration.uri):
            return registration.id
        else:
            return None

    @wamp.register(u'wamp.subscription.match')
    def subscription_match(self, topic):
        """
        Given a topic URI, returns all subscriptions matching the topic.

        This essentially models what a broker does for dispatching an incoming publication.

        :param topic: The topic to match.
        :type topic: unicode

        :returns: All matching subscriptions or ``None``.
        :rtype: obj or None
        """
        subscriptions = self._router._broker._subscription_map.match_observations(
            topic)
        if subscriptions:
            subscription_ids = []
            for subscription in subscriptions:
                if not is_protected_uri(subscription.uri):
                    subscription_ids.append(subscription.id)
            if subscription_ids:
                return subscription_ids
            else:
                return None
        else:
            return None

    @wamp.register(u'wamp.registration.lookup')
    def registration_lookup(self, procedure, options=None):
        """
        Given a procedure URI (and options), return the registration (if any) managing the procedure.

        This essentially models what a dealer does when registering for a procedure.

        :param procedure: The procedure to lookup the registration for.
        :type procedure: unicode
        :param options: Same options as when registering a procedure.
        :type options: dict or None

        :returns: The ID of the registration managing the procedure or ``None``.
        :rtype: int or None
        """
        options = options or {}
        match = options.get('match', u'exact')
        registration = self._router._dealer._registration_map.get_observation(
            procedure, match)
        if registration and not is_protected_uri(registration.uri):
            return registration.id
        else:
            return None

    @wamp.register(u'wamp.subscription.lookup')
    def subscription_lookup(self, topic, options=None):
        """
        Given a topic URI (and options), return the subscription (if any) managing the topic.

        This essentially models what a broker does when subscribing for a topic.

        :param topic: The topic to lookup the subscription for.
        :type topic: unicode
        :param options: Same options as when subscribing to a topic.
        :type options: dict or None

        :returns: The ID of the subscription managing the topic or ``None``.
        :rtype: int or None
        """
        options = options or {}
        match = options.get('match', u'exact')
        subscription = self._router._broker._subscription_map.get_observation(
            topic, match)
        if subscription and not is_protected_uri(subscription.uri):
            return subscription.id
        else:
            return None

    @wamp.register(u'wamp.registration.list_callees')
    def registration_list_callees(self, registration_id):
        """
        Retrieve list of callees (WAMP session IDs) registered on (attached to) a registration.

        :param registration_id: The ID of the registration to get callees for.
        :type registration_id: int

        :returns: A list of WAMP session IDs of callees currently attached to the registration.
        :rtype: list
        """
        registration = self._router._dealer._registration_map.get_observation_by_id(
            registration_id)
        if registration and not is_protected_uri(registration.uri):
            session_ids = []
            for callee in registration.observers:
                session_ids.append(callee._session_id)
            return session_ids
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_REGISTRATION,
                message="no registration with ID {} exists on this dealer".
                format(registration_id))

    @wamp.register(u'wamp.subscription.list_subscribers')
    def subscription_list_subscribers(self, subscription_id):
        """
        Retrieve list of subscribers (WAMP session IDs) subscribed on (attached to) a subscription.

        :param subscription_id: The ID of the subscription to get subscribers for.
        :type subscription_id: int

        :returns: A list of WAMP session IDs of subscribers currently attached to the subscription.
        :rtype: list
        """
        subscription = self._router._broker._subscription_map.get_observation_by_id(
            subscription_id)
        if subscription and not is_protected_uri(subscription.uri):
            session_ids = []
            for subscriber in subscription.observers:
                session_ids.append(subscriber._session_id)
            return session_ids
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                message="no subscription with ID {} exists on this broker".
                format(subscription_id))

    @wamp.register(u'wamp.registration.count_callees')
    def registration_count_callees(self, registration_id):
        """
        Retrieve number of callees registered on (attached to) a registration.

        :param registration_id: The ID of the registration to get the number of callees for.
        :type registration_id: int

        :returns: Number of callees currently attached to the registration.
        :rtype: int
        """
        registration = self._router._dealer._registration_map.get_observation_by_id(
            registration_id)
        if registration and not is_protected_uri(registration.uri):
            return len(registration.observers)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_REGISTRATION,
                message="no registration with ID {} exists on this dealer".
                format(registration_id))

    @wamp.register(u'wamp.subscription.count_subscribers')
    def subscription_count_subscribers(self, subscription_id):
        """
        Retrieve number of subscribers subscribed on (attached to) a subscription.

        :param subscription_id: The ID of the subscription to get the number of subscribers for.
        :type subscription_id: int

        :returns: Number of subscribers currently attached to the subscription.
        :rtype: int
        """
        subscription = self._router._broker._subscription_map.get_observation_by_id(
            subscription_id)
        if subscription and not is_protected_uri(subscription.uri):
            return len(subscription.observers)
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                message="no subscription with ID {} exists on this broker".
                format(subscription_id))

    @wamp.register(u'wamp.subscription.get_events')
    def subscription_get_events(self, subscription_id, limit=10):
        """
        Return history of events for given subscription.

        :param subscription_id: The ID of the subscription to get events for.
        :type subscription_id: int
        :param limit: Return at most this many events.
        :type limit: int

        :returns: List of events.
        :rtype: list
        """
        self.log.info("subscription_get_events({subscription_id}, {limit})",
                      subscription_id=subscription_id,
                      limit=limit)
        subscription = self._router._broker._subscription_map.get_observation_by_id(
            subscription_id)
        if subscription and not is_protected_uri(subscription.uri):
            events = self._router._broker._event_store.get_events(
                subscription_id, limit)
            if events is None:
                raise ApplicationError(
                    u'wamp.error.history_unavailable',
                    message="event history for the given subscription is unavailable")
            else:
                return events
        else:
            raise ApplicationError(
                ApplicationError.NO_SUCH_SUBSCRIPTION,
                message="no subscription with ID {} exists on this broker".
                format(subscription_id))

    @wamp.register(u'wamp.test.exception')
    def test_exception(self):
        raise ApplicationError(u'wamp.error.history_unavailable')

    @wamp.register(u'wamp.schema.describe')
    def schema_describe(self, uri=None):
        """
        Describe a given URI or all URIs.

        :param uri: The URI to describe or ``None`` to retrieve all declarations.
        :type uri: unicode

        :returns: A list of WAMP schema declarations.
        :rtype: list
        """
        if uri:
            return self._schemas.get(uri, None)
        else:
            return self._schemas

    @wamp.register(u'wamp.schema.define')
    def schema_define(self, uri, schema):
        """
        Declare metadata for a given URI.

        :param uri: The URI for which to declare metadata.
        :type uri: unicode
        :param schema: The WAMP schema declaration for
           the URI or ``None`` to remove any declarations for the URI.
        :type schema: dict

        :returns: ``None`` if declaration was unchanged, ``True`` if
           declaration was new, ``False`` if declaration existed, but was modified.
        :rtype: bool or None
        """
        if not schema:
            if uri in self._schemas:
                del self._schemas[uri]
                self.publish(u'wamp.schema.on_undefine', uri)
                return uri
            else:
                return None

        if uri not in self._schemas:
            was_new = True
            was_modified = False
        else:
            was_new = False
            if json.dumps(schema) != json.dumps(self._schemas[uri]):
                was_modified = True
            else:
                was_modified = False

        if was_new or was_modified:
            self._schemas[uri] = schema
            self.publish(u'wamp.schema.on_define', uri, schema, was_new)
            return was_new
        else:
            return None
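
Any session joined on the same realm can invoke the meta API registered above. A hedged client-side sketch:

# Hedged sketch: calling the WAMP meta API procedures registered by
# RouterServiceSession from an ordinary client session.
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession

class MetaApiClient(ApplicationSession):

    @inlineCallbacks
    def onJoin(self, details):
        count = yield self.call(u'wamp.session.count')
        session_ids = yield self.call(u'wamp.session.list')
        print("{0} sessions joined: {1}".format(count, session_ids))
        self.leave()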
Ejemplo n.º 52
0
    def __init__(self):
        self.log = make_logger("info", logger=Mock)
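
A sketch of what the Mock-backed logger buys in a test, assuming crossbar's make_logger instantiates whatever class is passed as `logger` (so log calls become recorded Mock calls):

# Hedged test sketch -- `Thing` is a made-up class under test.
from mock import Mock
from crossbar._logging import make_logger

class Thing(object):
    def __init__(self):
        self.log = make_logger("info", logger=Mock)

    def run(self):
        self.log.info("thing ran")

t = Thing()
t.run()
t.log.info.assert_called_once_with("thing ran")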
Ejemplo n.º 53
0
class MemoryEventStore(object):
    """
    Event store in-memory implementation.
    """

    log = make_logger()

    def __init__(self, config=None):
        # whole store configuration
        self._config = config or {}

        # limit to event history per subscription
        self._limit = self._config.get('limit', 1000)

        # map of publication ID -> event dict
        self._event_store = {}

        # map of publication ID -> set of subscription IDs
        self._event_subscriptions = {}

        # map of subscription ID -> deque of publication IDs
        self._event_history = {}

    def attach_subscription_map(self, subscription_map):
        # example topic being configured as persistent
        for sub in self._config.get('event-history', []):
            # FIXME: limit = sub.get('limit', self._limit)
            subscription_map.add_observer(self,
                                          uri=sub['uri'],
                                          match=sub.get('match', u'exact'))

    def store_event(self,
                    publisher_id,
                    publication_id,
                    topic,
                    args=None,
                    kwargs=None):
        """
        Persist the given event to history.

        :param publisher_id: The session ID of the publisher of the event being persisted.
        :type publisher_id: int
        :param publication_id: The publication ID of the event.
        :type publication_id: int
        :param topic: The topic URI of the event.
        :type topic: unicode
        :param args: The args payload of the event.
        :type args: list or None
        :param kwargs: The kwargs payload of the event.
        :type kwargs: dict or None
        """
        assert (publication_id not in self._event_store)
        evt = {
            'timestamp': utcnow(),
            'publisher': publisher_id,
            'publication': publication_id,
            'topic': topic,
            'args': args,
            'kwargs': kwargs
        }
        self._event_store[publication_id] = evt
        self.log.debug("event {publication_id} persisted",
                       publication_id=publication_id)

    def store_event_history(self, publication_id, subscription_id):
        """
        Persist the given publication history to subscriptions.

        :param publication_id: The ID of the event publication to be persisted.
        :type publication_id: int
        :param subscription_id: The ID of the subscription the event (identified by the publication ID)
            was published to, because the event's topic matched the subscription.
        :type subscription_id: int
        """
        assert (publication_id in self._event_store)

        # for in-memory history, we just use a double-ended queue
        if subscription_id not in self._event_history:
            self._event_history[subscription_id] = deque()

        # append event to history
        self._event_history[subscription_id].append(publication_id)

        if publication_id not in self._event_subscriptions:
            self._event_subscriptions[publication_id] = set()

        self._event_subscriptions[publication_id].add(subscription_id)

        self.log.debug(
            "event {publication_id} history persisted for subscription {subscription_id}",
            publication_id=publication_id,
            subscription_id=subscription_id)

        # purge history if over limit
        if len(self._event_history[subscription_id]) > self._limit:

            # remove leftmost event from history
            purged_publication_id = self._event_history[
                subscription_id].popleft()

            # remove the purged publication from event subscriptions
            self._event_subscriptions[purged_publication_id].remove(
                subscription_id)

            self.log.debug(
                "event {publication_id} purged fom history for subscription {subscription_id}",
                publication_id=purged_publication_id,
                subscription_id=subscription_id)

            # if no more event subscriptions exist for publication, remove that too
            if not self._event_subscriptions[purged_publication_id]:
                del self._event_subscriptions[purged_publication_id]
                del self._event_store[purged_publication_id]
                self.log.debug("event {publication_id} purged completey",
                               publication_id=purged_publication_id)

    def get_events(self, subscription_id, limit):
        """
        Retrieve given number of last events for a given subscription.

        If no history is maintained for the given subscription, None is returned.

        :param subscription_id: The ID of the subscription to retrieve events for.
        :type subscription_id: int
        :param limit: Limit number of events returned.
        :type limit: int

        :return: List of events.
        :rtype: list or None
        """
        if subscription_id not in self._event_history:
            return None
        else:
            s = self._event_history[subscription_id]

            # at most "limit" events in reverse chronological order
            res = []
            i = -1
            if limit > len(s):
                limit = len(s)
            for _ in range(limit):
                res.append(self._event_store[s[i]])
                i -= 1
            return res

    def get_event_history(self, subscription_id, from_ts, until_ts):
        """
        Retrieve event history for time range for a given subscription.

        If no history is maintained for the given subscription, None is returned.

        :param subscription_id: The ID of the subscription to retrieve events for.
        :type subscription_id: int
        :param from_ts: Filter events from this date (string in ISO-8601 format).
        :type from_ts: unicode
        :param until_ts: Filter events until this date (string in ISO-8601 format).
        :type until_ts: unicode
        """
        raise Exception("not implemented")
Ejemplo n.º 54
0
class RouterFactory(object):
    """
    Crossbar.io core router factory.
    """
    log = make_logger()
    router = Router
    """
    The router class this factory will create router instances from.
    """
    def __init__(self, node_id, options=None):
        """

        :param options: Default router options.
        :type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
        """
        assert (type(node_id) == six.text_type)
        self._node_id = node_id
        self._routers = {}
        self._options = options or RouterOptions(
            uri_check=RouterOptions.URI_CHECK_LOOSE)
        self._auto_create_realms = False

    def get(self, realm):
        """
        Implements :func:`autobahn.wamp.interfaces.IRouterFactory.get`
        """
        if self._auto_create_realms:
            if realm not in self._routers:
                self._routers[realm] = self.router(self, realm, self._options)
                self.log.debug("Router created for realm '{realm}'",
                               realm=realm)
            return self._routers[realm]
        else:
            return self._routers[realm]

    def __getitem__(self, realm):
        return self._routers[realm]

    def __contains__(self, realm):
        return realm in self._routers

    def onLastDetach(self, router):
        assert (router.realm in self._routers)
        del self._routers[router.realm]
        self.log.debug("Router destroyed for realm '{realm}'",
                       realm=router.realm)

    def start_realm(self, realm):
        """
        Starts a realm on this router.

        :param realm: The realm to start.
        :type realm: instance of :class:`crossbar.worker.router.RouterRealm`.

        :returns: The router instance for the started realm.
        :rtype: instance of :class:`crossbar.router.session.CrossbarRouter`
        """
        self.log.debug("CrossbarRouterFactory.start_realm(realm = {realm})",
                       realm=realm)

        # get name of realm (a URI in general)
        #
        uri = realm.config['name']
        assert (uri not in self._routers)

        # if configuration of realm contains a "store" item, set up a
        # realm store as appropriate ..
        store = None
        if 'store' in realm.config:
            store_config = realm.config['store']

            if store_config['type'] == 'lmdb':
                # if LMDB is available, and a realm store / database is configured,
                # create an LMDB environment
                if not HAS_LMDB:
                    raise Exception("LDMB not available")
                store = LmdbRealmStore(store_config)

            elif store_config['type'] == 'memory':
                store = MemoryRealmStore(store_config)
            else:
                raise Exception('logic error')

        # now create a router for the realm
        #
        router = Router(self, realm, self._options, store=store)
        self._routers[uri] = router
        self.log.debug("Router created for realm '{uri}'", uri=uri)

        return router

    def stop_realm(self, realm):
        self.log.debug("CrossbarRouterFactory.stop_realm(realm = {realm})",
                       realm=realm)

    def add_role(self, realm, config):
        self.log.debug(
            "CrossbarRouterFactory.add_role(realm = {realm}, config = {config})",
            realm=realm,
            config=config)

        assert (type(realm) == six.text_type)
        assert (realm in self._routers)

        router = self._routers[realm]
        uri = config[u'name']

        if u'permissions' in config:
            role = RouterRoleStaticAuth(router, uri, config[u'permissions'])
        elif u'authorizer' in config:
            role = RouterRoleDynamicAuth(router, uri, config[u'authorizer'])
        else:
            allow_by_default = config.get(u'allow-by-default', False)
            role = RouterRole(router, uri, allow_by_default=allow_by_default)

        router.add_role(role)

    def drop_role(self, realm, role):
        self.log.debug(
            "CrossbarRouterFactory.drop_role(realm = {realm}, role = {role})",
            realm=realm,
            role=role)
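
A hedged sketch of the three role-configuration shapes that add_role() dispatches on; the URIs and permission flags below are illustrative only:

# Hedged sketch: example configs for the three branches of add_role().
static_role = {                    # -> RouterRoleStaticAuth
    u'name': u'frontend',
    u'permissions': [
        {u'uri': u'com.example.*', u'call': True, u'subscribe': True},
    ],
}
dynamic_role = {                   # -> RouterRoleDynamicAuth
    u'name': u'backend',
    u'authorizer': u'com.example.authorize',  # a WAMP procedure URI
}
default_role = {                   # -> RouterRole
    u'name': u'anonymous',
    u'allow-by-default': True,
}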
Ejemplo n.º 55
0
class JsonResource(Resource):
    """
    Static Twisted Web resource that renders to a JSON document.
    """
    log = make_logger()

    def __init__(self, value, options=None):
        Resource.__init__(self)
        options = options or {}

        if options.get('prettify', False):
            self._data = json.dumps(value,
                                    sort_keys=True,
                                    indent=3,
                                    ensure_ascii=False)
        else:
            self._data = json.dumps(value,
                                    separators=(',', ':'),
                                    ensure_ascii=False)

        # Twisted Web render_METHOD methods are expected to return a byte string
        self._data = self._data.encode('utf8')

        self._allow_cross_origin = options.get('allow_cross_origin', True)
        self._discourage_caching = options.get('discourage_caching', False)

        # number of HTTP/GET requests we served from this resource
        #
        self._requests_served = 0

    def render_GET(self, request):
        # we produce JSON: set correct response content type
        #
        # note: both args to request.setHeader are supposed to be byte strings
        # https://twistedmatrix.com/documents/current/api/twisted.web.http.Request.html#setHeader
        #
        request.setHeader(b'content-type', b'application/json; charset=utf-8')

        # set response headers for cross-origin requests
        #
        if self._allow_cross_origin:
            origin = request.getHeader(b'origin')
            if origin is None or origin == b'null':
                origin = b'*'
            request.setHeader(b'access-control-allow-origin', origin)
            request.setHeader(b'access-control-allow-credentials', b'true')

            headers = request.getHeader(b'access-control-request-headers')
            if headers is not None:
                request.setHeader(b'access-control-allow-headers', headers)

        # set response headers to disallow caching
        #
        if self._discourage_caching:
            request.setHeader(
                b'cache-control',
                b'no-store, no-cache, must-revalidate, max-age=0')

        self._requests_served += 1
        if self._requests_served % 10000 == 0:
            self.log.debug("Served {requests_served} requests",
                           requests_served=self._requests_served)

        return self._data
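
A minimal usage sketch for JsonResource; the port and path are arbitrary:

# Hedged usage sketch: serving a static JSON document at /status.
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.resource import Resource

root = Resource()
root.putChild(b"status", JsonResource({u'status': u'ok'},
                                      options={'prettify': True}))
# reactor.listenTCP(8080, Site(root))
# reactor.run()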
Ejemplo n.º 56
0
class NodeControllerSession(NativeProcessSession):
    """
    Singleton node WAMP session hooked up to the node management router.

    This class exposes the node's management API.
    """

    log = make_logger()

    def __init__(self, node):
        """
        :param node: The node singleton for this node controller session.
        :type node: obj
        """
        NativeProcessSession.__init__(self, reactor=node._reactor)

        # associated node
        self._node = node
        self._node_id = node._node_id
        self._realm = node._realm

        self.cbdir = self._node._cbdir

        self._started = None
        self._pid = os.getpid()

        # map of worker processes: worker_id -> NativeWorkerProcess
        self._workers = {}

        self._shutdown_requested = False

    def onConnect(self):

        self.log.debug("Connected to node management router")

        # self._uri_prefix = u'crossbar.node.{}'.format(self.config.extra.node)
        self._uri_prefix = u'crossbar.node.{}'.format(self._node_id)

        NativeProcessSession.onConnect(self, False)

        # self.join(self.config.realm)
        self.join(self._realm)

    @inlineCallbacks
    def onJoin(self, details):

        self.log.info("Joined realm '{realm}' on node management router",
                      realm=details.realm)

        # When a (native) worker process has connected back to the router of
        # the node controller, the worker will publish this event
        # to signal its readiness.
        #
        def on_worker_ready(res):
            id = res['id']
            if id in self._workers:
                ready = self._workers[id].ready
                if not ready.called:
                    # fire the Deferred previously stored for
                    # signaling "worker ready"
                    ready.callback(id)
                else:
                    self.log.error(
                        "Internal error: on_worker_ready() fired for process {process}, but already called earlier",
                        process=id)
            else:
                self.log.error(
                    "Internal error: on_worker_ready() fired for process {process}, but no process with that ID",
                    process=id)

        self.subscribe(
            on_worker_ready,
            'crossbar.node.{}.on_worker_ready'.format(self._node_id))

        yield NativeProcessSession.onJoin(self, details)

        # register node controller procedures: 'crossbar.node.<ID>.<PROCEDURE>'
        #
        procs = [
            'get_info',
            'shutdown',
            'get_workers',
            'get_worker_log',
            'start_router',
            'stop_router',
            'start_container',
            'stop_container',
            'start_guest',
            'stop_guest',
            'start_websocket_testee',
            'stop_websocket_testee',
        ]

        dl = []
        for proc in procs:
            uri = '{}.{}'.format(self._uri_prefix, proc)
            self.log.debug("Registering management API procedure {proc}",
                           proc=uri)
            dl.append(
                self.register(getattr(self, proc),
                              uri,
                              options=RegisterOptions(details_arg='details')))

        regs = yield DeferredList(dl)

        self.log.debug("Registered {cnt} management API procedures",
                       cnt=len(regs))

        self._started = utcnow()

        self.publish(u"crossbar.node.on_ready", self._node_id)

        self.log.debug("Node controller ready")

    def get_info(self, details=None):
        """
        Return basic information about this node.

        :returns: Information on the Crossbar.io node.
        :rtype: dict
        """
        return {
            'started': self._started,
            'pid': self._pid,
            'workers': len(self._workers),
            'directory': self.cbdir,
            'wamplets': self._get_wamplets()
        }

    @inlineCallbacks
    def shutdown(self, restart=False, mode=None, details=None):
        """
        Stop this node.
        """
        if self._shutdown_requested:
            # we're already shutting down .. ignore ..
            return

        self._shutdown_requested = True

        self.log.warn("Shutting down node...")

        # publish management API event
        shutdown_info = {
            'restart': restart,
            'mode': mode,
            'who': details.caller if details else None,
            'when': utcnow()
        }
        yield self.publish('crossbar.node.{}.on_shutdown'.format(
            self._node_id),
                           shutdown_info,
                           options=PublishOptions(
                               exclude=[details.caller] if details else None,
                               acknowledge=True))

        def stop_reactor():
            try:
                self._reactor.stop()
            except ReactorNotRunning:
                pass

        self._reactor.callLater(0, stop_reactor)

        returnValue(shutdown_info)

    def _get_wamplets(self):
        """
        List installed WAMPlets.
        """
        res = []

        for entrypoint in pkg_resources.iter_entry_points(
                'autobahn.twisted.wamplet'):
            try:
                e = entrypoint.load()
            except Exception:
                # skip entry points that fail to load
                pass
            else:
                ep = {}
                ep['dist'] = entrypoint.dist.key
                ep['version'] = entrypoint.dist.version
                ep['location'] = entrypoint.dist.location
                ep['name'] = entrypoint.name
                ep['module_name'] = entrypoint.module_name
                ep['entry_point'] = str(entrypoint)

                if hasattr(e, '__doc__') and e.__doc__:
                    ep['doc'] = e.__doc__.strip()
                else:
                    ep['doc'] = None

                ep['meta'] = e(None)

                res.append(ep)

        # sort by WAMPlet name (a list of dicts has no natural order)
        return sorted(res, key=lambda ep: ep['name'])

    def get_workers(self, details=None):
        """
        Returns the list of workers currently running on this node.

        :returns: List of worker processes.
        :rtype: list of dicts
        """
        now = datetime.utcnow()
        res = []
        for worker in sorted(self._workers.values(), key=lambda w: w.created):
            res.append({
                'id':
                worker.id,
                'pid':
                worker.pid,
                'type':
                worker.TYPE,
                'status':
                worker.status,
                'created':
                utcstr(worker.created),
                'started':
                utcstr(worker.started),
                'startup_time':
                (worker.started -
                 worker.created).total_seconds() if worker.started else None,
                'uptime':
                (now -
                 worker.started).total_seconds() if worker.started else None,
            })
        return res

    def get_worker_log(self, id, limit=None, details=None):
        """
        Get buffered log for a worker.

        :param limit: Optionally, limit the amount of log entries returned
           to the last N entries.
        :type limit: None or int

        :return: Buffered log for worker.
        :rtype: list
        """
        if id not in self._workers:
            emsg = "No worker with ID '{}'".format(id)
            raise ApplicationError(u'crossbar.error.no_such_worker', emsg)

        return self._workers[id].getlog(limit)

    def start_router(self, id, options=None, details=None):
        """
        Start a new router worker: a Crossbar.io native worker process
        that runs a WAMP router.

        :param id: The worker ID to start this router with.
        :type id: str
        :param options: The router worker options.
        :type options: dict
        """
        self.log.debug(
            "NodeControllerSession.start_router({id}, options={options})",
            id=id,
            options=options)

        return self._start_native_worker('router',
                                         id,
                                         options,
                                         details=details)

    def start_container(self, id, options=None, details=None):
        """
        Start a new container worker: a Crossbar.io native worker process
        that can host WAMP application components written in Python.

        :param id: The worker ID to start this container with.
        :type id: str
        :param options: The container worker options.
        :type options: dict
        """
        self.log.debug(
            "NodeControllerSession.start_container(id = {id}, options = {options})",
            id=id,
            options=options)

        return self._start_native_worker('container',
                                         id,
                                         options,
                                         details=details)

    def start_websocket_testee(self, id, options=None, details=None):
        """
        Start a new websocket-testee worker: a Crossbar.io native worker process
        that runs a plain echo'ing WebSocket server.

        :param id: The worker ID to start this router with.
        :type id: str
        :param options: The worker options.
        :type options: dict
        """
        self.log.debug(
            "NodeControllerSession.start_websocket_testee({id}, options={options})",
            id=id,
            options=options)

        return self._start_native_worker('websocket-testee',
                                         id,
                                         options,
                                         details=details)

    def _start_native_worker(self, wtype, id, options=None, details=None):

        assert (wtype in ['router', 'container', 'websocket-testee'])

        # prohibit starting a worker twice
        #
        if id in self._workers:
            emsg = "Could not start worker: a worker with ID '{}' is already running (or starting)".format(
                id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.worker_already_running',
                                   emsg)

        # check worker options
        #
        options = options or {}
        try:
            if wtype == 'router':
                checkconfig.check_router_options(options)
            elif wtype == 'container':
                checkconfig.check_container_options(options)
            elif wtype == 'websocket-testee':
                checkconfig.check_websocket_testee_options(options)
            else:
                raise Exception("logic error")
        except Exception as e:
            emsg = "Could not start native worker: invalid configuration ({})".format(
                e)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.invalid_configuration',
                                   emsg)

        # allow override Python executable from options
        #
        if 'python' in options:
            exe = options['python']

            # the executable must be an absolute path, e.g. /home/oberstet/pypy-2.2.1-linux64/bin/pypy
            #
            if not os.path.isabs(exe):
                emsg = "Invalid worker configuration: python executable '{}' must be an absolute path".format(
                    exe)
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_configuration',
                                       emsg)

            # of course the path must exist and actually be executable
            #
            if not (os.path.isfile(exe) and os.access(exe, os.X_OK)):
                emsg = "Invalid worker configuration: python executable '{}' does not exist or isn't an executable".format(
                    exe)
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_configuration',
                                       emsg)
        else:
            exe = sys.executable

        # all native workers (routers and containers for now) start from the same script
        #
        filename = os.path.abspath(
            os.path.join(crossbar.__file__, "..", "worker", "process.py"))

        # assemble command line for forking the worker
        #
        args = [exe, "-u", filename]
        args.extend(["--cbdir", self._node._cbdir])
        args.extend(["--node", str(self._node_id)])
        args.extend(["--worker", str(id)])
        args.extend(["--realm", self._realm])
        args.extend(["--type", wtype])
        args.extend(["--loglevel", _loglevel])

        # allow override worker process title from options
        #
        if options.get('title', None):
            args.extend(['--title', options['title']])

        # forward explicit reactor selection
        #
        if 'reactor' in options and sys.platform in options['reactor']:
            args.extend(['--reactor', options['reactor'][sys.platform]])
        # FIXME
        # elif self._node.options.reactor:
        #    args.extend(['--reactor', self._node.options.reactor])

        # create worker process environment
        #
        worker_env = create_process_env(options)

        # We need to use the same PYTHONPATH we were started with, so we can
        # find the Crossbar we're working with -- it may not be the same as the
        # one on the default path
        worker_env["PYTHONPATH"] = os.pathsep.join(sys.path)

        # log name of worker
        #
        worker_logname = {
            'router': 'Router',
            'container': 'Container',
            'websocket-testee': 'WebSocketTestee'
        }.get(wtype, 'Worker')

        # topic URIs used (later)
        #
        if wtype == 'router':
            starting_topic = 'crossbar.node.{}.on_router_starting'.format(
                self._node_id)
            started_topic = 'crossbar.node.{}.on_router_started'.format(
                self._node_id)
        elif wtype == 'container':
            starting_topic = 'crossbar.node.{}.on_container_starting'.format(
                self._node_id)
            started_topic = 'crossbar.node.{}.on_container_started'.format(
                self._node_id)
        elif wtype == 'websocket-testee':
            starting_topic = 'crossbar.node.{}.on_websocket_testee_starting'.format(
                self._node_id)
            started_topic = 'crossbar.node.{}.on_websocket_testee_started'.format(
                self._node_id)
        else:
            raise Exception("logic error")

        # add worker tracking instance to the worker map ..
        #
        if wtype == 'router':
            worker = RouterWorkerProcess(self,
                                         id,
                                         details.caller,
                                         keeplog=options.get(
                                             'traceback', None))
        elif wtype == 'container':
            worker = ContainerWorkerProcess(self,
                                            id,
                                            details.caller,
                                            keeplog=options.get(
                                                'traceback', None))
        elif wtype == 'websocket-testee':
            worker = WebSocketTesteeWorkerProcess(self,
                                                  id,
                                                  details.caller,
                                                  keeplog=options.get(
                                                      'traceback', None))
        else:
            raise Exception("logic error")

        self._workers[id] = worker

        # create a (custom) process endpoint.
        #
        if platform.isWindows():
            childFDs = None  # Use the default Twisted ones
        else:
            # The communication between controller and container workers is
            # using WAMP running over 2 pipes.
            # For controller->container traffic this runs over FD 0 (`stdin`)
            # and for the container->controller traffic, this runs over FD 3.
            #
            # Note: We use FD 3, not FD 1 (`stdout`) or FD 2 (`stderr`) for
            # container->controller traffic, so that components running in the
            # container which happen to write to `stdout` or `stderr` do not
            # interfere with the container-controller communication.
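            #
            # In Twisted's `childFDs` mapping, keys are FD numbers as seen
            # by the child; "w" means the parent holds the write end of the
            # pipe (the child reads), "r" means the parent holds the read end.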
            childFDs = {0: "w", 1: "r", 2: "r", 3: "r"}

        ep = WorkerProcessEndpoint(self._node._reactor,
                                   exe,
                                   args,
                                   env=worker_env,
                                   worker=worker,
                                   childFDs=childFDs)

        # ready handling
        #
        def on_ready_success(id):
            self.log.info("{worker} with ID '{id}' and PID {pid} started",
                          worker=worker_logname,
                          id=worker.id,
                          pid=worker.pid)

            self._node._reactor.addSystemEventTrigger(
                'before',
                'shutdown',
                self._cleanup_worker,
                self._node._reactor,
                worker,
            )

            worker.status = 'started'
            worker.started = datetime.utcnow()

            started_info = {
                'id': worker.id,
                'status': worker.status,
                'started': utcstr(worker.started),
                'who': worker.who
            }

            # FIXME: make start of stats printer dependent on log level ..
            worker.log_stats(5.)

            self.publish(started_topic,
                         started_info,
                         options=PublishOptions(exclude=[details.caller]))

            return started_info

        def on_ready_error(err):
            del self._workers[worker.id]
            emsg = 'Failed to start native worker: {}'.format(err.value)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.cannot_start", emsg,
                                   worker.getlog())

        worker.ready.addCallbacks(on_ready_success, on_ready_error)

        def on_exit_success(_):
            self.log.info("Node worker {} ended successfully".format(
                worker.id))
            worker.log_stats(0)
            del self._workers[worker.id]
            return True

        def on_exit_error(err):
            self.log.info("Node worker {} ended with error ({})".format(
                worker.id, err))
            worker.log_stats(0)
            del self._workers[worker.id]
            return False

        def check_for_shutdown(was_successful):
            shutdown = False

            # automatically shut down the node whenever a worker ends (successfully, or with error)
            #
            if checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT in self._node._node_shutdown_triggers:
                self.log.info(
                    "Node worker ended, and trigger '{}' active".format(
                        checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT))
                shutdown = True

            # automatically shut down the node when a worker ends with error
            #
            if not was_successful and checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR in self._node._node_shutdown_triggers:
                self.log.info(
                    "Node worker ended with error, and trigger '{}' active".
                    format(
                        checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR))
                shutdown = True

            # automatically shut down the node when no more workers are left
            #
            if not self._workers and checkconfig.NODE_SHUTDOWN_ON_LAST_WORKER_EXIT in self._node._node_shutdown_triggers:
                self.log.info(
                    "No more node workers running, and trigger '{}' active".
                    format(checkconfig.NODE_SHUTDOWN_ON_LAST_WORKER_EXIT))
                shutdown = True

            # initiate shutdown (but only if we are not already shutting down)
            #
            if shutdown:
                if not self._shutdown_requested:
                    self.log.info("Node shutting down ..")
                    self.shutdown()
                else:
                    # ignore: shutdown already initiated ..
                    self.log.info("Node is already shutting down.")
            else:
                self.log.info(
                    "Node will continue to run (node shutdown triggers active: {})"
                    .format(self._node._node_shutdown_triggers))

        d_on_exit = worker.exit.addCallbacks(on_exit_success, on_exit_error)
        d_on_exit.addBoth(check_for_shutdown)

        # create a transport factory for talking WAMP to the native worker
        #
        transport_factory = create_native_worker_client_factory(
            self._node._router_session_factory, worker.ready, worker.exit)
        transport_factory.noisy = False
        self._workers[id].factory = transport_factory

        # now (immediately before actually forking) signal the starting of the worker
        #
        starting_info = {
            'id': id,
            'status': worker.status,
            'created': utcstr(worker.created),
            'who': worker.who
        }

        # the caller gets a progressive result ..
        if details.progress:
            details.progress(starting_info)

        # .. while all others get an event
        self.publish(starting_topic,
                     starting_info,
                     options=PublishOptions(exclude=[details.caller]))

        # now actually fork the worker ..
        #
        self.log.info("Starting {worker} with ID '{id}'...",
                      worker=worker_logname,
                      id=id)
        self.log.debug("{worker} '{id}' command line is '{cmdline}'",
                       worker=worker_logname,
                       id=id,
                       cmdline=' '.join(args))

        d = ep.connect(transport_factory)

        def on_connect_success(proto):

            # This seems to be called immediately once the child process
            # has been forked, even if it then fails right away (e.g.
            # because the executable does not exist). In other words, it
            # is unclear under which conditions the deferred will errback.

            pid = proto.transport.pid
            self.log.debug("Native worker process connected with PID {pid}",
                           pid=pid)

            # note the PID of the worker
            worker.pid = pid

            # proto is an instance of NativeWorkerClientProtocol
            worker.proto = proto

            worker.status = 'connected'
            worker.connected = datetime.utcnow()

        def on_connect_error(err):

            # not sure when this errback is triggered at all ..
            self.log.error(
                "Interal error: connection to forked native worker failed ({err})",
                err=err)

            # in any case, forward the error ..
            worker.ready.errback(err)

        d.addCallbacks(on_connect_success, on_connect_error)

        return worker.ready

    @staticmethod
    def _cleanup_worker(reactor, worker):
        """
        This is called during reactor shutdown and ensures we wait for our
        subprocesses to shut down nicely.
        """
        log = make_logger()
        try:
            log.info("sending TERM to subprocess {pid}", pid=worker.pid)
            worker.proto.transport.signalProcess('TERM')
            # wait for the subprocess to shutdown; could add a timeout
            # after which we send a KILL maybe?
            d = Deferred()

            def protocol_closed(_):
                log.debug("{pid} exited", pid=worker.pid)
                d.callback(None)

            # await worker's timely demise
            worker.exit.addCallback(protocol_closed)

            def timeout(tried):
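                # Poll once per second; after ~20 polls, escalate to SIGKILL.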
                if d.called:
                    return
                log.info("waiting for {pid} to exit...", pid=worker.pid)
                reactor.callLater(1, timeout, tried + 1)
                if tried > 20:  # or just wait forever?
                    log.info("Sending SIGKILL to {pid}", pid=worker.pid)
                    try:
                        worker.proto.transport.signalProcess('KILL')
                    except ProcessExitedAlready:
                        pass  # ignore; it's already dead
                    d.callback(None)  # or recurse more?

            timeout(0)
            return d
        except ProcessExitedAlready:
            pass  # ignore; it's already dead

    def stop_router(self, id, kill=False, details=None):
        """
        Stops a currently running router worker.

        :param id: The ID of the router worker to stop.
        :type id: str
        :param kill: If `True`, kill the process. Otherwise, gracefully
                     shut down the worker.
        :type kill: bool

        :returns: Stopping information from the worker.
        :rtype: dict
        """
        self.log.debug("NodeControllerSession.stop_router({id}, kill={kill})",
                       id=id,
                       kill=kill)

        return self._stop_native_worker('router', id, kill, details=details)

    def stop_container(self, id, kill=False, details=None):
        """
        Stops a currently running container worker.

        :param id: The ID of the container worker to stop.
        :type id: str
        :param kill: If `True`, kill the process. Otherwise, gracefully
                     shut down the worker.
        :type kill: bool

        :returns: Stopping information from the worker.
        :rtype: dict
        """
        self.log.debug(
            "NodeControllerSession.stop_container({id}, kill={kill})",
            id=id,
            kill=kill)

        return self._stop_native_worker('container', id, kill, details=details)

    def stop_websocket_testee(self, id, kill=False, details=None):
        """
        Stops a currently running websocket-testee worker.

        :param id: The ID of the worker to stop.
        :type id: str
        :param kill: If `True`, kill the process. Otherwise, gracefully
                     shut down the worker.
        :type kill: bool

        :returns: Stopping information from the worker.
        :rtype: dict
        """
        self.log.debug(
            "NodeControllerSession.stop_websocket_testee({id}, kill={kill})",
            id=id,
            kill=kill)

        return self._stop_native_worker('websocket-testee',
                                        id,
                                        kill,
                                        details=details)

    @inlineCallbacks
    def _stop_native_worker(self, wtype, id, kill, details=None):

        assert (wtype in ['router', 'container', 'websocket-testee'])

        if id not in self._workers or self._workers[id].TYPE != wtype:
            emsg = "Could not stop native worker: no {} worker with ID '{}' currently running".format(
                wtype, id)
            raise ApplicationError(u'crossbar.error.worker_not_running', emsg)

        worker = self._workers[id]

        if worker.status != 'started':
            emsg = "Could not stop native worker: worker with ID '{}' is not in status 'started', but status: '{}')".format(
                id, worker.status)
            raise ApplicationError(u'crossbar.error.worker_not_running', emsg)

        stop_info = {
            'id': worker.id,
            'type': wtype,
            'kill': kill,
            'who': details.caller if details else None,
            'when': utcnow()
        }

        # publish management API event
        #
        yield self.publish(
            'crossbar.node.{}.worker.{}.on_stop_requested'.format(
                self._node_id, worker.id),
            stop_info,
            options=PublishOptions(
                exclude=[details.caller] if details else None,
                acknowledge=True))

        # send SIGKILL or SIGTERM to worker
        #
        if kill:
            self.log.info("Killing {wtype} worker with ID '{id}'",
                          wtype=wtype,
                          id=id)
            self._workers[id].proto.transport.signalProcess("KILL")
        else:
            self.log.info("Stopping {wtype} worker with ID '{id}'",
                          wtype=wtype,
                          id=id)
            self._workers[id].factory.stopFactory()
            self._workers[id].proto.transport.signalProcess('TERM')

        returnValue(stop_info)

    def start_guest(self, id, config, details=None):
        """
        Start a new guest process on this node.

        :param config: The guest process configuration.
        :type config: dict

        :returns: Deferred that resolves with guest worker startup information.
        """
        # prohibit starting a worker twice
        #
        if id in self._workers:
            emsg = "Could not start worker: a worker with ID '{}' is already running (or starting)".format(
                id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.worker_already_running',
                                   emsg)

        try:
            checkconfig.check_guest(config)
        except Exception as e:
            raise ApplicationError(
                u'crossbar.error.invalid_configuration',
                'invalid guest worker configuration: {}'.format(e))

        options = config.get('options', {})

        # guest process working directory
        #
        workdir = self._node._cbdir
        if 'workdir' in options:
            workdir = os.path.join(workdir, options['workdir'])
        workdir = os.path.abspath(workdir)

        # guest process executable and command line arguments
        #

        # first try to construct the fully qualified path of the guest
        # executable by joining workdir and the configured executable ..
        exe = os.path.abspath(os.path.join(workdir, config['executable']))

        if check_executable(exe):
            self.log.info(
                "Using guest worker executable '{exe}' (executable path taken from configuration)",
                exe=exe)
        else:
            # try to detect the fully qualified path for the guest
            # executable by doing a "which" on the configured executable name
            exe = shutil.which(config['executable'])
            if exe is not None and check_executable(exe):
                self.log.info(
                    "Using guest worker executable '{exe}' (executable path detected from environment)",
                    exe=exe)
            else:
                emsg = "Could not start worker: could not find and executable for '{}'".format(
                    config['executable'])
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_configuration',
                                       emsg)

        # guest process command line arguments
        #
        args = [exe]
        args.extend(config.get('arguments', []))

        # guest process environment
        #
        worker_env = create_process_env(options)

        # log name of worker
        #
        worker_logname = 'Guest'

        # topic URIs used (later)
        #
        starting_topic = 'crossbar.node.{}.on_guest_starting'.format(
            self._node_id)
        started_topic = 'crossbar.node.{}.on_guest_started'.format(
            self._node_id)

        # add worker tracking instance to the worker map ..
        #
        worker = GuestWorkerProcess(self,
                                    id,
                                    details.caller,
                                    keeplog=options.get('traceback', None))

        self._workers[id] = worker

        # create a (custom) process endpoint
        #
        ep = WorkerProcessEndpoint(self._node._reactor,
                                   exe,
                                   args,
                                   path=workdir,
                                   env=worker_env,
                                   worker=worker)

        # ready handling
        #
        def on_ready_success(proto):

            worker.pid = proto.transport.pid
            worker.status = 'started'
            worker.started = datetime.utcnow()

            self.log.info("{worker} with ID '{id}' and PID {pid} started",
                          worker=worker_logname,
                          id=worker.id,
                          pid=worker.pid)

            self._node._reactor.addSystemEventTrigger(
                'before',
                'shutdown',
                self._cleanup_worker,
                self._node._reactor,
                worker,
            )

            # directory watcher
            #
            if 'watch' in options:

                if HAS_FSNOTIFY:

                    # assemble list of watched directories
                    watched_dirs = []
                    for d in options['watch'].get('directories', []):
                        watched_dirs.append(
                            os.path.abspath(os.path.join(self._node._cbdir,
                                                         d)))

                    worker.watch_timeout = options['watch'].get('timeout', 1)

                    # create a directory watcher
                    worker.watcher = DirWatcher(dirs=watched_dirs,
                                                notify_once=True)

                    # make sure to stop the background thread running inside the
                    # watcher upon Twisted being shut down
                    def on_shutdown():
                        worker.watcher.stop()

                    self._node._reactor.addSystemEventTrigger(
                        'before', 'shutdown', on_shutdown)

                    # this handler will get fired by the watcher upon detecting an FS event
                    def on_fsevent(evt):
                        worker.watcher.stop()
                        proto.signal('TERM')

                        if options['watch'].get('action', None) == 'restart':
                            self.log.info("Restarting guest ..")
                            # Add a timeout large enough (perhaps add a config option later)
                            self._node._reactor.callLater(
                                worker.watch_timeout, self.start_guest, id,
                                config, details)
                            # Shut the worker down, after the restart event is scheduled
                            worker.stop()

                    # now run the watcher on a background thread
                    deferToThread(worker.watcher.loop, on_fsevent)

                else:
                    self.log.warn(
                        "Warning: cannot watch directory for changes - feature DirWatcher unavailable"
                    )

            # assemble guest worker startup information
            #
            started_info = {
                'id': worker.id,
                'status': worker.status,
                'started': utcstr(worker.started),
                'who': worker.who
            }

            self.publish(started_topic,
                         started_info,
                         options=PublishOptions(exclude=[details.caller]))

            return started_info

        def on_ready_error(err):
            del self._workers[worker.id]

            emsg = 'Failed to start guest worker: {}'.format(err.value)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.cannot_start", emsg,
                                   ep.getlog())

        worker.ready.addCallbacks(on_ready_success, on_ready_error)

        def on_exit_success(res):
            self.log.info("Guest {id} exited with success", id=worker.id)
            del self._workers[worker.id]

        def on_exit_error(err):
            self.log.error("Guest {id} exited with error {err.value}",
                           id=worker.id,
                           err=err)
            del self._workers[worker.id]

        worker.exit.addCallbacks(on_exit_success, on_exit_error)

        # create a transport factory for talking WAMP to the native worker
        #
        transport_factory = create_guest_worker_client_factory(
            config, worker.ready, worker.exit)
        transport_factory.noisy = False
        self._workers[id].factory = transport_factory

        # now (immediately before actually forking) signal the starting of the worker
        #
        starting_info = {
            'id': id,
            'status': worker.status,
            'created': utcstr(worker.created),
            'who': worker.who
        }

        # the caller gets a progressive result ..
        if details.progress:
            details.progress(starting_info)

        # .. while all others get an event
        self.publish(starting_topic,
                     starting_info,
                     options=PublishOptions(exclude=[details.caller]))

        # now actually fork the worker ..
        #
        self.log.info("Starting {worker} with ID '{id}'...",
                      worker=worker_logname,
                      id=id)
        self.log.debug("{worker} '{id}' using command line '{cli}'...",
                       worker=worker_logname,
                       id=id,
                       cli=' '.join(args))

        d = ep.connect(transport_factory)

        def on_connect_success(proto):

            # This seems to be called immediately once the child process
            # has been forked, even if it then fails right away (e.g.
            # because the executable does not exist). The deferred will
            # probably only errback if forking itself fails at the OS
            # level, e.g. due to out-of-memory conditions.

            pid = proto.transport.pid
            self.log.debug("Guest worker process connected with PID {pid}",
                           pid=pid)

            worker.pid = pid

            # proto is an instance of GuestWorkerClientProtocol
            worker.proto = proto

            worker.status = 'connected'
            worker.connected = datetime.utcnow()

        def on_connect_error(err):

            # not sure when this errback is triggered at all .. see above.
            self.log.error(
                "Internal error: connection to forked guest worker failed ({})"
                .format(err))

            # in any case, forward the error ..
            worker.ready.errback(err)

        d.addCallbacks(on_connect_success, on_connect_error)

        return worker.ready

    def stop_guest(self, id, kill=False, details=None):
        """
        Stops a currently running guest worker.

        :param id: The ID of the guest worker to stop.
        :type id: str
        """
        self.log.debug("NodeControllerSession.stop_guest({id}, kill={kill})",
                       id=id,
                       kill=kill)

        if id not in self._workers or self._workers[id].worker_type != 'guest':
            emsg = "Could not stop guest worker: no guest worker with ID '{}' currently running".format(
                id)
            raise ApplicationError(u'crossbar.error.worker_not_running', emsg)

        try:
            if kill:
                self._workers[id].proto.transport.signalProcess("KILL")
            else:
                self._workers[id].proto.transport.loseConnection()
        except Exception as e:
            emsg = "Could not stop guest worker with ID '{}': {}".format(id, e)
            raise ApplicationError(u'crossbar.error.stop_worker_failed', emsg)
        else:
            del self._workers[id]
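
The `_cleanup_worker` helper above implements a common Twisted shutdown pattern: send SIGTERM, poll until the child exits, and escalate to SIGKILL after a timeout. The following is a minimal, self-contained sketch of that pattern under stated assumptions: `proto` stands in for any Twisted `IProcessTransport`, `exited` for a Deferred that fires on process exit, and the 20-second limit mirrors the value used above.

from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.error import ProcessExitedAlready


def terminate_gracefully(proto, exited, timeout=20):
    """
    Send TERM to the process behind `proto`, wait for `exited` to fire,
    and send KILL if the process has not exited after `timeout` seconds.
    Returns a Deferred that fires once the process is gone.
    """
    done = Deferred()

    try:
        proto.signalProcess('TERM')
    except ProcessExitedAlready:
        done.callback(None)
        return done

    def on_exit(_):
        if not done.called:
            done.callback(None)

    exited.addCallback(on_exit)

    def poll(tried):
        if done.called:
            return
        if tried >= timeout:
            try:
                proto.signalProcess('KILL')
            except ProcessExitedAlready:
                pass  # already dead - nothing left to kill
            done.callback(None)
        else:
            reactor.callLater(1, poll, tried + 1)

    poll(0)
    return done
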
Example No. 57
def run_command_start(options, reactor=None):
    """
    Subcommand "crossbar start".
    """
    assert reactor

    # do not allow running more than one Crossbar.io instance
    # from the same Crossbar.io node directory
    #
    pid_data = check_is_running(options.cbdir)
    if pid_data:
        print(
            "Crossbar.io is already running from node directory {} (PID {}).".
            format(options.cbdir, pid_data['pid']))
        sys.exit(1)
    else:
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        with open(fp, 'wb') as fd:
            argv = options.argv
            options_dump = vars(options)
            pid_data = {
                'pid': os.getpid(),
                'argv': argv,
                'options': {
                    x: y
                    for x, y in options_dump.items()
                    if x not in ["func", "argv"]
                }
            }
            fd.write("{}\n".format(
                json.dumps(pid_data,
                           sort_keys=False,
                           indent=4,
                           separators=(', ', ': '),
                           ensure_ascii=False)).encode('utf8'))

    # remove node PID file when reactor exits
    #
    def remove_pid_file():
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        if os.path.isfile(fp):
            os.remove(fp)

    reactor.addSystemEventTrigger('after', 'shutdown', remove_pid_file)

    log = make_logger()

    # possibly generate new node key
    #
    from crossbar.controller.node import maybe_generate_key
    pubkey = maybe_generate_key(log, options.cbdir)

    # Print the banner.
    for line in BANNER.splitlines():
        log.info(click.style(("{:>40}").format(line), fg='yellow', bold=True))

    # bannerFormat = "{:<18} {:<24}"
    bannerFormat = "    {} {}"
    log.info(
        bannerFormat.format(
            "Crossbar.io Version:",
            click.style(crossbar.__version__, fg='yellow', bold=True)))
    if pubkey:
        log.info(
            bannerFormat.format("Node Public Key:",
                                click.style(pubkey, fg='yellow', bold=True)))
    log.info()

    log.info("Running from node directory '{cbdir}'", cbdir=options.cbdir)

    from twisted.python.reflect import qual
    log.info("Controller process starting ({python}-{reactor}) ..",
             python=platform.python_implementation(),
             reactor=qual(reactor.__class__).split('.')[-1])

    from crossbar.controller.node import Node
    from crossbar.common.checkconfig import InvalidConfigException

    # represents the running Crossbar.io node
    #
    node = Node(options.cbdir, reactor=reactor)

    # check and load the node configuration
    #
    try:
        if options.config:
            # load node config from file
            node.load(options.config)
        elif options.cdc:
            # load built-in CDC config
            node.load()
        else:
            # no config file, and not running CDC mode
            raise Exception(
                "Neither a node config was found, nor CDC mode is active.")
    except InvalidConfigException as e:
        log.error("Invalid node configuration")
        log.error("{e!s}", e=e)
        sys.exit(1)

    # now actually start the node ..
    #
    def start_crossbar():
        d = node.start(cdc_mode=options.cdc)

        def on_error(err):
            log.error("{e!s}", e=err.value)
            log.error("Could not start node")
            if reactor.running:
                reactor.stop()

        d.addErrback(on_error)

    reactor.callWhenRunning(start_crossbar)

    # enter event loop
    #
    try:
        reactor.run()
    except Exception:
        log.failure("Could not start reactor - {log_failure.value}")
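
Both `run_command_start` variants guard against concurrent starts with a JSON PID file in the node directory. Here is a minimal sketch of that check, assuming a `pid_path` chosen by the caller; `os.kill(pid, 0)` is the usual POSIX probe for process existence and does not actually deliver a signal.

import json
import os


def read_running_pid(pid_path):
    """
    Return the PID recorded in `pid_path` if that process is still
    alive, else None (removing a stale PID file along the way).
    """
    if not os.path.isfile(pid_path):
        return None
    with open(pid_path) as f:
        pid = json.load(f)['pid']
    try:
        os.kill(pid, 0)  # signal 0: existence check only, nothing is sent
    except OSError:
        os.remove(pid_path)  # no such process - treat the PID file as stale
        return None
    return pid


def write_pid_file(pid_path, argv):
    with open(pid_path, 'w') as f:
        json.dump({'pid': os.getpid(), 'argv': argv}, f, indent=4)
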
Example No. 58
def run_command_start(options):
    """
    Subcommand "crossbar start".
    """
    # do not allow running more than one Crossbar.io instance
    # from the same Crossbar.io node directory
    #
    pid_data = check_is_running(options.cbdir)
    if pid_data:
        print(
            "Crossbar.io is already running from node directory {} (PID {}).".
            format(options.cbdir, pid_data['pid']))
        sys.exit(1)
    else:
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        with open(fp, 'w') as fd:
            argv = options.argv
            options_dump = vars(options)
            del options_dump['func']
            del options_dump['argv']
            pid_data = {
                'pid': os.getpid(),
                'argv': argv,
                'options': options_dump
            }
            fd.write("{}\n".format(
                json.dumps(pid_data,
                           sort_keys=False,
                           indent=3,
                           separators=(',', ': '))))

    # we use an Autobahn utility to import the "best" available Twisted reactor
    #
    reactor = install_reactor(options.reactor, options.debug)

    # remove node PID file when reactor exits
    #
    def remove_pid_file():
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        if os.path.isfile(fp):
            os.remove(fp)

    reactor.addSystemEventTrigger('after', 'shutdown', remove_pid_file)

    # start Twisted logging
    #
    from crossbar._logging import log_publisher, make_logger
    from crossbar._logging import start_logging, set_global_log_level

    set_global_log_level(options.loglevel)

    log = make_logger()

    if options.logdir:
        # We want to log to a file
        from crossbar._logging import make_legacy_daily_logfile_observer

        log_publisher.addObserver(
            make_legacy_daily_logfile_observer(options.logdir,
                                               options.loglevel))
    else:
        # We want to log to stdout/stderr.
        from crossbar._logging import make_stdout_observer
        from crossbar._logging import make_stderr_observer

        if options.loglevel == "none":
            # Do no logging!
            pass
        elif options.loglevel == "error":
            # Error: Only print errors to stderr.
            log_publisher.addObserver(
                make_stdout_observer(show_source=False,
                                     format=options.logformat))
            log_publisher.addObserver(
                make_stderr_observer(show_source=False,
                                     format=options.logformat))
        elif options.loglevel == "warn":
            # Print warnings+ to stderr.
            log_publisher.addObserver(
                make_stdout_observer(show_source=False,
                                     format=options.logformat))
            log_publisher.addObserver(
                make_stderr_observer(show_source=False,
                                     format=options.logformat))
        elif options.loglevel == "info":
            # Print info to stdout, warn+ to stderr
            log_publisher.addObserver(
                make_stdout_observer(show_source=False,
                                     format=options.logformat))
            log_publisher.addObserver(
                make_stderr_observer(show_source=False,
                                     format=options.logformat))
        elif options.loglevel == "debug":
            # Print debug+info to stdout, warn+ to stderr, with the class
            # source
            log_publisher.addObserver(
                make_stdout_observer(show_source=True,
                                     format=options.logformat))
            log_publisher.addObserver(
                make_stderr_observer(show_source=True,
                                     format=options.logformat))
        elif options.loglevel == "trace":
            # Print trace+, with the class source
            log_publisher.addObserver(
                make_stdout_observer(show_source=True,
                                     format=options.logformat,
                                     trace=True))
            log_publisher.addObserver(
                make_stderr_observer(show_source=True,
                                     format=options.logformat))
        else:
            assert False, "Shouldn't ever get here."

    # Actually start the logger.
    start_logging()

    for line in BANNER.splitlines():
        log.info(click.style(("{:>40}").format(line), fg='yellow', bold=True))

    bannerFormat = "{:>12} {:<24}"
    log.info(
        bannerFormat.format(
            "Version:",
            click.style(crossbar.__version__, fg='yellow', bold=True)))
    # log.info(bannerFormat.format("Python:", click.style(platform.python_implementation(), fg='yellow', bold=True)))
    # log.info(bannerFormat.format("Reactor:", click.style(qual(reactor.__class__).split('.')[-1], fg='yellow', bold=True)))
    log.info(
        bannerFormat.format("Started:",
                            click.style(utcnow(), fg='yellow', bold=True)))
    log.info()

    log.info("Starting from node directory {}".format(options.cbdir))

    # create and start Crossbar.io node
    #
    def start_crossbar():
        from crossbar.controller.node import Node
        node = Node(reactor, options)
        d = node.start()

        def on_error(err):
            log.error("Could not start node: {error}", error=err.value)
            if reactor.running:
                reactor.stop()

        d.addErrback(on_error)

    reactor.callWhenRunning(start_crossbar)

    try:
        log.info("Entering reactor event loop...")
        reactor.run()
    except Exception:
        log.failure("Could not start reactor: {log_failure.value}")
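
The long `if/elif` ladder above attaches stdout/stderr observers per log level. Using `twisted.logger` directly, the same level filtering can be sketched with a `LogLevelFilterPredicate`; `LogLevel.info` here merely stands in for the parsed `--loglevel` option.

import sys

from twisted.logger import (FilteringLogObserver, Logger, LogLevel,
                            LogLevelFilterPredicate, globalLogBeginner,
                            textFileLogObserver)

# Drop everything below `info`; the default level stands in for --loglevel.
predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.info)
observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate])
globalLogBeginner.beginLoggingTo([observer])

log = Logger()
log.debug("suppressed at level 'info'")
log.info("this line is printed")
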
Example No. 59
class NativeProcessSession(ApplicationSession):
    """
    A native Crossbar.io process (currently: controller, router or container).
    """
    log = make_logger()

    def __init__(self, config=None, reactor=None):
        if not reactor:
            from twisted.internet import reactor

        self._reactor = reactor
        super(NativeProcessSession, self).__init__(config=config)

    def onConnect(self, do_join=True):
        """
        """
        if not hasattr(self, 'cbdir'):
            self.cbdir = self.config.extra.cbdir

        if not hasattr(self, '_uri_prefix'):
            self._uri_prefix = 'crossbar.node.{}'.format(self.config.extra.node)

        self._started = datetime.utcnow()

        # see: BaseSession
        self.include_traceback = False
        self.debug_app = False

        self._manhole_service = None

        if _HAS_PSUTIL:
            self._pinfo = ProcessInfo()
            self._pinfo_monitor = None
            self._pinfo_monitor_seq = 0
        else:
            self._pinfo = None
            self._pinfo_monitor = None
            self._pinfo_monitor_seq = None
            self.log.info("Process utilities not available")

        self._connections = {}

        if do_join:
            self.join(self.config.realm)

    @inlineCallbacks
    def onJoin(self, details):
        """
        Called when the process has joined the node's management realm.
        """
        procs = [
            'start_manhole',
            'stop_manhole',
            'get_manhole',

            'start_connection',
            'stop_connection',
            'get_connections',

            'trigger_gc',

            'utcnow',
            'started',
            'uptime',

            'get_process_info',
            'get_process_stats',
            'set_process_stats_monitoring'
        ]

        dl = []
        for proc in procs:
            uri = '{}.{}'.format(self._uri_prefix, proc)
            self.log.debug("Registering procedure '{uri}'", uri=uri)
            dl.append(self.register(getattr(self, proc), uri, options=RegisterOptions(details_arg='details')))

        regs = yield DeferredList(dl)

        self.log.debug("Registered {len_reg} procedures", len_reg=len(regs))

    @inlineCallbacks
    def start_connection(self, id, config, details=None):
        """
        Starts a connection in this process.

        :param id: The ID for the started connection.
        :type id: unicode
        :param config: Connection configuration.
        :type config: dict
        :param details: Caller details.
        :type details: instance of :class:`autobahn.wamp.types.CallDetails`

        :returns: dict -- The connection.
        """
        self.log.debug("start_connection: id={id}, config={config}", id=id, config=config)

        # prohibit starting a component twice
        #
        if id in self._connections:
            emsg = "cannot start connection: a connection with id={} is already started".format(id)
            self.log.warn(emsg)
            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

        # check configuration
        #
        try:
            checkconfig.check_connection(config)
        except Exception as e:
            emsg = "invalid connection configuration ({})".format(e)
            self.log.warn(emsg)
            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)
        else:
            self.log.info("Starting {} in process.".format(config['type']))

        if config['type'] == u'postgresql.connection':
            if _HAS_POSTGRESQL:
                connection = PostgreSQLConnection(id, config)
            else:
                emsg = "unable to start connection - required PostgreSQL driver package not installed"
                self.log.warn(emsg)
                raise ApplicationError(u"crossbar.error.feature_unavailable", emsg)
        else:
            # should not arrive here
            raise Exception("logic error")

        self._connections[id] = connection

        try:
            yield connection.start()
            self.log.info("Connection {connection_type} started '{connection_id}'", connection_id=id, connection_type=config['type'])
        except Exception:
            del self._connections[id]
            raise

        state = connection.marshal()

        self.publish(u'crossbar.node.process.on_connection_start', state)

        returnValue(state)

    @inlineCallbacks
    def stop_connection(self, id, details=None):
        """
        Stop a connection currently running within this process.

        :param id: The ID of the connection to stop.
        :type id: unicode
        :param details: Caller details.
        :type details: instance of :class:`autobahn.wamp.types.CallDetails`

        :returns: dict -- Connection stop information.
        """
        self.log.debug("stop_connection: id={id}", id=id)

        if id not in self._connections:
            raise ApplicationError(u'crossbar.error.no_such_object', 'no connection with ID {} running in this process'.format(id))

        connection = self._connections[id]

        try:
            yield connection.stop()
        except Exception as e:
            self.log.warn('could not stop connection {id}: {error}', id=id, error=e)
            raise

        del self._connections[id]

        state = connection.marshal()

        self.publish(u'crossbar.node.process.on_connection_stop', state)

        returnValue(state)

    def get_connections(self, details=None):
        """
        Get connections currently running within this process.

        :param details: Caller details.
        :type details: instance of :class:`autobahn.wamp.types.CallDetails`

        :returns: list -- List of connections.
        """
        self.log.debug("get_connections")

        res = []
        for c in self._connections.values():
            res.append(c.marshal())
        return res

    def get_process_info(self, details=None):
        """
        Get process information (open files, sockets, ...).

        :returns: dict -- Dictionary with process information.
        """
        self.log.debug("{cls}.get_process_info",
                       cls=self.__class__.__name__)

        if self._pinfo:
            return self._pinfo.get_info()
        else:
            emsg = "Could not retrieve process statistics: required packages not installed"
            raise ApplicationError(u"crossbar.error.feature_unavailable", emsg)

    def get_process_stats(self, details=None):
        """
        Get process statistics (CPU, memory, I/O).

        :returns: dict -- Dictionary with process statistics.
        """
        self.log.debug("{cls}.get_process_stats", cls=self.__class__.__name__)

        if self._pinfo:
            return self._pinfo.get_stats()
        else:
            emsg = "Could not retrieve process statistics: required packages not installed"
            raise ApplicationError(u"crossbar.error.feature_unavailable", emsg)

    def set_process_stats_monitoring(self, interval, details=None):
        """
        Enable/disable periodic publication of process statistics.

        :param interval: The monitoring interval in seconds. Set to 0 to disable monitoring.
        :type interval: float
        """
        self.log.debug("{cls}.set_process_stats_monitoring(interval = {interval})",
                       cls=self.__class__.__name__, interval=interval)

        if self._pinfo:

            stats_monitor_set_topic = '{}.on_process_stats_monitoring_set'.format(self._uri_prefix)

            # stop and remove any existing monitor
            if self._pinfo_monitor:
                self._pinfo_monitor.stop()
                self._pinfo_monitor = None

                self.publish(stats_monitor_set_topic, 0, options=PublishOptions(exclude=details.caller))

            # possibly start a new monitor
            if interval > 0:
                stats_topic = '{}.on_process_stats'.format(self._uri_prefix)

                def publish_stats():
                    stats = self._pinfo.get_stats()
                    self._pinfo_monitor_seq += 1
                    stats[u'seq'] = self._pinfo_monitor_seq
                    self.publish(stats_topic, stats)

                self._pinfo_monitor = LoopingCall(publish_stats)
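                # LoopingCall.start() fires the first call immediately by
                # default (now=True), then repeats every `interval` seconds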
                self._pinfo_monitor.start(interval)

                self.publish(stats_monitor_set_topic, interval, options=PublishOptions(exclude=details.caller))
        else:
            emsg = "Cannot setup process statistics monitor: required packages not installed"
            raise ApplicationError(u"crossbar.error.feature_unavailable", emsg)

    def trigger_gc(self, details=None):
        """
        Manually trigger a garbage collection in this native process.

        This procedure is registered under
        ``crossbar.node.<node_id>.worker.<worker_id>.trigger_gc``
        for native workers and under
        ``crossbar.node.<node_id>.controller.trigger_gc``
        for node controllers.

        The procedure will publish an event when the garbage collection has finished to
        ``crossbar.node.<node_id>.worker.<worker_id>.on_gc_finished``
        for native workers and
        ``crossbar.node.<node_id>.controller.on_gc_finished``
        for node controllers:

        .. code-block:: javascript

            {
                "requester": {
                    "session_id": 982734923,
                    "auth_id": "bob",
                    "auth_role": "admin"
                },
                "duration": 190
            }

        .. note:: The caller of this procedure will NOT receive the event.

        :returns: Time (wall clock) consumed for garbage collection in ms.
        :rtype: int
        """
        self.log.debug("{cls}.trigger_gc", cls=self.__class__.__name__)

        started = rtime()

        # now trigger GC .. this is blocking!
        gc.collect()

        duration = int(round(1000. * (rtime() - started)))

        on_gc_finished = u'{}.on_gc_finished'.format(self._uri_prefix)
        self.publish(
            on_gc_finished,
            {
                u'requester': {
                    u'session_id': details.caller,
                    # FIXME:
                    u'auth_id': None,
                    u'auth_role': None
                },
                u'duration': duration
            },
            options=PublishOptions(exclude=details.caller)
        )

        return duration

    @inlineCallbacks
    def start_manhole(self, config, details=None):
        """
        Start a Manhole service within this process.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.start_manhole`` - for native workers
        * ``crossbar.node.<node_id>.controller.start_manhole`` - for node controllers

        The procedure takes a Manhole service configuration which defines
        a listening endpoint for the service and a list of users including
        passwords, e.g.

        .. code-block:: javascript

            {
                "endpoint": {
                    "type": "tcp",
                    "port": 6022
                },
                "users": [
                    {
                        "user": "******",
                        "password": "******"
                    }
                ]
            }

        **Errors:**

        The procedure may raise the following errors:

        * ``crossbar.error.invalid_configuration`` - the provided configuration is invalid
        * ``crossbar.error.already_started`` - the Manhole service is already running (or starting)
        * ``crossbar.error.feature_unavailable`` - the required support packages are not installed

        **Events:**

        The procedure will publish an event when the service **is starting** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_manhole_starting`` - for native workers
        * ``crossbar.node.<node_id>.controller.on_manhole_starting`` - for node controllers

        and publish an event when the service **has started** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_manhole_started`` - for native workers
        * ``crossbar.node.<node_id>.controller.on_manhole_started`` - for node controllers

        :param config: Manhole service configuration.
        :type config: dict
        """
        self.log.debug("{cls}.start_manhole(config = {config})",
                       cls=self.__class__.__name__, config=config)

        if not _HAS_MANHOLE:
            emsg = "Could not start manhole: required packages are missing ({})".format(_MANHOLE_MISSING_REASON)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.feature_unavailable", emsg)

        if self._manhole_service:
            emsg = "Could not start manhole - already running (or starting)"
            self.log.warn(emsg)
            raise ApplicationError(u"crossbar.error.already_started", emsg)

        try:
            checkconfig.check_manhole(config)
        except Exception as e:
            emsg = "Could not start manhole: invalid configuration ({})".format(e)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.invalid_configuration', emsg)

        # setup user authentication
        #
        checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        for user in config['users']:
            checker.addUser(user['user'], user['password'])

        # setup manhole namespace
        #
        namespace = {'session': self}

        class PatchedTerminalSession(TerminalSession):
            # get rid of
            # exceptions.AttributeError: TerminalSession instance has no attribute 'windowChanged'

            def windowChanged(self, winSize):
                pass

        rlm = TerminalRealm()
        rlm.sessionFactory = PatchedTerminalSession  # monkey patch
        rlm.chainedProtocolFactory.protocolFactory = lambda _: ColoredManhole(namespace)

        ptl = portal.Portal(rlm, [checker])

        factory = ConchFactory(ptl)
        factory.noisy = False

        self._manhole_service = ManholeService(config, details.caller)

        starting_topic = '{}.on_manhole_starting'.format(self._uri_prefix)
        starting_info = self._manhole_service.marshal()

        # the caller gets a progressive result ..
        if details.progress:
            details.progress(starting_info)

        # .. while all others get an event
        self.publish(starting_topic, starting_info, options=PublishOptions(exclude=details.caller))

        try:
            self._manhole_service.port = yield create_listening_port_from_config(config['endpoint'],
                                                                                 self.cbdir,
                                                                                 factory,
                                                                                 self._reactor,
                                                                                 self.log)
        except Exception as e:
            self._manhole_service = None
            emsg = "Manhole service endpoint cannot listen: {}".format(e)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.cannot_listen", emsg)

        # alright, manhole has started
        self._manhole_service.started = datetime.utcnow()
        self._manhole_service.status = 'started'

        started_topic = '{}.on_manhole_started'.format(self._uri_prefix)
        started_info = self._manhole_service.marshal()
        self.publish(started_topic, started_info, options=PublishOptions(exclude=details.caller))

        returnValue(started_info)

    @inlineCallbacks
    def stop_manhole(self, details=None):
        """
        Stop the Manhole service running in this process.

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.stop_manhole`` for native workers and under
        * ``crossbar.node.<node_id>.controller.stop_manhole`` for node controllers

        When no Manhole service is currently running within this process,
        or the Manhole service is already shutting down, a
        ``crossbar.error.not_started`` WAMP error is raised.

        The procedure will publish an event when the service **is stopping** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_manhole_stopping`` for native workers and
        * ``crossbar.node.<node_id>.controller.on_manhole_stopping`` for node controllers

        and will publish an event when the service **has stopped** to

        * ``crossbar.node.<node_id>.worker.<worker_id>.on_manhole_stopped`` for native workers and
        * ``crossbar.node.<node_id>.controller.on_manhole_stopped`` for node controllers
        """
        self.log.debug("{cls}.stop_manhole", cls=self.__class__.__name__)

        if not self._manhole_service or self._manhole_service.status != 'started':
            emsg = "Cannot stop manhole: not running (or already shutting down)"
            raise ApplicationError(u"crossbar.error.not_started", emsg)

        self._manhole_service.status = 'stopping'

        stopping_topic = u'{}.on_manhole_stopping'.format(self._uri_prefix)
        stopping_info = None

        # the caller gets a progressive result ..
        if details.progress:
            details.progress(stopping_info)

        # .. while all others get an event
        self.publish(stopping_topic, stopping_info, options=PublishOptions(exclude=details.caller))

        try:
            yield self._manhole_service.port.stopListening()
        except Exception as e:
            self.log.warn("error while stop listening on endpoint: {error}", error=e)

        self._manhole_service = None

        stopped_topic = u'{}.on_manhole_stopped'.format(self._uri_prefix)
        stopped_info = None
        self.publish(stopped_topic, stopped_info, options=PublishOptions(exclude=details.caller))

        returnValue(stopped_info)

    def get_manhole(self, details=None):
        """
        Get current manhole service information.

        :returns: dict -- A dict with service information or `None` if the service is not running.
        """
        self.log.debug("{cls}.get_manhole", cls=self.__class__.__name__)

        if not _HAS_MANHOLE:
            emsg = "Could not start manhole: required packages are missing ({})".format(_MANHOLE_MISSING_REASON)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.feature_unavailable", emsg)

        if not self._manhole_service:
            return None
        else:
            return self._manhole_service.marshal()

    def utcnow(self, details=None):
        """
        Return current time as determined from within this process.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.utcnow`` for native workers and under
        * ``crossbar.node.<node_id>.controller.utcnow`` for node controllers

        :returns: Current time in UTC ISO 8601 format.
        :rtype: unicode
        """
        self.log.debug("{cls}.utcnow", cls=self.__class__.__name__)

        return utcnow()

    def started(self, details=None):
        """
        Return start time of this process.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.started`` for native workers and under
        * ``crossbar.node.<node_id>.controller.started`` for node controllers

        :returns: Start time in UTC ISO 8601 format.
        :rtype: unicode
        """
        self.log.debug("{cls}.started", cls=self.__class__.__name__)

        return utcstr(self._started)

    def uptime(self, details=None):
        """
        Return uptime of this process.

        **Usage:**

        This procedure is registered under

        * ``crossbar.node.<node_id>.worker.<worker_id>.uptime`` for native workers and under
        * ``crossbar.node.<node_id>.controller.uptime`` for node controllers

        :returns: Uptime in seconds.
        :rtype: float
        """
        self.log.debug("{cls}.uptime", cls=self.__class__.__name__)

        now = datetime.utcnow()
        return (now - self._started).total_seconds()
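
# Hedged usage sketch (not from the source): calling the status procedures
# above from a management client. Assumes an autobahn ``ApplicationSession``
# already joined to the node management router; the node/worker IDs in the
# URI prefix are illustrative.

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def show_status(session):
    # registered URIs follow the patterns documented in the docstrings above
    prefix = u'crossbar.node.node1.worker.worker1.'
    now = yield session.call(prefix + u'utcnow')       # UTC ISO 8601 string
    started = yield session.call(prefix + u'started')  # UTC ISO 8601 string
    uptime = yield session.call(prefix + u'uptime')    # uptime in seconds (float)
    print(now, started, uptime)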
Ejemplo n.º 60
0
class FileUploadResource(Resource):
    """
    Twisted Web resource that handles file uploads over `HTTP/POST` requests.
    """

    log = make_logger()

    def __init__(self,
                 upload_directory,
                 temp_directory,
                 form_fields,
                 upload_session,
                 options=None):
        """

        :param upload_directory: The target directory where uploaded files will be stored.
        :type upload_directory: str
        :param temp_directory: A temporary directory where chunks of a file being uploaded are stored.
        :type temp_directory: str
        :param form_fields: Names of HTML form fields used for uploading.
        :type form_fields: dict
        :param upload_session: An instance of `ApplicationSession` used for publishing progress events.
        :type upload_session: obj
        :param options: Options for file upload.
        :type options: dict or None
        """

        Resource.__init__(self)
        self._uploadRoot = upload_directory
        self._tempDirRoot = temp_directory
        self._form_fields = form_fields
        self._fileupload_session = upload_session
        self._options = options or {}
        self._max_file_size = self._options.get('max_file_size',
                                                10 * 1024 * 1024)
        self._fileTypes = self._options.get('file_types', None)
        self._file_permissions = self._options.get('file_permissions', None)

        # track uploaded files / chunks
        self._uploads = {}

        self.log.info('Upload Resource started.')

        # scan the temp dir for previously uploaded chunks and fill the
        # _uploads dict with them, so existing uploads can be resumed;
        # all other leftovers will be purged
        for _fileTempDir in os.listdir(self._tempDirRoot):
            fileTempDir = os.path.join(self._tempDirRoot, _fileTempDir)
            fileTempName = os.path.basename(fileTempDir)
            if os.path.isdir(fileTempDir):
                self._uploads[fileTempName] = {
                    'chunk_list': [],
                    'origin': 'startup'
                }
                for chunk in os.listdir(fileTempDir):
                    if chunk[:6] == 'chunk_':
                        self._uploads[fileTempName]['chunk_list'].append(
                            int(chunk[6:]))
                    else:
                        os.remove(os.path.join(fileTempDir, chunk))
                # if no chunks were detected, remove the leftovers completely
                if len(self._uploads[fileTempName]['chunk_list']) == 0:
                    shutil.rmtree(fileTempDir)
                    self._uploads.pop(fileTempName, None)
            else:  # fileTempDir is a file remaining from a single chunk upload
                os.remove(fileTempDir)

        self.log.debug("Scanned pending uploads: {uploads}",
                       uploads=self._uploads)
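
        # Illustrative on-disk layout the scan above can resume (paths are
        # examples only): one sub-directory per pending file under the temp
        # root, holding numbered chunk files.
        #
        #     <temp_directory>/
        #         report.bin/
        #             chunk_1
        #             chunk_2
        #
        # After startup this yields:
        #
        #     self._uploads == {'report.bin': {'chunk_list': [1, 2],
        #                                      'origin': 'startup'}}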

    def render_POST(self, request):
        headers = {
            x.decode('iso-8859-1'): y.decode('iso-8859-1')
            for x, y in request.getAllHeaders().items()
        }

        origin = headers['host']

        postFields = cgi.FieldStorage(fp=request.content,
                                      headers=headers,
                                      environ={"REQUEST_METHOD": "POST"})

        f = self._form_fields

        filename = postFields[f['file_name']].value
        totalSize = int(postFields[f['total_size']].value)
        totalChunks = int(postFields[f['total_chunks']].value)
        chunkSize = int(postFields[f['chunk_size']].value)
        chunkNumber = int(postFields[f['chunk_number']].value)
        fileContent = postFields[f['content']].value

        if 'chunk_extra' in f and f['chunk_extra'] in postFields:
            chunk_extra = json.loads(postFields[f['chunk_extra']].value)
        else:
            chunk_extra = {}

        if 'finish_extra' in f and f['finish_extra'] in postFields:
            finish_extra = json.loads(postFields[f['finish_extra']].value)
        else:
            finish_extra = {}
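
        # Illustrative ``form_fields`` configuration matching the reads above
        # (field names are assumptions in the style of a Resumable.js client;
        # any uploader works as long as it posts these fields):
        #
        #     form_fields = {
        #         'file_name': 'resumableFilename',
        #         'total_size': 'resumableTotalSize',
        #         'total_chunks': 'resumableTotalChunks',
        #         'chunk_size': 'resumableChunkSize',
        #         'chunk_number': 'resumableChunkNumber',
        #         'content': 'file',
        #     }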

        fileId = filename

        # # prepare user specific upload areas
        # # NOT YET IMPLEMENTED
        # #
        # if 'auth_id' in f and f['auth_id'] in postFields:
        #     auth_id = postFields[f['auth_id']].value
        #     mydir = os.path.join(self._uploadRoot, auth_id)
        #     my_temp_dir = os.path.join(self._tempDirRoot, auth_id)
        #
        #     # check if auth_id is a valid directory_name
        #     #
        #     if auth_id != auth_id.encode('ascii', 'ignore'):
        #         msg = "The requestor auth_id must be an ascii string."
        #         if self._debug:
        #             log.msg(msg)
        #         # 415 Unsupported Media Type
        #         request.setResponseCode(415, msg)
        #         return msg
        # else:
        #     auth_id = 'anonymous'

        # create user specific folder

        # mydir = self._uploadRoot
        # my_temp_dir = self._tempDirRoot

        # if not os.path.exists(mydir):
        #     os.makedirs(mydir)
        # if not os.path.exists(my_temp_dir):
        #     os.makedirs(my_temp_dir)

        # prepare the on_progress publisher
        if 'on_progress' in f and f['on_progress'] in postFields and self._fileupload_session != {}:
            topic = postFields[f['on_progress']].value

            if 'session' in f and f['session'] in postFields:
                session = int(postFields[f['session']].value)
                publish_options = PublishOptions(eligible=[session])
            else:
                publish_options = None

            def fileupload_publish(payload):
                self._fileupload_session.publish(topic,
                                                 payload,
                                                 options=publish_options)
        else:

            def fileupload_publish(payload):
                pass

        # Register upload right at the start to avoid overlapping upload conflicts
        #
        if fileId not in self._uploads:
            self._uploads[fileId] = {'chunk_list': [], 'origin': origin}
            chunk_is_first = True
            self.log.debug(
                'Started upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
                file_name=fileId,
                total_size=totalSize,
                total_chunks=totalChunks,
                chunk_size=chunkSize,
                chunk_number=chunkNumber)
        else:
            chunk_is_first = False
            # If the chunks were read at crossbar startup, any client may claim and resume the pending upload!
            #
            upl = self._uploads[fileId]
            if upl['origin'] == 'startup':
                self.log.debug(
                    'Will try to resume upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
                    file_name=fileId,
                    total_size=totalSize,
                    total_chunks=totalChunks,
                    chunk_size=chunkSize,
                    chunk_number=chunkNumber)
                upl['origin'] = origin
            else:
                # check if another session is uploading this file already
                #
                if upl['origin'] != origin:
                    msg = "File being uploaded is already uploaded in a different session."
                    self.log.debug(msg)
                    # 409 Conflict
                    request.setResponseCode(409, msg.encode('utf8'))
                    return msg.encode('utf8')
                else:
                    # check if the chunk is being uploaded in this very session already
                    # this should never happen!
                    if chunkNumber in upl['chunk_list']:
                        msg = "Chunk being uploaded is already being uploaded."
                        self.log.debug(msg)
                        # Don't raise a conflict: this may be wanted behaviour.
                        # Even if an upload is resumable, the client does not
                        # have to resume it.
                        # 409 Conflict
                        # request.setResponseCode(409, msg.encode('utf8'))
                        # return msg.encode('utf8')

        # check file size
        #
        if totalSize > self._max_file_size:
            msg = "Size {} of file to be uploaded exceeds maximum {}".format(
                totalSize, self._max_file_size)
            self.log.debug(msg)
            # 413 Request Entity Too Large
            request.setResponseCode(413, msg.encode('utf8'))
            return msg.encode('utf8')

        # check file extensions
        #
        extension = os.path.splitext(filename)[1]
        if self._fileTypes and extension not in self._fileTypes:
            msg = "Type '{}' of file to be uploaded is in allowed types {}".format(
                extension, self._fileTypes)
            self.log.debug(msg)
            # 415 Unsupported Media Type
            request.setResponseCode(415, msg.encode('utf8'))
            return msg.encode('utf8')

        # TODO: check mime type
        #
        fileTempDir = os.path.join(self._tempDirRoot, fileId)
        chunkName = os.path.join(fileTempDir, 'chunk_' + str(chunkNumber))
        _chunkName = os.path.join(
            fileTempDir,
            '#kfhf3kz412uru578e38viokbjhfvz4w__' + 'chunk_' + str(chunkNumber))

        def mergeFile():
            # every chunk has to check if it is the last chunk written, except in a single chunk scenario
            if totalChunks > 1 and len(
                    self._uploads[fileId]['chunk_list']) >= totalChunks:
                # last chunk
                self.log.debug(
                    'Finished file upload after chunk {chunk_number} with chunk_list {chunk_list}',
                    chunk_number=chunkNumber,
                    chunk_list=self._uploads[fileId]['chunk_list'])

                # Merge all files into one file and remove the temp files
                # TODO: How to avoid the extra file IO?
                finalFileName = os.path.join(self._uploadRoot, fileId)
                _finalFileName = os.path.join(
                    fileTempDir, '#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)
                with open(_finalFileName, 'wb') as _finalFile:
                    for cn in range(1, totalChunks + 1):
                        with open(
                                os.path.join(fileTempDir, 'chunk_' + str(cn)),
                                'rb') as ff:
                            _finalFile.write(ff.read())

                os.rename(_finalFileName, finalFileName)

                if self._file_permissions:
                    perm = int(self._file_permissions, 8)
                    try:
                        os.chmod(finalFileName, perm)
                    except Exception as e:
                        msg = "file upload resource - could not change file permissions of uploaded file"
                        self.log.debug(msg)
                        self.log.debug(e)
                        self._uploads.pop(fileId, None)
                        request.setResponseCode(500, msg.encode('utf8'))
                        return msg.encode('utf8')
                    else:
                        self.log.debug(
                            "Changed permissions on {file_name} to {permissions}",
                            file_name=finalFileName,
                            permissions=self._file_permissions)

                # remove the file temp folder
                shutil.rmtree(fileTempDir)

                self._uploads.pop(fileId, None)

                # publish file upload progress to file_progress_URI
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": 0,
                    "status": "finished",
                    "progress": 1.,
                    "finish_extra": finish_extra,
                    "chunk_extra": chunk_extra
                })

        if chunk_is_first:
            # first chunk of file

            # publish file upload start
            #
            fileupload_publish({
                "id": fileId,
                "chunk": chunkNumber,
                "name": filename,
                "total": totalSize,
                "remaining": totalSize,
                "status": "started",
                "progress": 0.,
                "chunk_extra": chunk_extra
            })

            if totalChunks == 1:
                # only one chunk overall -> write file directly
                finalFileName = os.path.join(self._uploadRoot, fileId)
                _finalFileName = os.path.join(
                    self._tempDirRoot,
                    '#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)

                with open(_finalFileName, 'wb') as _finalFile:
                    _finalFile.write(fileContent)

                if self._file_permissions:
                    perm = int(self._file_permissions, 8)
                    try:
                        os.chmod(_finalFileName, perm)
                    except Exception as e:
                        # finalFileName.remove()
                        msg = "Could not change file permissions of uploaded file"
                        self.log.debug(msg)
                        self.log.debug(e)
                        request.setResponseCode(500, msg.encode('utf8'))
                        return msg.encode('utf8')
                    else:
                        self.log.debug(
                            "Changed permissions on {file_name} to {permissions}",
                            file_name=finalFileName,
                            permissions=self._file_permissions)

                os.rename(_finalFileName, finalFileName)
                if chunkNumber not in self._uploads[fileId]['chunk_list']:
                    self._uploads[fileId]['chunk_list'].append(chunkNumber)

                self._uploads.pop(fileId, None)

                # publish file upload progress to file_progress_URI
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": 0,
                    "status": "finished",
                    "progress": 1.,
                    "finish_extra": finish_extra,
                    "chunk_extra": chunk_extra
                })

            else:
                # first of more chunks
                # fileTempDir.remove()  # any potential conflict should have been resolved above. This should not be necessary!
                if not os.path.isdir(fileTempDir):
                    os.makedirs(fileTempDir)

                with open(_chunkName, 'wb') as chunk:
                    chunk.write(fileContent)
                os.rename(_chunkName,
                          chunkName)  # atomic file system operation
                self.log.debug('chunk_{chunk_number} written and moved to {chunk_name}',
                               chunk_number=chunkNumber, chunk_name=chunkName)
                # publish file upload progress
                #
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": totalSize - chunkSize,
                    "status": "progress",
                    "progress": round(float(chunkSize) / float(totalSize), 3),
                    "chunk_extra": chunk_extra
                })
                if chunkNumber not in self._uploads[fileId]['chunk_list']:
                    self._uploads[fileId]['chunk_list'].append(chunkNumber)
                mergeFile()
            # clean the temp dir once per file upload
            self._remove_stale_uploads()

        else:
            # intermediate chunk
            if not os.path.isdir(fileTempDir):
                os.makedirs(fileTempDir)

            with open(_chunkName, 'wb') as chunk:
                chunk.write(fileContent)
            os.rename(_chunkName, chunkName)
            self.log.debug('chunk_{chunk_number} written and moved to {chunk_name}',
                           chunk_number=chunkNumber, chunk_name=chunkName)

            if chunkNumber not in self._uploads[fileId]['chunk_list']:
                self._uploads[fileId]['chunk_list'].append(chunkNumber)

            received = sum(
                os.path.getsize(os.path.join(fileTempDir, f))
                for f in os.listdir(fileTempDir))

            fileupload_publish({
                "id": fileId,
                "chunk": chunkNumber,
                "name": filename,
                "total": totalSize,
                "remaining": totalSize - received,
                "status": "progress",
                "progress": round(float(received) / float(totalSize), 3),
                "chunk_extra": chunk_extra
            })
            mergeFile()
        # no errors encountered -> respond success
        request.setResponseCode(200)
        return b''

    def _remove_stale_uploads(self):
        """
        This only works if there is a temp folder exclusive for crossbar file uploads
        if the system temp folder is used then crossbar creates a "crossbar-uploads" there and
        uses that as the temp folder for uploads
        If you don't clean up regularly an attacker could fill up the OS file system
        """
        for _fileTempDir in os.listdir(self._tempDirRoot):
            fileTempDir = os.path.join(self._tempDirRoot, _fileTempDir)
            if os.path.isdir(fileTempDir) and os.path.basename(
                    fileTempDir) not in self._uploads:
                self.log.debug('Removing stale upload leftovers: {upload}',
                               upload=os.path.basename(fileTempDir))
                shutil.rmtree(fileTempDir)

    def render_GET(self, request):
        """
        This method can be used to check whether a chunk has been uploaded already.
        It returns with HTTP status code `200` if yes and `404` if not.
        The request needs to contain the file identifier and the chunk number to check for.
        """
        for param in ['file_name', 'chunk_number']:
            if not self._form_fields[param].encode(
                    'iso-8859-1') in request.args:
                msg = "file upload resource - missing request query parameter '{}', configured from '{}'".format(
                    self._form_fields[param], param)
                self.log.debug(msg)
                # 400 Bad Request
                request.setResponseCode(400, msg.encode('utf8'))
                return msg.encode('utf8')

        file_name = request.args[self._form_fields['file_name'].encode(
            'iso-8859-1')][0].decode('utf8')
        chunk_number = int(
            request.args[self._form_fields['chunk_number'].encode(
                'iso-8859-1')][0].decode('utf8'))

        # a complete upload will be repeated; an incomplete upload will be resumed
        if file_name in self._uploads and chunk_number in self._uploads[
                file_name]['chunk_list']:
            self.log.debug(
                "Skipping chunk upload {file_name} of chunk {chunk_number}",
                file_name=file_name,
                chunk_number=chunk_number)
            msg = b"chunk of file already uploaded"
            request.setResponseCode(200, msg)
            return msg
        else:
            msg = b"chunk of file not yet uploaded"
            request.setResponseCode(404, msg)
            return msg
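
# Hedged client-side sketch (not from the source): uploading a file in chunks
# against the resource above, skipping chunks the server already has. The URL
# and the Resumable.js-style field names are assumptions and must match the
# configured ``form_fields``; ``requests`` is an external dependency.

import requests

URL = 'http://localhost:8080/upload'  # assumed mount point of the resource

data = b'x' * 100000
chunk_size = 65536
chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]

for number, chunk in enumerate(chunks, start=1):
    # render_GET answers 200 if this chunk was already uploaded, 404 if not
    check = requests.get(URL, params={
        'resumableFilename': 'report.bin',
        'resumableChunkNumber': number,
    })
    if check.status_code == 200:
        continue

    response = requests.post(URL, data={
        'resumableFilename': 'report.bin',
        'resumableTotalSize': len(data),
        'resumableTotalChunks': len(chunks),
        'resumableChunkSize': chunk_size,
        'resumableChunkNumber': number,
    }, files={'file': chunk})
    response.raise_for_status()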