Example #1
    def __init__(self, level="debug"):
        self.logs = []
        self._old_log_level = get_global_log_level()
        self.desired_level = level
        self.log_text = StringIO()

        self._out_observer = make_stdout_observer(
            levels=(LogLevel.debug, LogLevel.info, LogLevel.warn,
                    LogLevel.error), _file=self.log_text, trace=True)
Example #3
    def _make_json(_event):
        event = dict(_event)
        level = event.pop("log_level", LogLevel.info).name

        # Bail out as early as possible if this event is outside our
        # target log level; this avoids (de-)serializing, for example,
        # all the debug() messages sent from workers to the controller.
        if log_levels.index(level) > log_levels.index(get_global_log_level()):
            return

        done_json = {
            "level": level,
            "namespace": event.pop("log_namespace", '')
        }

        eventText = formatEvent(event)

        if "log_failure" in event:
            # The event carries a Failure; append its traceback to the text.
            traceback = event["log_failure"].getTraceback()

            if not six.PY3:
                traceback = traceback.decode('utf-8')
                linesep = os.linesep.decode('utf-8')
            else:
                linesep = os.linesep

            eventText = eventText + linesep + traceback

        done_json["text"] = escape_formatting(eventText)

        try:
            event.pop("log_logger", "")
            event.pop("log_format", "")
            event.pop("log_source", "")
            event.pop("log_system", "")
            event.pop("log_failure", "")
            event.pop("failure", "")
            event.update(done_json)

            text = encoder.encode(event)

        except Exception:
            text = encoder.encode({
                "text": done_json["text"],
                "level": "error",
                "namespace": "crossbar._logging"
            })

        if not isinstance(text, six.text_type):
            text = text.decode('utf8')

        print(text, end=record_separator, file=outFile)
        outFile.flush()
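The early return above relies on log_levels being an ordered list of level names. A standalone sketch of that filter; the list below mirrors crossbar's ordering from least to most verbose, but treat it as an assumption rather than the canonical definition:

log_levels = ['none', 'critical', 'error', 'warn', 'info', 'debug', 'trace']

def should_emit(event_level, global_level):
    # Drop events that are more verbose than the configured global level.
    return log_levels.index(event_level) <= log_levels.index(global_level)

assert should_emit('error', 'info')        # errors pass at level 'info'
assert not should_emit('debug', 'info')    # debug messages are filtered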
Example #5
def test_trace(handler, framework):
    logger = txaio.make_logger()
    old_log = txaio.get_global_log_level()
    txaio.set_global_log_level("trace")

    # the txaio_trace variable should appear in the rendered message
    logger.trace("trace {txaio_trace}")

    txaio.set_global_log_level(old_log)

    assert len(handler.messages) == 1
    assert handler.messages[0].endswith(b"trace True")
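The txaio_trace key is not passed by the caller: the logger injects it when trace-level logging is active, so the rendered message ends in "trace True", which is exactly what the final assertion checks.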
Example #6
def test_emit_noop(handler, framework):
    """
    emit() with a too-low level is a no-op.
    """
    logger = txaio.make_logger()

    old_log = txaio.get_global_log_level()
    txaio.set_global_log_level("info")

    logger.emit("debug", "foobar")

    txaio.set_global_log_level(old_log)

    assert len(handler.messages) == 0
Example #9
def test_emit_ok(handler, framework):
    """
    emit() with an OK level emits the message.
    """
    logger = txaio.make_logger()

    old_log = txaio.get_global_log_level()
    txaio.set_global_log_level("trace")

    logger.emit("trace", "foobar")
    logger.emit("info", "barbaz")

    txaio.set_global_log_level(old_log)

    assert len(handler.messages) == 2
    assert handler.messages[0].endswith(b"foobar")
    assert handler.messages[1].endswith(b"barbaz")
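Examples #6 and #9 exercise both sides of the gate driven by get_global_log_level(): an emit() below the configured level produces nothing, while at "trace" every message is delivered in order.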
Example #13
    def _start_native_worker(self, wtype, id, options=None, details=None):

        assert(wtype in ['router', 'container', 'websocket-testee'])

        # prohibit starting a worker twice
        #
        if id in self._workers:
            emsg = "Could not start worker: a worker with ID '{}' is already running (or starting)".format(id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.worker_already_running', emsg)

        # check worker options
        #
        options = options or {}
        try:
            if wtype == 'router':
                checkconfig.check_router_options(options)
            elif wtype == 'container':
                checkconfig.check_container_options(options)
            elif wtype == 'websocket-testee':
                checkconfig.check_websocket_testee_options(options)
            else:
                raise Exception("logic error")
        except Exception as e:
            emsg = "Could not start native worker: invalid configuration ({})".format(e)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.invalid_configuration', emsg)

        # allow override Python executable from options
        #
        if 'python' in options:
            exe = options['python']

            # the executable must be an absolute path, e.g. /home/oberstet/pypy-2.2.1-linux64/bin/pypy
            #
            if not os.path.isabs(exe):
                emsg = "Invalid worker configuration: python executable '{}' must be an absolute path".format(exe)
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_configuration', emsg)

            # of course the path must exist and actually be executable
            #
            if not (os.path.isfile(exe) and os.access(exe, os.X_OK)):
                emsg = "Invalid worker configuration: python executable '{}' does not exist or isn't an executable".format(exe)
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_configuration', emsg)
        else:
            exe = sys.executable

        # assemble command line for forking the worker
        #
        # all native workers (routers and containers for now) start
        # from the same script in crossbar/worker/process.py -- we're
        # invoking via "-m" so that .pyc files, __pycache__ etc work
        # properly.

        args = [exe, "-u", "-m", "crossbar.worker.process"]
        args.extend(["--cbdir", self._node._cbdir])
        args.extend(["--node", str(self._node_id)])
        args.extend(["--worker", str(id)])
        args.extend(["--realm", self._realm])
        args.extend(["--type", wtype])
        args.extend(["--loglevel", get_global_log_level()])

        # allow override worker process title from options
        #
        if options.get('title', None):
            args.extend(['--title', options['title']])

        # forward explicit reactor selection
        #
        if 'reactor' in options and sys.platform in options['reactor']:
            args.extend(['--reactor', options['reactor'][sys.platform]])
        # FIXME
        # elif self._node.options.reactor:
        #    args.extend(['--reactor', self._node.options.reactor])

        # create worker process environment
        #
        worker_env = create_process_env(options)

        # We need to use the same PYTHONPATH we were started with, so we can
        # find the Crossbar we're working with -- it may not be the same as the
        # one on the default path
        worker_env["PYTHONPATH"] = os.pathsep.join(sys.path)

        # log name of worker
        #
        worker_logname = {
            'router': 'Router',
            'container': 'Container',
            'websocket-testee': 'WebSocketTestee'
        }.get(wtype, 'Worker')

        # topic URIs used (later)
        #
        if wtype == 'router':
            starting_topic = 'crossbar.node.{}.on_router_starting'.format(self._node_id)
            started_topic = 'crossbar.node.{}.on_router_started'.format(self._node_id)
        elif wtype == 'container':
            starting_topic = 'crossbar.node.{}.on_container_starting'.format(self._node_id)
            started_topic = 'crossbar.node.{}.on_container_started'.format(self._node_id)
        elif wtype == 'websocket-testee':
            starting_topic = 'crossbar.node.{}.on_websocket_testee_starting'.format(self._node_id)
            started_topic = 'crossbar.node.{}.on_websocket_testee_started'.format(self._node_id)
        else:
            raise Exception("logic error")

        # add worker tracking instance to the worker map ..
        #
        if wtype == 'router':
            worker = RouterWorkerProcess(self, id, details.caller, keeplog=options.get('traceback', None))
        elif wtype == 'container':
            worker = ContainerWorkerProcess(self, id, details.caller, keeplog=options.get('traceback', None))
        elif wtype == 'websocket-testee':
            worker = WebSocketTesteeWorkerProcess(self, id, details.caller, keeplog=options.get('traceback', None))
        else:
            raise Exception("logic error")

        self._workers[id] = worker

        # create a (custom) process endpoint.
        #
        if platform.isWindows():
            childFDs = None  # Use the default Twisted ones
        else:
            # The communication between controller and container workers
            # uses WAMP running over two pipes.
            # For controller->container traffic this runs over FD 0 (`stdin`)
            # and for the container->controller traffic, this runs over FD 3.
            #
            # Note: We use FD 3, not FD 1 (`stdout`) or FD 2 (`stderr`) for
            # container->controller traffic, so that components running in the
            # container which happen to write to `stdout` or `stderr` do not
            # interfere with the container-controller communication.
            childFDs = {0: "w", 1: "r", 2: "r", 3: "r"}

        ep = WorkerProcessEndpoint(
            self._node._reactor, exe, args, env=worker_env, worker=worker,
            childFDs=childFDs)

        # ready handling
        #
        def on_ready_success(id):
            self.log.info("{worker} with ID '{id}' and PID {pid} started",
                          worker=worker_logname, id=worker.id, pid=worker.pid)

            self._node._reactor.addSystemEventTrigger(
                'before', 'shutdown',
                self._cleanup_worker, self._node._reactor, worker,
            )

            worker.status = 'started'
            worker.started = datetime.utcnow()

            started_info = {
                u'id': worker.id,
                u'status': worker.status,
                u'started': utcstr(worker.started),
                u'who': worker.who,
            }

            # FIXME: make start of stats printer dependent on log level ..
            worker.log_stats(5.)

            self.publish(started_topic, started_info, options=PublishOptions(exclude=details.caller))

            return started_info

        def on_ready_error(err):
            del self._workers[worker.id]
            emsg = 'Failed to start native worker: {}'.format(err.value)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.cannot_start", emsg, worker.getlog())

        worker.ready.addCallbacks(on_ready_success, on_ready_error)

        def on_exit_success(_):
            self.log.info("Node worker {} ended successfully".format(worker.id))
            worker.log_stats(0)
            del self._workers[worker.id]
            return True

        def on_exit_error(err):
            self.log.info("Node worker {} ended with error ({})".format(worker.id, err))
            worker.log_stats(0)
            del self._workers[worker.id]
            return False

        def check_for_shutdown(was_successful):
            shutdown = False

            # automatically shutdown node whenever a worker ended (successfully, or with error)
            #
            if checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT in self._node._node_shutdown_triggers:
                self.log.info("Node worker ended, and trigger '{}' active".format(checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT))
                shutdown = True

            # automatically shutdown node when worker ended with error
            #
            if not was_successful and checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR in self._node._node_shutdown_triggers:
                self.log.info("Node worker ended with error, and trigger '{}' active".format(checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR))
                shutdown = True

            # automatically shutdown node when no more workers are left
            #
            if len(self._workers) == 0 and checkconfig.NODE_SHUTDOWN_ON_LAST_WORKER_EXIT in self._node._node_shutdown_triggers:
                self.log.info("No more node workers running, and trigger '{}' active".format(checkconfig.NODE_SHUTDOWN_ON_LAST_WORKER_EXIT))
                shutdown = True

            # initiate shutdown (but only if we are not already shutting down)
            #
            if shutdown:
                if not self._shutdown_requested:
                    self.log.info("Node shutting down ..")
                    self.shutdown()
                else:
                    # ignore: shutdown already initiated ..
                    self.log.info("Node is already shutting down.")
            else:
                self.log.info("Node will continue to run (node shutdown triggers active: {})".format(self._node._node_shutdown_triggers))

        d_on_exit = worker.exit.addCallbacks(on_exit_success, on_exit_error)
        d_on_exit.addBoth(check_for_shutdown)

        # create a transport factory for talking WAMP to the native worker
        #
        transport_factory = create_native_worker_client_factory(self._node._router_session_factory, worker.ready, worker.exit)
        transport_factory.noisy = False
        self._workers[id].factory = transport_factory

        # now (immediately before actually forking) signal the starting of the worker
        #
        starting_info = {
            u'id': id,
            u'status': worker.status,
            u'created': utcstr(worker.created),
            u'who': worker.who,
        }

        # the caller gets a progressive result ..
        if details.progress:
            details.progress(starting_info)

        # .. while all others get an event
        self.publish(starting_topic, starting_info, options=PublishOptions(exclude=details.caller))

        # now actually fork the worker ..
        #
        self.log.info("Starting {worker} with ID '{id}'...",
                      worker=worker_logname, id=id)
        self.log.debug("{worker} '{id}' command line is '{cmdline}'",
                       worker=worker_logname, id=id, cmdline=' '.join(args))

        d = ep.connect(transport_factory)

        def on_connect_success(proto):

            # This seems to be called immediately when the child process
            # has been forked, even if it then immediately fails because
            # e.g. the executable doesn't even exist. In other words, I'm
            # not sure under what conditions the deferred will errback ..

            pid = proto.transport.pid
            self.log.debug("Native worker process connected with PID {pid}",
                           pid=pid)

            # note the PID of the worker
            worker.pid = pid

            # proto is an instance of NativeWorkerClientProtocol
            worker.proto = proto

            worker.status = 'connected'
            worker.connected = datetime.utcnow()

        def on_connect_error(err):

            # not sure when this errback is triggered at all ..
            self.log.error("Interal error: connection to forked native worker failed ({err})", err=err)

            # in any case, forward the error ..
            worker.ready.errback(err)

        d.addCallbacks(on_connect_success, on_connect_error)

        return worker.ready
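Note that the childFDs mapping above is written from the controller's point of view: the controller writes the worker's stdin (FD 0) and reads FDs 1 through 3. Seen from inside the forked worker, the two WAMP pipes would look roughly like this (a hypothetical sketch, not crossbar code):

import os

controller_in = os.fdopen(0, 'rb')    # controller -> worker WAMP traffic (stdin)
controller_out = os.fdopen(3, 'wb')   # worker -> controller WAMP traffic (FD 3)

# FDs 1 and 2 (stdout/stderr) stay free for ordinary component output,
# so stray prints cannot corrupt the WAMP channel.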
Example #14
 def test_set_global(self):
     """
     Setting the global log level via the function changes it.
     """
     set_global_log_level("warn")
     self.assertEqual(get_global_log_level(), "warn")
Example #15
 def setUp(self):
     self.existing_level = get_global_log_level()
     return super(LoggerModuleTests, self).setUp()
Example #16
    def _start_native_worker(self, worker_type, worker_id, worker_options=None, details=None):

        # prohibit starting a worker twice
        #
        if worker_id in self._workers:
            emsg = "Could not start worker: a worker with ID '{}' is already running (or starting)".format(worker_id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.worker_already_running', emsg)

        # check worker options
        #
        options = worker_options or {}
        try:
            if worker_type in self._node._native_workers:
                if self._node._native_workers[worker_type]['checkconfig_options']:
                    self._node._native_workers[worker_type]['checkconfig_options'](options)
                else:
                    raise Exception('No checkconfig_options for worker type "{worker_type}" implemented!'.format(worker_type=worker_type))
            else:
                raise Exception('invalid worker type "{}"'.format(worker_type))
        except Exception as e:
            emsg = "Could not start native worker: invalid configuration ({})".format(e)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.invalid_configuration', emsg)

        # the fully qualified worker class as a string
        worker_class = qual(self._node._native_workers[worker_type]['worker_class'])

        # allow override Python executable from options
        #
        if 'python' in options:
            exe = options['python']

            # the executable must be an absolute path, e.g. /home/oberstet/pypy-2.2.1-linux64/bin/pypy
            #
            if not os.path.isabs(exe):
                emsg = "Invalid worker configuration: python executable '{}' must be an absolute path".format(exe)
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_configuration', emsg)

            # of course the path must exist and actually be executable
            #
            if not (os.path.isfile(exe) and os.access(exe, os.X_OK)):
                emsg = "Invalid worker configuration: python executable '{}' does not exist or isn't an executable".format(exe)
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_configuration', emsg)
        else:
            exe = sys.executable

        # allow override default Python module search paths from options
        #
        if 'pythonpath' in options:
            pythonpaths_to_add = [os.path.abspath(os.path.join(self._node._cbdir, p)) for p in options.get('pythonpath', [])]
        else:
            pythonpaths_to_add = []

        # assemble command line for forking the worker
        #
        # all native workers (routers and containers for now) start
        # from the same script in crossbar/worker/process.py -- we're
        # invoking via "-m" so that .pyc files, __pycache__ etc work
        # properly.
        #
        args = [exe, "-u", "-m", "crossbar.worker.process"]
        args.extend(["--cbdir", self._node._cbdir])
        args.extend(["--node", str(self._node._node_id)])
        args.extend(["--worker", str(worker_id)])
        args.extend(["--realm", self._realm])
        args.extend(["--klass", worker_class])
        args.extend(["--loglevel", get_global_log_level()])
        if "shutdown" in options:
            args.extend(["--shutdown", options["shutdown"]])

        # Node-level callback to inject worker arguments
        #
        self._node._extend_worker_args(args, options)

        # allow override worker process title from options
        #
        if options.get('title', None):
            args.extend(['--title', options['title']])

        # forward explicit reactor selection
        #
        if 'reactor' in options and sys.platform in options['reactor']:
            args.extend(['--reactor', options['reactor'][sys.platform]])
        # FIXME
        # elif self._node.options.reactor:
        #    args.extend(['--reactor', self._node.options.reactor])

        # create worker process environment
        #
        worker_env = create_process_env(options)

        # We need to use the same PYTHONPATH we were started with, so we can
        # find the Crossbar we're working with -- it may not be the same as the
        # one on the default path
        worker_env["PYTHONPATH"] = os.pathsep.join(pythonpaths_to_add + sys.path)

        # log name of worker
        #
        worker_logname = self._node._native_workers[worker_type]['logname']

        # each worker is run under its own dedicated WAMP auth role
        #
        worker_auth_role = u'crossbar.worker.{}'.format(worker_id)

        # topic URIs used (later)
        #
        starting_topic = self._node._native_workers[worker_type]['topics']['starting']
        started_topic = self._node._native_workers[worker_type]['topics']['started']

        # add worker tracking instance to the worker map ..
        #
        WORKER = self._node._native_workers[worker_type]['class']
        worker = WORKER(self, worker_id, details.caller, keeplog=options.get('traceback', None))
        self._workers[worker_id] = worker

        # create a (custom) process endpoint.
        #
        if platform.isWindows():
            childFDs = None  # Use the default Twisted ones
        else:
            # The communication between controller and container workers
            # uses WAMP running over two pipes.
            # For controller->container traffic this runs over FD 0 (`stdin`)
            # and for the container->controller traffic, this runs over FD 3.
            #
            # Note: We use FD 3, not FD 1 (`stdout`) or FD 2 (`stderr`) for
            # container->controller traffic, so that components running in the
            # container which happen to write to `stdout` or `stderr` do not
            # interfere with the container-controller communication.
            childFDs = {0: "w", 1: "r", 2: "r", 3: "r"}

        ep = WorkerProcessEndpoint(
            self._node._reactor, exe, args, env=worker_env, worker=worker,
            childFDs=childFDs)

        # ready handling
        #
        def on_ready_success(worker_id):
            self.log.info('{worker_type} worker "{worker_id}" process {pid} started',
                          worker_type=worker_logname, worker_id=worker.id, pid=worker.pid)

            self._node._reactor.addSystemEventTrigger(
                'before', 'shutdown',
                self._cleanup_worker, self._node._reactor, worker,
            )

            worker.on_worker_started()

            started_info = {
                u'id': worker.id,
                u'status': worker.status,
                u'started': utcstr(worker.started),
                u'who': worker.who,
            }

            # FIXME: make start of stats printer dependent on log level ..
            if False:
                worker.log_stats(5.)

            self.publish(started_topic, started_info, options=PublishOptions(exclude=details.caller))

            return started_info

        def on_ready_error(err):
            del self._workers[worker.id]
            emsg = 'Failed to start native worker: {}'.format(err.value)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.cannot_start", emsg, worker.getlog())

        worker.ready.addCallbacks(on_ready_success, on_ready_error)

        def on_exit_success(_):
            self.log.info("Node worker {worker.id} ended successfully", worker=worker)

            # clear worker log
            worker.log_stats(0)

            # remove the dedicated node router authrole we dynamically
            # added for the worker
            self._node._drop_worker_role(worker_auth_role)

            # remove our metadata tracking for the worker
            del self._workers[worker.id]

            # indicate that the worker exited successfully
            return True

        def on_exit_error(err):
            self.log.info("Node worker {worker.id} ended with error ({err})", worker=worker, err=err)

            # clear worker log
            worker.log_stats(0)

            # remove the dedicated node router authrole we dynamically
            # added for the worker
            self._node._drop_worker_role(worker_auth_role)

            # remove our metadata tracking for the worker
            del self._workers[worker.id]

            # indicate that the worker exited with error
            return False

        def check_for_shutdown(was_successful):
            self.log.info('Checking for node shutdown: worker_exit_success={worker_exit_success}, shutdown_requested={shutdown_requested}, node_shutdown_triggers={node_shutdown_triggers}', worker_exit_success=was_successful, shutdown_requested=self._shutdown_requested, node_shutdown_triggers=self._node._node_shutdown_triggers)

            shutdown = self._shutdown_requested

            # automatically shutdown node whenever a worker ended (successfully, or with error)
            #
            if checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT in self._node._node_shutdown_triggers:
                self.log.info("Node worker ended, and trigger '{trigger}' active", trigger=checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT)
                shutdown = True

            # automatically shutdown node when worker ended with error
            #
            if not was_successful and checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR in self._node._node_shutdown_triggers:
                self.log.info("Node worker ended with error, and trigger '{trigger}' active", trigger=checkconfig.NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR)
                shutdown = True

            # automatically shutdown node when no more workers are left
            #
            if len(self._workers) == 0 and checkconfig.NODE_SHUTDOWN_ON_LAST_WORKER_EXIT in self._node._node_shutdown_triggers:
                self.log.info("No more node workers running, and trigger '{trigger}' active", trigger=checkconfig.NODE_SHUTDOWN_ON_LAST_WORKER_EXIT)
                shutdown = True

            # initiate shutdown (but only if we are not already shutting down)
            #
            if shutdown:
                self.shutdown()
            else:
                self.log.info('Node will continue to run!')

        d_on_exit = worker.exit.addCallbacks(on_exit_success, on_exit_error)
        d_on_exit.addBoth(check_for_shutdown)

        # create a transport factory for talking WAMP to the native worker
        #
        transport_factory = create_native_worker_client_factory(self._node._router_session_factory, worker_auth_role, worker.ready, worker.exit)
        transport_factory.noisy = False
        self._workers[worker_id].factory = transport_factory

        # now (immediately before actually forking) signal the starting of the worker
        #
        starting_info = {
            u'id': worker_id,
            u'status': worker.status,
            u'created': utcstr(worker.created),
            u'who': worker.who,
        }

        # the caller gets a progressive result ..
        if details.progress:
            details.progress(starting_info)

        # .. while all others get an event
        self.publish(starting_topic, starting_info, options=PublishOptions(exclude=details.caller))

        # now actually fork the worker ..
        #
        self.log.info('{worker_logname} worker "{worker_id}" starting ..',
                      worker_logname=worker_logname, worker_id=worker_id)
        self.log.debug('{worker_logname} "{worker_id}" command line is "{cmdline}"',
                       worker_logname=worker_logname, worker_id=worker_id, cmdline=' '.join(args))

        d = ep.connect(transport_factory)

        def on_connect_success(proto):

            # This seems to be called immediately when the child process
            # has been forked, even if it then immediately fails because
            # e.g. the executable doesn't even exist. In other words, I'm
            # not sure under what conditions the deferred will errback ..

            self.log.debug('Native worker "{worker_id}" connected',
                           worker_id=worker_id)

            worker.on_worker_connected(proto)

            # dynamically add a dedicated authrole to the router
            # for the worker we've just started
            self._node._add_worker_role(worker_auth_role, options)

        def on_connect_error(err):

            # not sure when this errback is triggered at all ..
            self.log.error("Interal error: connection to forked native worker failed ({err})", err=err)

            # in any case, forward the error ..
            worker.ready.errback(err)

        d.addCallbacks(on_connect_success, on_connect_error)

        return worker.ready