Example #1
File: node.py Project: yankos/crossbar
    def boot(self):
        self.log.info('Booting node {method}',
                      method=hltype(FabricCenterNode.boot))

        # get fabric controller configuration
        #
        controller_config_extra = self._config.get('controller', {}).get(
            'fabric-center', {})

        # apply node config
        #
        yield self.boot_from_config(self._config)
        self._local_config_applied = True

        # start node manager
        #
        extra = {
            'cbdir': self._cbdir,
            'database': controller_config_extra.get('database', {}),
            'ready': Deferred(),
        }
        config = ComponentConfig(self._realm, extra)
        self._bridge_session = FabricServiceNodeManager(
            config=config,
            reactor=self._reactor,
            personality=self.personality,
            node=self)
        router = self._router_factory.get(self._realm)
        self._router_session_factory.add(self._bridge_session,
                                         router,
                                         authrole='trusted')
        yield extra['ready']
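
A minimal sketch (plain Twisted, illustrative names, not crossbar code) of the "ready" handshake used above: the caller passes a Deferred in the extra dict, the component fires it once it has started, and the caller yields on it before continuing.

from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks


class ChildService(object):
    """Stand-in for a session like FabricServiceNodeManager."""

    def __init__(self, extra):
        self._extra = extra

    def start(self):
        # pretend asynchronous startup finishes after 100 ms, then signal readiness
        reactor.callLater(0.1, self._extra['ready'].callback, self)


@inlineCallbacks
def boot():
    extra = {'ready': Deferred()}   # handshake Deferred, as in extra['ready'] above
    child = ChildService(extra)
    child.start()
    yield extra['ready']            # wait (asynchronously) until the child is ready
    print('child is ready')
    reactor.stop()


if __name__ == '__main__':
    boot()
    reactor.run()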
Example #2
    def start_worker(self,
                     worker_id,
                     worker_type,
                     worker_options=None,
                     details=None):
        """
        Start a new worker process in the node.
        """
        if type(worker_id) != str or worker_id in ['controller', '']:
            raise Exception('invalid worker ID "{}"'.format(worker_id))

        self.log.info(
            'Starting {worker_type}-worker "{worker_id}" .. {worker_klass}',
            worker_type=hl(worker_type),
            worker_id=hlid(worker_id),
            worker_klass=hltype(NodeController.start_worker))

        if worker_type == 'guest':
            return self._start_guest_worker(worker_id,
                                            worker_options,
                                            details=details)

        elif worker_type in self._node._native_workers:
            return self._start_native_worker(worker_type,
                                             worker_id,
                                             worker_options,
                                             details=details)

        else:
            raise Exception('invalid worker type "{}"'.format(worker_type))
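
A minimal, self-contained sketch (illustrative names only, not the crossbar API) of the same validate-then-dispatch shape: reject bad worker IDs first, then route to either the guest path or a table of native worker types.

def _start_guest_worker(worker_id, options=None):
    return 'guest:{}'.format(worker_id)


def _start_native_worker(worker_type, worker_id, options=None):
    return '{}:{}'.format(worker_type, worker_id)


# assumed set of native worker types; the real set lives on the node object
NATIVE_WORKERS = {'router', 'container', 'proxy'}


def start_worker(worker_id, worker_type, worker_options=None):
    if not isinstance(worker_id, str) or worker_id in ('controller', ''):
        raise ValueError('invalid worker ID "{}"'.format(worker_id))
    if worker_type == 'guest':
        return _start_guest_worker(worker_id, worker_options)
    if worker_type in NATIVE_WORKERS:
        return _start_native_worker(worker_type, worker_id, worker_options)
    raise ValueError('invalid worker type "{}"'.format(worker_type))


print(start_worker('worker001', 'router'))   # -> router:worker001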
Example #3
File: router.py Project: om26er/crossbar
    def has_role(self, realm: str, authrole: str) -> bool:
        """
        Check if a role with the given name is currently running in the given realm.

        :param realm: WAMP realm (name, _not_ run-time ID).
        :type realm: str

        :param authrole: WAMP authentication role (URI, _not_ run-time ID).
        :type authrole: str

        :returns: True if a role with the given name is currently running in the given realm.
        :rtype: bool
        """
        authrole = authrole or 'trusted'
        result = realm in self.realm_to_id and self.realm_to_id[
            realm] in self.realms
        if result:
            realm_id = self.realm_to_id[realm]
            result = (authrole in self.realms[realm_id].role_to_id
                      and self.realms[realm_id].role_to_id[authrole]
                      in self.realms[realm_id].roles)

            # note: this is to enable eg built-in "trusted" authrole
            result = result or authrole in self._service_sessions[realm]

        self.log.debug(
            '{func}(realm="{realm}", authrole="{authrole}") -> {result}',
            func=hltype(RouterController.has_role),
            realm=hlid(realm),
            authrole=hlid(authrole),
            result=hlval(result))
        return result
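
The lookup above goes through two levels of indirection: realm name to run-time ID to realm object, then role name to run-time ID to role object. A small sketch with plain dicts (illustrative structure, not crossbar's classes):

class Realm(object):
    def __init__(self):
        self.role_to_id = {}   # authrole name -> role run-time ID
        self.roles = {}        # role run-time ID -> role object


realm_to_id = {}               # realm name -> realm run-time ID
realms = {}                    # realm run-time ID -> Realm


def has_role(realm, authrole):
    realm_id = realm_to_id.get(realm)
    if realm_id not in realms:
        return False
    rlm = realms[realm_id]
    return rlm.role_to_id.get(authrole) in rlm.roles


# populate one realm with one role
realm_to_id['realm1'] = 'rlm-1'
realms['rlm-1'] = Realm()
realms['rlm-1'].role_to_id['anonymous'] = 'role-1'
realms['rlm-1'].roles['role-1'] = object()

assert has_role('realm1', 'anonymous')
assert not has_role('realm1', 'admin')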
Example #4
File: router.py Project: om26er/crossbar
    def get_router_realm_link(self, realm_id, link_id, details=None):
        """
        Get router link detail information.

        :param realm_id: The ID of the (local) realm of the link.
        :type realm_id: str

        :param link_id: The ID of the router link to return.
        :type link_id: str

        :returns: Router link detail information.
        :rtype: dict
        """
        assert type(realm_id) == str
        assert type(link_id) == str
        assert isinstance(details, CallDetails)

        self.log.info('{method} Get router link {link_id} on realm {realm_id}',
                      link_id=hlid(link_id),
                      realm_id=hlid(realm_id),
                      method=hltype(RouterController.get_router_realm_link))

        if realm_id not in self.realms:
            raise ApplicationError("crossbar.error.no_such_object",
                                   "No realm with ID '{}'".format(realm_id))

        rlink_manager = self.realms[realm_id].rlink_manager

        if link_id not in rlink_manager:
            raise ApplicationError("crossbar.error.no_such_object",
                                   "No link with ID '{}'".format(link_id))

        rlink = rlink_manager[link_id]

        return rlink.marshal()
Example #5
    def onJoin(self, details):
        assert self.config.extra and 'on_ready' in self.config.extra
        assert self.config.extra and 'other' in self.config.extra

        remote = self.config.extra['other']
        assert isinstance(remote, RLinkRemoteSession)

        self._exclude_authid = self.config.extra.get('exclude_authid', None)
        self._exclude_authrole = self.config.extra.get('exclude_authrole',
                                                       None)

        # setup local->remote event forwarding
        forward_events = self.config.extra.get('forward_events', False)
        if forward_events:
            yield self._setup_event_forwarding(remote)

        # setup local->remote invocation forwarding
        forward_invocations = self.config.extra.get('forward_invocations',
                                                    False)
        if forward_invocations:
            yield self._setup_invocation_forwarding(remote)

        self.log.debug(
            'Router link local session ready (forward_events={forward_events}, forward_invocations={forward_invocations}, realm={realm}, authid={authid}, authrole={authrole}, session={session}) {method}',
            method=hltype(RLinkLocalSession.onJoin),
            forward_events=hluserid(forward_events),
            forward_invocations=hluserid(forward_invocations),
            realm=hluserid(details.realm),
            authid=hluserid(details.authid),
            authrole=hluserid(details.authrole),
            session=hlid(details.session))

        on_ready = self.config.extra.get('on_ready', None)
        if on_ready and not on_ready.called:
            self.config.extra['on_ready'].callback(self)
Example #6
    def start_worker(self,
                     worker_id,
                     worker_type,
                     worker_options=None,
                     details=None):
        """
        Start a new worker process in the node.
        """
        self.log.info(
            'Starting {worker_type} worker {worker_id} {worker_klass}',
            worker_type=worker_type,
            worker_id=hlid(worker_id),
            worker_klass=hltype(NodeController.start_worker))
        if worker_type == u'guest':
            return self._start_guest_worker(worker_id,
                                            worker_options,
                                            details=details)

        elif worker_type in self._node._native_workers:
            return self._start_native_worker(worker_type,
                                             worker_id,
                                             worker_options,
                                             details=details)

        else:
            raise Exception('invalid worker type "{}"'.format(worker_type))
Example #7
    def start_router_transport(self, transport_id, config, create_paths=False, details=None):
        """
        Start a transport on this router worker.

        :param transport_id: The ID of the transport to start.
        :type transport_id: str

        :param config: The transport configuration.
        :type config: dict

        :param create_paths: If set, start subservices defined in the configuration too.
            This currently only applies to Web services, which are part of a Web transport.
        :type create_paths: bool

        :param details: Call details.
        :type details: :class:`autobahn.wamp.types.CallDetails`
        """
        self.log.info('Starting router transport "{transport_id}" {method}',
                      transport_id=transport_id, method=hltype(self.start_router_transport))

        # prohibit starting a transport twice
        if transport_id in self.transports:
            _emsg = 'Could not start transport: a transport with ID "{}" is already running (or starting)'.format(transport_id)
            self.log.error(_emsg)
            raise ApplicationError(u'crossbar.error.already_running', _emsg)

        # create a transport and parse the transport configuration
        router_transport = self.personality.create_router_transport(self, transport_id, config)

        caller = details.caller if details else None
        event = {
            u'id': transport_id
        }
        topic = u'{}.on_router_transport_starting'.format(self._uri_prefix)
        self.publish(topic, event, options=PublishOptions(exclude=caller))

        # start listening ..
        d = router_transport.start(create_paths)

        def ok(_):
            self.transports[transport_id] = router_transport
            self.log.debug('Router transport "{transport_id}" started and listening', transport_id=transport_id)

            topic = u'{}.on_router_transport_started'.format(self._uri_prefix)
            self.publish(topic, event, options=PublishOptions(exclude=caller))

            return router_transport.marshal()

        def fail(err):
            _emsg = "Cannot listen on transport endpoint: {log_failure}"
            self.log.error(_emsg, log_failure=err)

            topic = u'{}.on_router_transport_stopped'.format(self._uri_prefix)
            self.publish(topic, event, options=PublishOptions(exclude=caller))

            raise ApplicationError(u"crossbar.error.cannot_listen", _emsg)

        d.addCallbacks(ok, fail)
        return d
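
A minimal sketch (plain Twisted, illustrative names) of the ok/fail pattern used above: start an asynchronous operation, record the result in a success callback, and turn a low-level failure into an application-level error in the errback.

from twisted.internet.defer import succeed, fail


def listen(transport_id, should_fail=False):
    """Stand-in for router_transport.start(); returns a Deferred."""
    if should_fail:
        return fail(RuntimeError('cannot listen on endpoint'))
    return succeed('listening')


transports = {}


def start_transport(transport_id):
    d = listen(transport_id)

    def ok(result):
        transports[transport_id] = result    # remember the running transport
        return {'id': transport_id, 'state': result}

    def failed(err):
        # translate the low-level failure into a domain-specific error
        raise RuntimeError('cannot start transport "{}": {}'.format(transport_id, err.value))

    d.addCallbacks(ok, failed)
    return d


start_transport('ws0').addCallback(print)   # {'id': 'ws0', 'state': 'listening'}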
Example #8
    def start_router_transport(self, transport_id, config, create_paths=False, details=None):
        """
        Start a transport on this router worker.

        :param transport_id: The ID of the transport to start.
        :type transport_id: str

        :param config: The transport configuration.
        :type config: dict

        :param create_paths: If set, start subservices defined in the configuration too.
            This currently only applies to Web services, which are part of a Web transport.
        :type create_paths: bool

        :param details: Call details.
        :type details: :class:`autobahn.wamp.types.CallDetails`
        """
        self.log.info('Starting router transport "{transport_id}" {method}',
                      transport_id=transport_id, method=hltype(self.start_router_transport))

        # prohibit starting a transport twice
        if transport_id in self.transports:
            _emsg = 'Could not start transport: a transport with ID "{}" is already running (or starting)'.format(transport_id)
            self.log.error(_emsg)
            raise ApplicationError('crossbar.error.already_running', _emsg)

        # create a transport and parse the transport configuration
        router_transport = self.personality.create_router_transport(self, transport_id, config)

        caller = details.caller if details else None
        event = {
            'id': transport_id
        }
        topic = '{}.on_router_transport_starting'.format(self._uri_prefix)
        self.publish(topic, event, options=PublishOptions(exclude=caller))

        # start listening ..
        d = router_transport.start(create_paths)

        def ok(_):
            self.transports[transport_id] = router_transport
            self.log.debug('Router transport "{transport_id}" started and listening', transport_id=transport_id)

            topic = '{}.on_router_transport_started'.format(self._uri_prefix)
            self.publish(topic, event, options=PublishOptions(exclude=caller))

            return router_transport.marshal()

        def fail(err):
            _emsg = "Cannot listen on transport endpoint: {log_failure}"
            self.log.error(_emsg, log_failure=err)

            topic = '{}.on_router_transport_stopped'.format(self._uri_prefix)
            self.publish(topic, event, options=PublishOptions(exclude=caller))

            raise ApplicationError("crossbar.error.cannot_listen", _emsg)

        d.addCallbacks(ok, fail)
        return d
Example #9
 def __init__(self, *args, **kwargs):
     self.log.debug('{func}(*args={_args}, **kwargs={_kwargs})',
                    _args=args,
                    _kwargs=kwargs,
                    func=hltype(NativeWorkerClientFactory.__init__))
     self._authrole = kwargs.pop('authrole')
     WampWebSocketClientFactory.__init__(self, *args, **kwargs)
     self.proto = None
Example #10
 def connect_success(session):
     self.log.debug(
         'authenticator service session {session_id} attached to realm "{realm}" with authrole "{authrole}" {func}',
         func=hltype(self._init_dynamic_authenticator),
         session_id=hlid(session._session_id),
         authrole=hlid(session._authrole),
         realm=hlid(session._realm))
     self._authenticator_session = session
     d_ready.callback(None)
Example #11
 def start(self, node_id=None):
     self.log.info(
         '{note} [{method}]',
         note=hl('Starting node (initialize master-node personality) ..',
                 color='green',
                 bold=True),
         method=hltype(FabricCenterNode.start))
     res = yield node.FabricNode.start(self, node_id)
     return res
Example #12
 def set_service_session(self, session, realm, authrole=None):
     self.log.info('{func}(session={session}, realm="{realm}", authrole="{authrole}")',
                   func=hltype(self.set_service_session),
                   session=session,
                   realm=hlid(realm),
                   authrole=hlid(authrole))
     if realm not in self._service_sessions:
         self._service_sessions[realm] = {}
     self._service_sessions[realm][authrole] = session
Example #13
    def authenticate(self, signature: str) -> Union[Accept, Deny]:
        """
        The client has answered with a WAMP AUTHENTICATE message. Verify the message and accept or deny.

        :param signature: Signature over the challenge as received from the authenticating session.
        :returns: Either accept or deny the session.
        """
        raise NotImplementedError('{}(signature="{}")'.format(
            hltype(self.authenticate), signature))
Example #14
    def kill_by_authid(self, realm_id, authid, reason, message=None, details=None):
        self.log.info('Killing sessions by authid="{authid}" on realm "{realm_id}" .. {method}',
                      realm_id=hlid(realm_id), authid=hlid(authid),
                      method=hltype(RouterController.kill_by_authid))

        if realm_id not in self.realms:
            raise ApplicationError("crossbar.error.no_such_object", "No realm with ID '{}'".format(realm_id))

        # forward call directly to service agent
        return self.realms[realm_id].session.session_kill_by_authid(authid, reason, message=message, details=details)
Example #15
    def stop_web_transport_service(self, transport_id, path, details=None):
        """
        Stop a service on a Web transport.

        :param transport_id: The ID of the transport to stop the Web transport service on.
        :type transport_id: str

        :param path: The path (absolute URL, eg "/myservice1") of the service to stop.
        :type path: str

        :param details: Call details.
        :type details: :class:`autobahn.wamp.types.CallDetails`
        """
        self.log.info('{func}(transport_id={transport_id}, path="{path}")',
                      func=hltype(self.stop_web_transport_service),
                      transport_id=hlid(transport_id),
                      path=hlval(path))

        transport = self.transports.get(transport_id, None)
        if not transport or \
           not isinstance(transport, self.personality.RouterWebTransport) or \
           transport.state != self.personality.RouterTransport.STATE_STARTED:
            emsg = "Cannot stop service on Web transport: no transport with ID '{}' or transport is not a Web transport".format(
                transport_id)
            self.log.error(emsg)
            raise ApplicationError('crossbar.error.not_running', emsg)

        if path not in transport.root:
            emsg = "Cannot stop service on Web transport {}: no service running on path '{}'".format(
                transport_id, path)
            self.log.error(emsg)
            raise ApplicationError('crossbar.error.not_running', emsg)

        caller = details.caller if details else None
        self.publish(self._uri_prefix + '.on_web_transport_service_stopping',
                     transport_id,
                     path,
                     options=PublishOptions(exclude=caller))

        # now actually remove the web service. note: currently this is NOT async, but direct/sync.
        # FIXME: check that the underlying Twisted Web resource doesn't need any stopping too!
        del transport.root[path]

        on_web_transport_service_stopped = {
            'transport_id': transport_id,
            'path': path,
        }
        caller = details.caller if details else None
        self.publish(self._uri_prefix + '.on_web_transport_service_stopped',
                     transport_id,
                     path,
                     on_web_transport_service_stopped,
                     options=PublishOptions(exclude=caller))

        return on_web_transport_service_stopped
Example #16
    def onJoin(self, details):
        """
        Called when process has joined the node's management realm.
        """

        regs = yield self.register(
            self,
            prefix='{}.'.format(self._uri_prefix),
            options=RegisterOptions(details_arg='details'),
        )

        procs = []
        errors = []
        for reg in regs:
            if isinstance(reg, Failure):
                self.log.error("Failed to register management procedure: {f}",
                               f=reg,
                               log_failure=reg)
                errors.append(str(reg))
            else:
                procs.append(reg.procedure)

        if errors:
            raise ApplicationError('crossbar.error.cannot_start',
                                   'management API could not be initialized',
                                   errors=errors)
        else:
            self.log.debug(
                'Ok, registered {len_reg} management procedures on realm "{realm}" [{func}]:\n\n{procs}\n',
                len_reg=hlval(len(regs)),
                realm=hl(self.realm),
                func=hltype(self.onJoin),
                procs=hl(pformat(procs), color='white', bold=True))

        self.log.info(
            'Native worker ready! (worker={worker}, node_id="{node_id}", worker_id="{worker_id}") [{func}]',
            node_id=hlid(self._node_id),
            worker_id=hlid(self._worker_id),
            cbdir=hlval(self.cbdir),
            worker=hlid(self.__class__.__name__),
            func=hltype(self.onJoin))
        returnValue(regs)
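
A minimal sketch (plain Autobahn, hypothetical URIs) of the registration call made above: register all decorated methods of an object under a common URI prefix and have call details injected via RegisterOptions(details_arg='details').

from autobahn import wamp
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.types import RegisterOptions
from twisted.internet.defer import inlineCallbacks


class Api(object):
    @wamp.register(None)   # procedure URI is derived from the method name plus the prefix
    def status(self, details=None):
        return {'caller': details.caller if details else None}


class Worker(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        regs = yield self.register(
            Api(),
            prefix='com.example.worker1.',
            options=RegisterOptions(details_arg='details'),
        )
        self.log.info('registered {n} management procedures', n=len(regs))

# a running WAMP router would be needed to actually attach this session,
# e.g. via autobahn.twisted.wamp.ApplicationRunner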
Example #17
File: rlink.py Project: wp4613/crossbar
    def onLeave(self, details):
        self.log.warn(
            'Router link local session down! (realm={realm}, authid={authid}, authrole={authrole}, session={session}, details={details}) {method}',
            method=hltype(RLinkLocalSession.onLeave),
            realm=hluserid(self.config.realm),
            authid=hluserid(self._authid),
            authrole=hluserid(self._authrole),
            details=details,
            session=hlid(self._session_id))

        BridgeSession.onLeave(self, details)
Example #18
    def hello(self, realm: str,
              details: HelloDetails) -> Union[Accept, Deny, Challenge]:
        """
        When a HELLO message is received, this gets called to open the pending authentication.

        :param realm: The realm the client wishes to join (if the client did announce a realm).
        :param details: The details of the client provided for HELLO.
        :returns: Either return a challenge, or immediately accept or deny session.
        """
        raise NotImplementedError('{}(realm="{}", details={})'.format(
            hltype(self.hello), realm, details))
Example #19
File: router.py Project: yankos/crossbar
    def start_router_realm(self, realm_id, realm_config, details=None):
        self.log.info('Starting router realm "{realm_id}" {method}',
                      realm_id=realm_id,
                      method=hltype(ExtRouterController.start_router_realm))

        # activate this to test:
        if False and realm_config['name'] == 'realm1':
            self.log.info(hl('Auto-renaming realm1 to realm001', color='green', bold=True))
            realm_config['name'] = 'realm001'

        return RouterController.start_router_realm(self, realm_id, realm_config, details)
Example #20
 def get_service_session(self, realm, authrole=None):
     if realm in self._service_sessions:
         if authrole in self._service_sessions[realm]:
             session = self._service_sessions[realm][authrole]
             self.log.info('{func}(session={session}, realm="{realm}", authrole="{authrole}")',
                           func=hltype(self.get_service_session),
                           session=session,
                           realm=hlid(realm),
                           authrole=hlid(authrole))
             return succeed(session)
     return succeed(None)
Example #21
    def start(self, prefix):
        """
        Start the Metadata manager.

        :return:
        """
        assert self._started is None, 'cannot start Metadata manager - already running!'

        regs = yield self._session.register(self, prefix=prefix, options=RegisterOptions(details_arg='details'))
        self._prefix = prefix
        procs = [reg.procedure for reg in regs]
        self.log.debug('Mrealm controller {api} registered management procedures [{func}]:\n\n{procs}\n',
                       api=hl('Metadata manager API', color='green', bold=True),
                       func=hltype(self.start),
                       procs=hl(pformat(procs), color='white', bold=True))

        self._started = time_ns()
        self.log.info('Metadata manager ready for management realm {mrealm_oid}! [{func}]',
                      mrealm_oid=hlid(self._mrealm_oid),
                      func=hltype(self.start))
Example #22
    def start(self, node_id=None):
        self.log.info('{note} [{method}]',
                      note=hl('Starting node (initialize edge-node personality) ..', color='green', bold=True),
                      method=hltype(FabricNode.start))

        # run watchdog at 5Hz
        self._watchdog_looper = LoopingCall(self._watchdog)
        self._watchdog_looper.start(.2)

        res = yield node.Node.start(self, node_id or self._node_id)
        return res
Example #23
    def onLeave(self, details):
        self.log.warn(
            '{klass}.onLeave(): rlink remote session left! (realm={realm}, authid={authid}, authrole={authrole}, session={session}, details={details}) {method}',
            klass=self.__class__.__name__,
            method=hltype(RLinkLocalSession.onLeave),
            realm=hluserid(self.config.realm),
            authid=hluserid(self._authid),
            authrole=hluserid(self._authrole),
            session=hlid(self._session_id),
            details=details)

        BridgeSession.onLeave(self, details)
Example #24
 def _ready(s):
     # this is different from "self.config.controller._realm" !!
     self.log.info(
         'Container component ready: component_id="{component_id}", realm="{realm}", authrole="{authrole}", authid="{authid}", session={session} {func}',
         func=hltype(self.onJoin),
         component_id=hlid(component_id),
         realm=hlid(session._realm),
         authid=hlid(session._authid),
         authrole=hlid(session._authrole),
         session=hlid(session._session_id))
     if not joined_d.called:
         joined_d.callback(None)
Example #25
    def start_router_realm_role(self,
                                realm_id,
                                role_id,
                                role_config,
                                details=None):
        """
        Start a role on a realm running on this router worker.

        :param realm_id: The ID of the realm the role should be started on.
        :type realm_id: str

        :param role_id: The ID of the role to start under.
        :type role_id: str

        :param role_config: The role configuration.
        :type role_config: dict

        :param details: Call details.
        :type details: :class:`autobahn.wamp.types.CallDetails`
        """
        self.log.info(
            'Starting role "{role_id}" on realm "{realm_id}" {method}',
            role_id=role_id,
            realm_id=realm_id,
            method=hltype(self.start_router_realm_role))

        if realm_id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object",
                                   "No realm with ID '{}'".format(realm_id))

        if role_id in self.realms[realm_id].roles:
            raise ApplicationError(
                u"crossbar.error.already_exists",
                "A role with ID '{}' already exists in realm with ID '{}'".
                format(role_id, realm_id))

        self.realms[realm_id].roles[role_id] = RouterRealmRole(
            role_id, role_config)

        realm = self.realms[realm_id].config['name']
        self._router_factory.add_role(realm, role_config)

        topic = u'{}.on_router_realm_role_started'.format(self._uri_prefix)
        event = self.realms[realm_id].roles[role_id].marshal()
        caller = details.caller if details else None
        self.publish(topic, event, options=PublishOptions(exclude=caller))

        self.log.info('role {role_id} on realm {realm_id} started',
                      realm_id=realm_id,
                      role_id=role_id,
                      role_config=role_config)
        return event
Example #26
 def onJoin(self, details):
     # register our API on all configured sessions and then fire onready
     #
     on_ready = self.config.extra.get('onready',
                                      None) if self.config.extra else None
     try:
         for session, prefix, _ in self._expose_on_sessions:
             regs = yield session.register(
                 self,
                 options=RegisterOptions(details_arg='details'),
                 prefix=prefix)
             for reg in regs:
                 if isinstance(reg, Registration):
                     self.log.debug(
                         'Registered WAMP meta procedure <{proc}> on realm "{realm}"',
                         proc=reg.procedure,
                         realm=session._realm)
                 elif isinstance(reg, Failure):
                     err = reg.value
                     if isinstance(err, ApplicationError):
                         self.log.warn(
                             'Failed to register WAMP meta procedure on realm "{realm}": {error} ("{message}")',
                             realm=session._realm,
                             error=err.error,
                             message=err.error_message())
                     else:
                         self.log.warn(
                             'Failed to register WAMP meta procedure on realm "{realm}": {error}',
                             realm=session._realm,
                             error=str(err))
                 else:
                     self.log.warn(
                         'Failed to register WAMP meta procedure on realm "{realm}": {error}',
                         realm=session._realm,
                         error=str(reg))
     except Exception as e:
         self.log.failure()
         if on_ready:
             on_ready.errback(e)
         self.leave()
     else:
         self.log.info(
             '{func}: realm service session attached to realm "{realm}" [session_id={session_id}, authid="{authid}", authrole="{authrole}", on_ready={on_ready}]',
             func=hltype(self.onJoin),
             realm=hlid(details.realm),
             session_id=hlid(details.session),
             authid=hlid(details.authid),
             authrole=hlid(details.authrole),
             on_ready=on_ready,
         )
         if on_ready:
             on_ready.callback(self)
Example #27
def create_router_transport(worker, transport_id, config):
    """
    Factory for creating router (listening) transports.

    :param worker:
    :param transport_id:
    :param config:
    :return:
    """
    worker.log.info('Creating router transport for "{transport_id}" {factory}',
                    transport_id=transport_id,
                    factory=hltype(create_router_transport))

    if config['type'] == 'web' or (config['type'] == 'universal' and config.get('web', {})):
        transport = RouterWebTransport(worker, transport_id, config)
    else:
        transport = RouterTransport(worker, transport_id, config)

    worker.log.info('Router transport created for "{transport_id}" {transport_class}',
                    transport_id=transport_id,
                    transport_class=hltype(transport.__class__))
    return transport
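
A minimal sketch (illustrative classes) of the dispatch above: configurations of type "web", or "universal" configurations that carry a "web" section, get the Web-capable transport class, everything else gets the plain one.

class RouterTransportSketch(object):
    def __init__(self, transport_id, config):
        self.transport_id = transport_id
        self.config = config


class RouterWebTransportSketch(RouterTransportSketch):
    pass


def create_transport(transport_id, config):
    if config['type'] == 'web' or (config['type'] == 'universal' and config.get('web', {})):
        return RouterWebTransportSketch(transport_id, config)
    return RouterTransportSketch(transport_id, config)


print(type(create_transport('ws0', {'type': 'web'})).__name__)          # RouterWebTransportSketch
print(type(create_transport('tcp0', {'type': 'rawsocket'})).__name__)   # RouterTransportSketch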
Example #28
File: router.py Project: wp4613/crossbar
 def get_service_session(self, realm, authrole):
     authrole = authrole or 'trusted'
     session = None
     if realm in self._service_sessions:
         if authrole in self._service_sessions[realm]:
             session = self._service_sessions[realm][authrole]
     self.log.debug(
         '{func}(realm="{realm}", authrole="{authrole}") -> {session}',
         func=hltype(self.get_service_session),
         session=session,
         realm=hlid(realm),
         authrole=hlid(authrole))
     return succeed(session)
Example #30
    def get_web_transport_services(self, transport_id, details=None):
        self.log.debug('{func}(transport_id={transport_id})',
                       func=hltype(self.get_web_transport_services),
                       transport_id=hlid(transport_id))

        transport = self.transports.get(transport_id, None)
        if not transport or \
           not isinstance(transport, self.personality.RouterWebTransport) or \
           transport.state != self.personality.RouterTransport.STATE_STARTED:
            emsg = "No transport with ID '{}' or transport is not a Web transport".format(transport_id)
            self.log.debug(emsg)
            raise ApplicationError('crossbar.error.not_running', emsg)

        return sorted(transport._config.get('paths', []))
Example #31
 def restart_component():
     # Think: if this below start_component() fails,
     # we'll still schedule *exactly one* new re-start
     # attempt for it, right?
     self.log.info(
         '{func}: now restarting previously closed component {component_id} automatically .. [restart_mode={restart_mode}, was_clean={was_clean}]',
         func=hltype(_component_closed),
         component_id=hlid(component_id),
         restart_mode=hlval(self._restart_mode),
         was_clean=hlval(was_clean))
     return self.start_component(
         component_id,
         config,
         reload_modules=reload_modules,
         details=details,
     )
Example #32
    def start_worker(self, worker_id, worker_type, worker_options=None, details=None):
        """
        Start a new worker process in the node.
        """
        self.log.info('Starting {worker_type} worker {worker_id} {worker_klass}',
                      worker_type=worker_type,
                      worker_id=hlid(worker_id),
                      worker_klass=hltype(NodeController.start_worker))
        if worker_type == u'guest':
            return self._start_guest_worker(worker_id, worker_options, details=details)

        elif worker_type in self._node._native_workers:
            return self._start_native_worker(worker_type, worker_id, worker_options, details=details)

        else:
            raise Exception('invalid worker type "{}"'.format(worker_type))
Example #33
    def onJoin(self, details, publish_ready=True):
        """
        Called when worker process has joined the node's management realm.
        """
        self.log.info('Router worker session for "{worker_id}" joined realm "{realm}" on node router {method}',
                      realm=self._realm,
                      worker_id=self._worker_id,
                      session_id=details.session,
                      method=hltype(RouterController.onJoin))

        yield WorkerController.onJoin(self, details, publish_ready=False)

        # WorkerController.publish_ready()
        self.publish_ready()

        self.log.info('Router worker session for "{worker_id}" ready',
                      worker_id=self._worker_id)
Example #35
    def start_router_realm_role(self, realm_id, role_id, role_config, details=None):
        """
        Start a role on a realm running on this router worker.

        :param realm_id: The ID of the realm the role should be started on.
        :type realm_id: str

        :param role_id: The ID of the role to start under.
        :type role_id: str

        :param role_config: The role configuration.
        :type role_config: dict

        :param details: Call details.
        :type details: autobahn.wamp.types.CallDetails
        """
        self.log.info('Starting role "{role_id}" on realm "{realm_id}" {method}',
                      role_id=role_id, realm_id=realm_id, method=hltype(self.start_router_realm_role))

        if realm_id not in self.realms:
            raise ApplicationError(u"crossbar.error.no_such_object", "No realm with ID '{}'".format(realm_id))

        if role_id in self.realms[realm_id].roles:
            raise ApplicationError(u"crossbar.error.already_exists", "A role with ID '{}' already exists in realm with ID '{}'".format(role_id, realm_id))

        self.realms[realm_id].roles[role_id] = RouterRealmRole(role_id, role_config)

        realm = self.realms[realm_id].config['name']
        self._router_factory.add_role(realm, role_config)

        topic = u'{}.on_router_realm_role_started'.format(self._uri_prefix)
        event = {
            u'id': role_id
        }
        caller = details.caller if details else None
        self.publish(topic, event, options=PublishOptions(exclude=caller))

        self.log.info('role {role_id} on realm {realm_id} started', realm_id=realm_id, role_id=role_id, role_config=role_config)
Example #36
File: node.py Project: goeddea/crossbar
 def boot(self):
     self.log.info('Booting node {method}', method=hltype(Node.boot))
     return self.boot_from_config(self._config)
Example #37
    def start_router_realm(self, realm_id, realm_config, details=None):
        """
        Starts a realm on this router worker.

        :param realm_id: The ID of the realm to start.
        :type realm_id: str

        :param realm_config: The realm configuration.
        :type realm_config: dict

        :param details: Call details.
        :type details: autobahn.wamp.types.CallDetails
        """
        self.log.info('Starting router realm {realm_id} {method}',
                      realm_id=hlid(realm_id), method=hltype(RouterController.start_router_realm))

        # prohibit starting a realm twice
        #
        if realm_id in self.realms:
            emsg = "Could not start realm: a realm with ID '{}' is already running (or starting)".format(realm_id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.already_running', emsg)

        # check configuration
        #
        try:
            self.personality.check_router_realm(self.personality, realm_config)
        except Exception as e:
            emsg = "Invalid router realm configuration: {}".format(e)
            self.log.error(emsg)
            raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

        # URI of the realm to start
        realm = realm_config['name']

        # router/realm wide options
        options = realm_config.get('options', {})

        enable_meta_api = options.get('enable_meta_api', True)

        # expose router/realm service API additionally on local node management router
        bridge_meta_api = options.get('bridge_meta_api', False)
        if bridge_meta_api:
            # FIXME
            bridge_meta_api_prefix = u'crossbar.worker.{worker_id}.realm.{realm_id}.root.'.format(worker_id=self._worker_id, realm_id=realm_id)
        else:
            bridge_meta_api_prefix = None

        # track realm
        rlm = self.router_realm_class(realm_id, realm_config)
        self.realms[realm_id] = rlm
        self.realm_to_id[realm] = realm_id

        # create a new router for the realm
        router = self._router_factory.start_realm(rlm)

        # add a router/realm service session
        extra = {
            # the RouterServiceAgent will fire this when it is ready
            'onready': Deferred(),

            # if True, forward the WAMP meta API (implemented by RouterServiceAgent)
            # that is normally only exposed on the app router/realm _additionally_
            # to the local node management router.
            'enable_meta_api': enable_meta_api,
            'bridge_meta_api': bridge_meta_api,
            'bridge_meta_api_prefix': bridge_meta_api_prefix,

            # the management session on the local node management router to which
            # the WAMP meta API is exposed to additionally, when the bridge_meta_api option is set
            'management_session': self,
        }
        cfg = ComponentConfig(realm, extra)
        rlm.session = RouterServiceAgent(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')

        yield extra['onready']

        self.log.info('Realm "{realm_id}" (name="{realm_name}") started', realm_id=realm_id, realm_name=rlm.session._realm)

        self.publish(u'{}.on_realm_started'.format(self._uri_prefix), realm_id)
Example #38
    def start_web_transport_service(self, transport_id, path, config, details=None):
        """
        Start a service on a Web transport.

        :param transport_id: The ID of the transport to start the Web transport service on.
        :type transport_id: str

        :param path: The path (absolute URL, eg "/myservice1") on which to start the service.
        :type path: str

        :param config: The Web service configuration.
        :type config: dict

        :param details: Call details.
        :type details: autobahn.wamp.types.CallDetails
        """
        if type(config) != dict or 'type' not in config:
            raise ApplicationError(u'crossbar.invalid_argument', 'config parameter must be dict with type attribute')

        self.log.info('Starting "{service_type}" Web service on path "{path}" of transport "{transport_id}" {method}',
                      service_type=config.get('type', None),
                      path=path,
                      transport_id=transport_id,
                      method=hltype(self.start_web_transport_service))

        transport = self.transports.get(transport_id, None)
        if not transport:
            emsg = 'Cannot start service on transport: no transport with ID "{}"'.format(transport_id)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.not_running', emsg)

        if not isinstance(transport, self.personality.RouterWebTransport):
            emsg = 'Cannot start service on transport: transport is not a Web transport (transport_type={})'.format(hltype(transport.__class__))
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.not_running', emsg)

        if transport.state != self.personality.RouterTransport.STATE_STARTED:
            emsg = 'Cannot start service on Web transport "{}": transport is not running (transport_state={})'.format(
                transport_id, self.personality.RouterWebTransport.STATES.get(transport.state, None))
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.not_running', emsg)

        if path in transport.root:
            emsg = 'Cannot start service on Web transport "{}": a service is already running on path "{}"'.format(transport_id, path)
            self.log.error(emsg)
            raise ApplicationError(u'crossbar.error.already_running', emsg)

        caller = details.caller if details else None
        self.publish(self._uri_prefix + u'.on_web_transport_service_starting',
                     transport_id,
                     path,
                     options=PublishOptions(exclude=caller))

        # now actually add the web service ..
        # note: currently this is NOT async, but direct/sync.
        webservice_factory = self.personality.WEB_SERVICE_FACTORIES[config['type']]
        webservice = webservice_factory.create(transport, path, config)
        transport.root[path] = webservice

        on_web_transport_service_started = {
            u'transport_id': transport_id,
            u'path': path,
            u'config': config
        }
        caller = details.caller if details else None
        self.publish(self._uri_prefix + u'.on_web_transport_service_started',
                     transport_id,
                     path,
                     on_web_transport_service_started,
                     options=PublishOptions(exclude=caller))

        return on_web_transport_service_started
Example #39
File: node.py Project: goeddea/crossbar
    def start(self, node_id=None):
        """
        Starts this node. This will start a node controller and then spawn new worker
        processes as needed.
        """
        self.log.info('Starting {personality} node {method}',
                      personality=self.personality.NAME,
                      method=hltype(Node.start))

        # a configuration must have been loaded before
        if not self._config:
            raise Exception("No node configuration set")

        # a node can only be started once for now
        assert self._shutdown_complete is None
        assert self._node_id is None

        # get controller config/options
        controller_config = self._config.get('controller', {})
        controller_options = controller_config.get('options', {})

        # the node ID: CLI takes precedence over config over hostname
        if node_id:
            self._node_id = node_id
            _node_id_source = 'explicit run-time argument'
        elif 'id' in controller_config:
            self._node_id = controller_config['id']
            _node_id_source = 'explicit configuration'
        else:
            self._node_id = u'{}'.format(socket.gethostname()).lower()
            _node_id_source = 'hostname'
        self.log.info('Node ID {node_id} set from {node_id_source}',
                      node_id=hlid(self._node_id),
                      node_id_source=_node_id_source)

        # set controller process title
        try:
            import setproctitle
        except ImportError:
            self.log.warn("Warning, could not set process title (setproctitle not installed)")
        else:
            setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

        # local node management router
        self._router_factory = RouterFactory(self._node_id, None)
        self._router_session_factory = RouterSessionFactory(self._router_factory)
        rlm_config = {
            'name': self._realm
        }
        rlm = RouterRealm(None, rlm_config)
        router = self._router_factory.start_realm(rlm)

        # setup global static roles
        self._add_global_roles()

        # always add a realm service session
        cfg = ComponentConfig(self._realm)
        rlm.session = (self.ROUTER_SERVICE)(cfg, router)
        self._router_session_factory.add(rlm.session, authrole=u'trusted')
        self.log.debug('Router service session attached [{router_service}]', router_service=qual(self.ROUTER_SERVICE))

        # add the node controller singleton component
        self._controller = self.NODE_CONTROLLER(self)

        self._router_session_factory.add(self._controller, authrole=u'trusted')
        self.log.debug('Node controller attached [{node_controller}]', node_controller=qual(self.NODE_CONTROLLER))

        # add extra node controller components
        self._add_extra_controller_components(controller_options)

        # setup Node shutdown triggers
        self._set_shutdown_triggers(controller_options)

        # setup node shutdown Deferred
        self._shutdown_complete = Deferred()

        # startup the node personality ..
        yield self.personality.Node.boot(self)

        # notify systemd that we are fully up and running
        try:
            import sdnotify
        except ImportError:
            # do nothing on non-systemd platforms
            pass
        else:
            sdnotify.SystemdNotifier().notify("READY=1")

        # return a shutdown deferred which we will fire to notify the code that
        # called start() - which is the main crossbar boot code
        res = {
            'shutdown_complete': self._shutdown_complete
        }
        returnValue(res)
Example #40
    def boot_from_config(self, config):
        """
        Startup elements in the node as specified in the provided node configuration.
        """
        # get controller configuration subpart
        controller = config.get('controller', {})
        parallel_worker_start = controller.get('options', {}).get('enable_parallel_worker_start', False)

        self.log.info('{bootmsg} {method}',
                      bootmsg=hl('Booting node from local configuration [parallel_worker_start={}] ..'.format(parallel_worker_start),
                                 color='green', bold=True),
                      method=hltype(Node.boot_from_config))

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.call(u'crossbar.start_manhole', controller['manhole'], options=CallOptions())
            self.log.debug("controller: manhole started")

        # startup all workers
        workers = config.get('workers', [])
        if len(workers):
            self.log.info(hl('Will start {} worker{} ..'.format(len(workers), 's' if len(workers) > 1 else ''), color='green', bold=True))
        else:
            self.log.info(hl('No workers configured, nothing to do', color='green', bold=True))

        dl = []
        for worker in workers:

            # worker ID
            if 'id' in worker:
                worker_id = worker['id']
            else:
                worker_id = u'worker{:03d}'.format(self._worker_no)
                worker['id'] = worker_id
                self._worker_no += 1

            # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
            worker_type = worker['type']

            # native worker processes setup
            if worker_type in self._native_workers:

                # set logname depending on native worker type
                worker_logname = '{} {}'.format(self._native_workers[worker_type]['logname'], hlid(worker_id))

                # any worker specific options
                worker_options = worker.get('options', {})

                # start the (native) worker
                self.log.info(
                    "Order node to start {worker_logname}",
                    worker_logname=worker_logname,
                )

                d = self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker_options, options=CallOptions())

                @inlineCallbacks
                def configure_worker(res, worker_logname, worker_type, worker_id, worker):
                    self.log.info(
                        "Ok, node has started {worker_logname}",
                        worker_logname=worker_logname,
                    )

                    # now configure the worker
                    self.log.info(
                        "Configuring {worker_logname} ..",
                        worker_logname=worker_logname,
                    )
                    method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_'))
                    try:
                        config_fn = getattr(self, method_name)
                    except AttributeError:
                        raise ValueError(
                            "A native worker of type '{}' is configured but "
                            "there is no method '{}' on {}".format(worker_type, method_name, type(self))
                        )
                    yield config_fn(worker_logname, worker_id, worker)
                    self.log.info(
                        "Ok, {worker_logname} configured",
                        worker_logname=worker_logname,
                    )

                d.addCallback(configure_worker, worker_logname, worker_type, worker_id, worker)

            # guest worker processes setup
            elif worker_type == u'guest':

                # now actually start the (guest) worker ..

                # FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers
                # only take the options (which is part of the whole config item for the worker)
                d = self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker, options=CallOptions())

            else:
                raise Exception('logic error: unexpected worker_type="{}"'.format(worker_type))

            if parallel_worker_start:
                dl.append(d)
            else:
                yield d

        yield gatherResults(dl)

        self.log.info(hl('Ok, local node configuration booted successfully!', color='green', bold=True))
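
A minimal sketch (plain Twisted, illustrative names) of the parallel-versus-sequential start logic above: either collect each worker's Deferred and wait for all of them with gatherResults, or yield on each one before starting the next.

from twisted.internet import reactor
from twisted.internet.defer import Deferred, gatherResults, inlineCallbacks


def start_worker(worker_id):
    d = Deferred()
    reactor.callLater(0.1, d.callback, worker_id)   # pretend startup takes 100 ms
    return d


@inlineCallbacks
def boot(parallel_worker_start=True):
    dl = []
    for worker_id in ('worker001', 'worker002', 'worker003'):
        d = start_worker(worker_id)
        if parallel_worker_start:
            dl.append(d)    # start everything now, wait for all of it below
        else:
            yield d         # wait for this worker before starting the next one
    started = yield gatherResults(dl)
    print('started:', started)
    reactor.stop()


if __name__ == '__main__':
    boot()
    reactor.run()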
Example #41
File: main.py Project: goeddea/crossbar
def _run_command_exec_worker(options, reactor=None, personality=None):
    """
    Entry point into (native) worker processes. This wires up stuff such that
    a worker instance is talking WAMP-over-stdio to the node controller.
    """
    import os
    import sys
    import platform
    import signal
    import importlib

    # https://coverage.readthedocs.io/en/coverage-4.4.2/subprocess.html#measuring-sub-processes
    MEASURING_COVERAGE = False
    if 'COVERAGE_PROCESS_START' in os.environ:
        try:
            import coverage
        except ImportError:
            pass
        else:
            # The following will read the environment variable COVERAGE_PROCESS_START,
            # and that should be set to the .coveragerc file:
            #
            #   export COVERAGE_PROCESS_START=${PWD}/.coveragerc
            #
            coverage.process_startup()
            MEASURING_COVERAGE = True

    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor(options.reactor)

    # make sure logging to something else than stdio is setup _first_
    from crossbar._logging import make_JSON_observer, cb_logging_aware
    from txaio import make_logger, start_logging
    from twisted.logger import globalLogPublisher
    from twisted.python.reflect import qual

    log = make_logger()

    # Print a magic phrase that tells the capturing logger that it supports
    # Crossbar's rich logging
    print(cb_logging_aware, file=sys.__stderr__)
    sys.__stderr__.flush()

    flo = make_JSON_observer(sys.__stderr__)
    globalLogPublisher.addObserver(flo)

    # Ignore SIGINT so we get consistent behavior on control-C versus
    # sending SIGINT to the controller process. When the controller is
    # shutting down, it sends TERM to all its children but ctrl-C
    # handling will send a SIGINT to all the processes in the group
    # (so then the controller sends a TERM but the child already or
    # will very shortly get a SIGINT as well). Twisted installs signal
    # handlers, but not for SIGINT if there's already a custom one
    # present.
    def ignore(sig, frame):
        log.debug("Ignoring SIGINT in worker.")
    signal.signal(signal.SIGINT, ignore)

    # actually begin logging
    start_logging(None, options.loglevel)

    # get personality klass, eg "crossbar.personality.Personality"
    l = options.personality.split('.')
    personality_module, personality_klass = '.'.join(l[:-1]), l[-1]

    # now load the personality module and class
    _mod = importlib.import_module(personality_module)
    Personality = getattr(_mod, personality_klass)

    # get worker klass, eg "crossbar.worker.container.ContainerController"
    l = options.klass.split('.')
    worker_module, worker_klass = '.'.join(l[:-1]), l[-1]

    # now load the worker module and class
    _mod = importlib.import_module(worker_module)
    klass = getattr(_mod, worker_klass)

    log.info(
        'Starting worker "{worker_id}" for node "{node_id}" with personality "{personality}" {worker_class}',
        worker_id=options.worker,
        node_id=options.node,
        personality=Personality.NAME,
        worker_class=hltype(klass),
    )
    log.info(
        'Running as PID {pid} on {python}-{reactor}',
        pid=os.getpid(),
        python=platform.python_implementation(),
        reactor=qual(reactor.__class__).split('.')[-1],
    )
    if MEASURING_COVERAGE:
        log.info(hl('Code coverage measurements enabled (coverage={coverage_version}).', color='green', bold=True),
                 coverage_version=coverage.__version__)

    # set process title if requested to
    #
    try:
        import setproctitle
    except ImportError:
        log.debug("Could not set worker process title (setproctitle not installed)")
    else:
        if options.title:
            setproctitle.setproctitle(options.title)
        else:
            setproctitle.setproctitle('crossbar-worker [{}]'.format(options.klass))

    # node directory
    #
    options.cbdir = os.path.abspath(options.cbdir)
    os.chdir(options.cbdir)
    # log.msg("Starting from node directory {}".format(options.cbdir))


    from twisted.internet.error import ConnectionDone, ReactorNotRunning
    from autobahn.twisted.websocket import WampWebSocketServerProtocol

    class WorkerServerProtocol(WampWebSocketServerProtocol):

        def connectionLost(self, reason):
            # the behavior here differs slightly depending on whether we're
            # shutting down in an orderly fashion or because of "issues"
            if isinstance(reason.value, ConnectionDone):
                was_clean = True
            else:
                was_clean = False

            try:
                # this log message is unlikely to reach the controller (unless
                # only stdin/stdout pipes were lost, but not stderr)
                if was_clean:
                    log.info("Connection to node controller closed cleanly")
                else:
                    log.warn("Connection to node controller lost: {reason}", reason=reason)

                # give the WAMP transport a chance to do its thing
                WampWebSocketServerProtocol.connectionLost(self, reason)
            except:
                # we're in the process of shutting down .. so ignore ..
                pass
            finally:
                # after the connection to the node controller is gone,
                # the worker is orphaned and should exit

                # determine process exit code
                if was_clean:
                    exit_code = 0
                else:
                    exit_code = 1

                # exit the whole worker process when the reactor has stopped
                reactor.addSystemEventTrigger('after', 'shutdown', os._exit, exit_code)
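                # (os._exit terminates the process immediately, without running
                # further Python cleanup; presumably this avoids hanging in a
                # half-torn-down worker)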

                # stop the reactor
                try:
                    reactor.stop()
                except ReactorNotRunning:
                    pass

    try:
        # define a WAMP application session factory
        #
        from autobahn.wamp.types import ComponentConfig

        def make_session():
            session_config = ComponentConfig(realm=options.realm, extra=options)
            session = klass(config=session_config, reactor=reactor, personality=Personality)
            return session

        # create a WAMP-over-WebSocket transport server factory
        #
        from autobahn.twisted.websocket import WampWebSocketServerFactory
        transport_factory = WampWebSocketServerFactory(make_session, u'ws://localhost')
        transport_factory.protocol = WorkerServerProtocol
        transport_factory.setProtocolOptions(failByDrop=False)

        # create a protocol instance and wire up to stdio
        #
        from twisted.python.runtime import platform as _platform
        from twisted.internet import stdio
        proto = transport_factory.buildProtocol(None)
        if _platform.isWindows():
            stdio.StandardIO(proto)
        else:
            stdio.StandardIO(proto, stdout=3)
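        # note: with stdout=3 the WAMP traffic is written to file descriptor 3
        # (presumably a dedicated pipe set up by the controller), keeping the
        # worker's regular stdout free; on Windows only plain stdin/stdout are used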

        # now start reactor loop
        #
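        # (the vmprof profiling branch below is hard-disabled via "if False:")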
        if False:
            log.info("vmprof enabled.")

            import os
            import vmprof

            PROFILE_FILE = 'vmprof_{}.dat'.format(os.getpid())

            outfd = os.open(PROFILE_FILE, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
            vmprof.enable(outfd, period=0.01)

            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

            vmprof.disable()
        else:
            log.info(hl('Entering event reactor ...', color='cyan', bold=True))
            reactor.run()

    except Exception as e:
        log.info("Unhandled exception: {e}", e=e)
        if reactor.running:
            reactor.addSystemEventTrigger('after', 'shutdown', os._exit, 1)
            reactor.stop()
        else:
            sys.exit(1)
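
# A minimal sketch (argparse and the flag names are assumptions -- this is not
# the actual crossbar CLI; only the attribute names are inferred from the
# options object read above) of the options this worker entry point consumes:

import argparse

def _make_worker_options(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--cbdir', help='node directory')
    parser.add_argument('--node', help='node ID')
    parser.add_argument('--worker', help='worker ID')
    parser.add_argument('--realm', help='WAMP realm')
    parser.add_argument('--klass', help='worker class, e.g. crossbar.worker.container.ContainerController')
    parser.add_argument('--personality', help='personality class, e.g. crossbar.personality.Personality')
    parser.add_argument('--loglevel', default='info', help='log level')
    parser.add_argument('--title', default=None, help='optional process title')
    return parser.parse_args(argv)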
Example #42
0
File: main.py Project: goeddea/crossbar
def _run_command_start(options, reactor, personality):
    """
    Subcommand "crossbar start".
    """
    # do not allow to run more than one Crossbar.io instance
    # from the same Crossbar.io node directory
    #
    pid_data = _check_is_running(options.cbdir)
    if pid_data:
        print("Crossbar.io is already running from node directory {} (PID {}).".format(options.cbdir, pid_data['pid']))
        sys.exit(1)
    else:
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        with open(fp, 'wb') as fd:
            argv = options.argv
            options_dump = vars(options)
            pid_data = {
                'pid': os.getpid(),
                'argv': argv,
                'options': {x: y for x, y in options_dump.items()
                            if x not in ["func", "argv"]}
            }
            fd.write("{}\n".format(
                json.dumps(
                    pid_data,
                    sort_keys=False,
                    indent=4,
                    separators=(', ', ': '),
                    ensure_ascii=False
                )
            ).encode('utf8'))
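            # the resulting PID file (_PID_FILENAME) thus contains JSON like
            # (illustrative values only):
            #   {"pid": 12345, "argv": ["crossbar", "start"], "options": {...}}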

    # remove node PID file when reactor exits
    #
    def remove_pid_file():
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        if os.path.isfile(fp):
            os.remove(fp)
    reactor.addSystemEventTrigger('after', 'shutdown', remove_pid_file)

    log = make_logger()

    # represents the running Crossbar.io node
    #
    node_options = personality.NodeOptions(debug_lifecycle=options.debug_lifecycle,
                                           debug_programflow=options.debug_programflow)

    node = personality.Node(personality,
                            options.cbdir,
                            reactor=reactor,
                            options=node_options)

    # print the banner, personality and node directory
    #
    for line in personality.BANNER.splitlines():
        log.info(hl(line, color='yellow', bold=True))
    log.info('')
    log.info('Initializing {node_personality} node from node directory {cbdir} {node_class}',
             node_personality=personality,
             cbdir=hlid(options.cbdir),
             node_class=hltype(personality.Node))

    # possibly generate new node key
    #
    node.load_keys(options.cbdir)

    # check and load the node configuration
    #
    try:
        node.load_config(options.config)
    except InvalidConfigException as e:
        log.failure()
        log.error("Invalid node configuration")
        log.error("{e!s}", e=e)
        sys.exit(1)
    except:
        raise

    # https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IReactorCore.html
    # Each "system event" in Twisted, such as 'startup', 'shutdown', and 'persist', has 3 phases:
    # 'before', 'during', and 'after' (in that order, of course). These events will be fired
    # internally by the Reactor.

    def before_reactor_started():
        term_print('CROSSBAR:REACTOR_STARTING')

    def after_reactor_started():
        term_print('CROSSBAR:REACTOR_STARTED')

    reactor.addSystemEventTrigger('before', 'startup', before_reactor_started)
    reactor.addSystemEventTrigger('after', 'startup', after_reactor_started)

    def before_reactor_stopped():
        term_print('CROSSBAR:REACTOR_STOPPING')

    def after_reactor_stopped():
        # FIXME: we are indeed reaching this line; however, the log output
        # does not work (it also doesn't work using plain old print). Dunno why.

        # My theory about this issue: by the time this line is reached,
        # Twisted has already closed the stdout/stderr pipes. Hence we do an
        # evil trick: we directly write to the process' controlling terminal
        # https://unix.stackexchange.com/a/91716/52500
        term_print('CROSSBAR:REACTOR_STOPPED')

    reactor.addSystemEventTrigger('before', 'shutdown', before_reactor_stopped)
    reactor.addSystemEventTrigger('after', 'shutdown', after_reactor_stopped)

    # now actually start the node ..
    #
    exit_info = {'was_clean': None}

    def start_crossbar():
        term_print('CROSSBAR:NODE_STARTING')

        #
        # ****** main entry point of node ******
        #
        d = node.start()

        # node started successfully, and later ..
        def on_startup_success(_shutdown_complete):
            term_print('CROSSBAR:NODE_STARTED')

            shutdown_complete = _shutdown_complete['shutdown_complete']

            # .. exits, signaling exit status _inside_ the result returned
            def on_shutdown_success(shutdown_info):
                exit_info['was_clean'] = shutdown_info['was_clean']
                log.info('on_shutdown_success: was_clean={was_clean}', was_clean=shutdown_info['was_clean'])

            # should not arrive here:
            def on_shutdown_error(err):
                exit_info['was_clean'] = False
                log.error("on_shutdown_error: {tb}", tb=failure_format_traceback(err))

            shutdown_complete.addCallbacks(on_shutdown_success, on_shutdown_error)

        # node could not even start
        def on_startup_error(err):
            term_print('CROSSBAR:NODE_STARTUP_FAILED')
            exit_info['was_clean'] = False
            log.error("Could not start node: {tb}", tb=failure_format_traceback(err))
            if reactor.running:
                reactor.stop()

        d.addCallbacks(on_startup_success, on_startup_error)

    # Call a function when the reactor is running. If the reactor has not started, the callable
    # will be scheduled to run when it does start.
    reactor.callWhenRunning(start_crossbar)

    # Special feature to automatically shut down the node after the given number of seconds
    if options.shutdownafter:

        @inlineCallbacks
        def _shutdown():
            term_print('CROSSBAR:SHUTDOWN_AFTER_FIRED')
            shutdown_info = yield node.stop()
            exit_info['was_clean'] = shutdown_info['was_clean']
            term_print('CROSSBAR:SHUTDOWN_AFTER_COMPLETE')

        reactor.callLater(options.shutdownafter, _shutdown)

    # now enter event loop ..
    #
    log.info(hl('Entering event reactor ...', color='cyan', bold=True))
    term_print('CROSSBAR:REACTOR_ENTERED')
    reactor.run()

    # once the reactor has finally stopped, we get here, and at that point
    # exit_info['was_clean'] MUST have been set - either to True or to False
    # (otherwise we are missing a code path above)

    # exit the program with exit code depending on whether the node has been cleanly shut down
    if exit_info['was_clean'] is True:
        term_print('CROSSBAR:EXIT_WITH_SUCCESS')
        sys.exit(0)

    elif exit_info['was_clean'] is False:
        term_print('CROSSBAR:EXIT_WITH_ERROR')
        sys.exit(1)

    else:
        term_print('CROSSBAR:EXIT_WITH_INTERNAL_ERROR')
        sys.exit(1)
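
# A minimal sketch (assumptions: the node is launched with a plain "crossbar start"
# command, and the CROSSBAR:* markers emitted by term_print() above actually end up
# on the child's captured output stream; neither is guaranteed by the original
# source) of how an external supervisor could watch those lifecycle markers:

import subprocess

def wait_for_node_started(cmd=('crossbar', 'start')):
    # spawn the node and scan its output line by line
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    for line in proc.stdout:
        if 'CROSSBAR:NODE_STARTED' in line:
            return proc  # node is up, hand back the process handle
        if 'CROSSBAR:NODE_STARTUP_FAILED' in line:
            proc.wait()
            raise RuntimeError('node failed to start')
    # output ended without a marker: the process exited early
    proc.wait()
    raise RuntimeError('node exited before signalling startup')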
Example #43
0
File: node.py Project: goeddea/crossbar
    def boot_from_config(self, config):
        """
        Start up elements in the node as specified in the provided node configuration.
        """
        self.log.info('Configuring node from local configuration {method}',
                      method=hltype(Node.boot_from_config))

        # get controller configuration subpart
        controller = config.get('controller', {})

        # start Manhole in node controller
        if 'manhole' in controller:
            yield self._controller.call(u'crossbar.start_manhole', controller['manhole'], options=CallOptions())
            self.log.debug("controller: manhole started")

        # startup all workers
        workers = config.get('workers', [])
        if len(workers):
            self.log.info('Starting {nworkers} workers ...', nworkers=len(workers))
        else:
            self.log.info('No workers configured!')

        for worker in workers:

            # worker ID
            if 'id' in worker:
                worker_id = worker.pop('id')
            else:
                worker_id = u'worker-{:03d}'.format(self._worker_no)
                self._worker_no += 1

            # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
            worker_type = worker['type']

            # native worker processes setup
            if worker_type in self._native_workers:

                # set logname depending on native worker type
                worker_logname = '{} "{}"'.format(self._native_workers[worker_type]['logname'], worker_id)

                # any worker specific options
                worker_options = worker.get('options', {})

                # now actually start the (native) worker ..
                yield self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker_options, options=CallOptions())

                # setup native worker generic stuff
                method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_'))
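                # e.g. worker_type 'router' maps to self._configure_native_worker_router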
                try:
                    config_fn = getattr(self, method_name)
                except AttributeError:
                    raise ValueError(
                        "A native worker of type '{}' is configured but "
                        "there is no method '{}' on {}".format(worker_type, method_name, type(self))
                    )
                yield config_fn(worker_logname, worker_id, worker)

            # guest worker processes setup
            elif worker_type == u'guest':

                # now actually start the (guest) worker ..

                # FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers
                # only take the options (which is part of the whole config item for the worker)
                yield self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker, options=CallOptions())

            else:
                raise Exception('logic error: unexpected worker_type="{}"'.format(worker_type))

        self.log.info('Local node configuration applied successfully!')
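
# A minimal sketch (hypothetical values; only the keys actually read by
# boot_from_config() above -- 'controller', 'workers', and per-worker 'id',
# 'type' and 'options' -- are taken from the source) of a node configuration
# this method would accept:

example_config = {
    'controller': {
        # 'manhole': {...}      # optional; starts a Manhole in the node controller
    },
    'workers': [
        {
            'id': 'worker-001',  # optional; auto-numbered when omitted
            'type': 'router',    # a native worker type
            'options': {},       # worker-specific options
        },
        {
            'type': 'guest',     # guest workers pass the whole config item to start_worker
        },
    ],
}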