def forward(*args, **kwargs):
    """
    Forward a management API event received from CFC by re-publishing it
    locally under ``local_uri``.

    NOTE(review): this is a closure - ``pat``, ``local_uri``, ``remote_uri``,
    ``remote_uri_regex``, ``session`` and ``self`` are free variables bound in
    the enclosing scope.
    """
    details = kwargs.pop('details', None)
    if details:
        match = pat.match(details.topic)
        if match:
            # extract origin node/worker IDs from the remote topic URI
            node_id, worker_id = match.groups()
            # FIXME: map back from node authid (?) to node OID (as UUID string)?
            self.log.debug(
                'Forwarding CFC {forward_type} on mrealm {realm} from node {node_id} and worker {worker_id} [local=<{local_uri}>, remote=<{remote_uri}>]',
                forward_type=hl('EVENT'),
                local_uri=hlid(local_uri),
                remote_uri=hlid(details.topic),
                node_id=hluserid(node_id),
                worker_id=hluserid(worker_id),
                realm=hluserid(session._realm))
            # re-publish locally, prepending origin node/worker as positional args;
            # exclude_me=False so the forwarding session itself also receives the event
            return session.publish(
                local_uri,
                node_id,
                worker_id,
                *args,
                **kwargs,
                options=PublishOptions(exclude_me=False))

    # should not arrive here
    session.log.warn(
        'received unexpected event to forward for management API: local_uri={local_uri}, remote_uri={remote_uri}, remote_uri_regex={remote_uri_regex} details={details}',
        local_uri=local_uri,
        remote_uri=remote_uri,
        remote_uri_regex=remote_uri_regex,
        details=details)
def kill():
    # Kill every WAMP session of the calling client (by authid), then log success.
    caller_pubkey_hex = binascii.b2a_hex(caller_pubkey).decode()
    self._market_session.call('wamp.session.kill_by_authid', call_details.caller_authid)
    self.log.info(
        'Ok, session {caller_session} logged out for client with pubkey {caller_pubkey} ',
        caller_session=hlid(caller_session_id),
        caller_pubkey=hlid(caller_pubkey_hex))
def attach(self, session):
    """
    Implements :func:`autobahn.wamp.interfaces.IRouter.attach`
    """
    self.log.debug('{func}(session={session})', func=hltype(self.attach), session=session)

    # a session may only be attached once to this router
    if session._session_id in self._session_id_to_session:
        raise Exception("session with ID {} already attached".format(session._session_id))
    self._session_id_to_session[session._session_id] = session

    # hook the session into both pub/sub (broker) and rpc (dealer) handling
    self._broker.attach(session)
    self._dealer.attach(session)
    self._attached += 1

    self.log.info(
        'attached session {session} to realm "{realm}" (authid="{authid}", authrole="{authrole}") {func}',
        func=hltype(self.attach),
        session=hlid(session._session_id) if session else '',
        authid=hlid(session._authid),
        authrole=hlid(session._authrole),
        realm=hlid(session._realm))

    # the role features this router offers to the freshly attached session
    return {
        'broker': self._broker._role_features,
        'dealer': self._dealer._role_features
    }
def __init__(self, worker, config, path):
    """
    :param worker: The router worker controller within this Web service is started.
    :type worker: crossbar.worker.router.RouterController

    :param config: The Web service configuration item.
    :type config: dict

    :param path: URL base path under which this Web service is mounted
        (prepended to each configured route path).
    :type path: str
    """
    resource.Resource.__init__(self)
    self._worker = worker
    self._config = config
    # per-user HTTP session cache (populated elsewhere)
    self._session_cache = {}

    # WAMP realm and authrole used for backend topic calls
    self._realm_name = config.get('wamp', {}).get('realm', None)
    self._authrole = config.get('wamp', {}).get('authrole', 'anonymous')
    self._service_agent = worker.realm_by_name(self._realm_name).session

    # TODO:
    # We need to lookup the credentials for the current user based on the pre-established
    # HTTP session cookie, this will establish the 'authrole' the user is running as.
    # This 'authrole' can then be used to authorize the back-end topic call.
    # QUESTION:
    # Does the topic need the authid, if so, how do we pass it?
    #
    # This is our default (anonymous) session for unauthenticated users
    #
    router = worker._router_factory.get(self._realm_name)
    self._default_session = ApplicationSession(ComponentConfig(realm=self._realm_name, extra=None))
    worker._router_session_factory.add(self._default_session, router, authrole=self._authrole)

    # Setup Jinja2 to point to our templates folder (resolved relative to the node directory)
    #
    templates_dir = os.path.abspath(
        os.path.join(self._worker.config.extra.cbdir, config.get("templates")))
    env = Environment(loader=FileSystemLoader(templates_dir), autoescape=True)

    self.log.info(
        'WapResource added on realm "{realm}" (authrole "{authrole}") using templates directory "{templates_dir}"',
        realm=hlid(self._realm_name),
        authrole=hlid(self._authrole),
        templates_dir=hlid(templates_dir))

    # http://werkzeug.pocoo.org/docs/dev/routing/#werkzeug.routing.Map
    map = Map()

    # Add all our routes into 'map', note each route endpoint is a tuple of the
    # topic to call, and the template to use when rendering the results.
    for route in config.get('routes', {}):
        route_url = '/' + path + route.get('path')
        route_methods = [route.get('method')]
        route_endpoint = (route['call'], env.get_template(route['render']))
        map.add(Rule(route_url, methods=route_methods, endpoint=route_endpoint))
        self.log.info(
            'WapResource route added (url={route_url}, methods={route_methods}, endpoint={route_endpoint})',
            route_url=route_url,
            route_methods=route_methods,
            route_endpoint=route_endpoint)

    # http://werkzeug.pocoo.org/docs/dev/routing/#werkzeug.routing.MapAdapter
    # http://werkzeug.pocoo.org/docs/dev/routing/#werkzeug.routing.MapAdapter.match
    self._map_adapter = map.bind('/')
def has_role(self, realm: str, authrole: str) -> bool:
    """
    Check if a role with the given name is currently running in the given realm.

    :param realm: WAMP realm (name, _not_ run-time ID).
    :type realm: str

    :param authrole: WAMP authentication role (URI, _not_ run-time ID).
    :type authrole: str

    :returns: True if realm is running.
    :rtype: bool
    """
    authrole = authrole or 'trusted'

    result = realm in self.realm_to_id and self.realm_to_id[realm] in self.realms
    if result:
        realm_id = self.realm_to_id[realm]
        result = (authrole in self.realms[realm_id].role_to_id
                  and self.realms[realm_id].role_to_id[authrole] in self.realms[realm_id].roles)

        # note: this is to enable eg built-in "trusted" authrole
        # FIX: use .get() - the service-sessions map is populated lazily per
        # realm (see set_service_session), so indexing directly raised KeyError
        # for a running realm that had no service session registered yet
        result = result or authrole in self._service_sessions.get(realm, {})

    self.log.debug('{func}(realm="{realm}", authrole="{authrole}") -> {result}',
                   func=hltype(RouterController.has_role),
                   realm=hlid(realm),
                   authrole=hlid(authrole),
                   result=hlval(result))

    return result
def detach(self, session=None):
    """
    Detach one session - or all currently attached sessions - from this router.

    :param session: The session to detach, or ``None`` to detach all sessions.

    :returns: List of session IDs of the detached sessions.
    """
    self.log.debug('{func}(session={session})', func=hltype(self.detach), session=session)

    detached_session_ids = []
    if session is None:
        # detach all sessions from router
        for session in list(self._session_id_to_session.values()):
            self._detach(session)
            detached_session_ids.append(session._session_id)
    else:
        # detach single session from router
        self._detach(session)
        detached_session_ids.append(session._session_id)

    # FIX: guard *every* session attribute access - when called with
    # session=None and no sessions were attached, 'session' is still None
    # and the unguarded session._authid/._authrole/._realm accesses crashed
    self.log.info(
        'detached session {session} from realm "{realm}" (authid="{authid}", authrole="{authrole}", detached {detached_session_ids} sessions total) {func}',
        func=hltype(self.detach),
        session=hlid(session._session_id) if session else '',
        authid=hlid(session._authid) if session else '',
        authrole=hlid(session._authrole) if session else '',
        detached_session_ids=hlval(len(detached_session_ids)),
        realm=hlid(session._realm) if session else '')

    return detached_session_ids
def _add_global_roles(self):
    """
    Install the node-wide "controller" role on the node management router realm.
    """
    # there is exactly 1 WAMP component authenticated under authrole "controller":
    # the node controller - which can (locally) do "anything" under "crossbar."
    permissions = [{
        "uri": "crossbar.",
        "match": "prefix",
        "allow": {
            "call": True,
            "register": True,
            "publish": True,
            "subscribe": True
        },
        "disclose": {
            "caller": True,
            "publisher": True
        },
        "cache": True
    }]
    controller_role_config = {
        "name": "controller",
        "permissions": permissions
    }

    self._router_factory.add_role(self._realm, controller_role_config)

    self.log.info('{func} node-wide role "{authrole}" added on node management router realm "{realm}"',
                  func=hltype(self._add_global_roles),
                  authrole=hlid(controller_role_config['name']),
                  realm=hlid(self._realm))
def load_config(self, configfile=None):
    """
    Check and load the node configuration from:

    * from ``.crossbar/config.json`` or
    * from built-in (empty) default configuration

    This is the _second_ function being called after the Node has been instantiated.

    IMPORTANT: this function is run _before_ start of Twisted reactor!
    """
    config_source = None
    config_path = None

    # only fall back to the local config file when the node has not already
    # been configured from the XBR network
    if not self._config:
        fallback_config_fn = pkg_resources.resource_filename('crossbar', self.DEFAULT_CONFIG_PATH)
        with open(fallback_config_fn) as fd:
            fallback_config = json.load(fd)
        config_source, config_path = node.Node.load_config(self, configfile, fallback_config)
        self.log.info('Node configuration loaded from {config_source} ({config_path})',
                      config_source=hlid(config_source),
                      config_path=hlid(config_path))

    # Docker host integration
    if _HAS_DOCKER and self._config and 'controller' in self._config:
        self._enable_docker = self._config['controller'].get('enable_docker', False)

    return config_source, config_path
def on_management_event(*args, **kwargs):
    """
    Subscription handler that forwards node-local management events to the
    uplink CFC management session (``self._manager``).
    """
    if not (self._manager and self._manager.is_attached()):
        self.log.warn(
            "Can't foward management event: CFC session not attached")
        return

    details = kwargs.pop('details')

    # a node local event such as 'crossbar.node.on_ready' is mogrified to 'local.crossbar.node.on_ready'
    # (one reason is that URIs such as 'wamp.*' and 'crossbar.*' are restricted to trusted sessions, and
    # the management bridge is connecting over network to the uplink CFC and hence can't be trusted)
    #
    topic = self._translate_uri(details.topic)

    try:
        # acknowledge=True so failures surface here rather than being dropped
        yield self._manager.publish(topic, *args, options=PublishOptions(acknowledge=True), **kwargs)
    except Exception:
        self.log.failure(
            "Failed to forward event on topic '{topic}': {log_failure.value}",
            topic=topic,
        )
    else:
        # NOTE(review): both branches bind the same logger method - the
        # '.on_log' special case looks like it was meant to pick a different
        # log level; confirm original intent before changing behavior.
        if topic.endswith('.on_log'):
            log = self.log.debug
        else:
            log = self.log.debug

        log('Forwarded management {forward_type} to CFC [local_uri={local_topic}, remote_uri={remote_topic}]',
            forward_type=hl('EVENT'),
            local_topic=hlid(details.topic),
            remote_topic=hlid(topic))
def get_router_realm_link(self, realm_id, link_id, details=None):
    """
    Get router link detail information.

    :param realm_id: The ID of the (local) realm of the link.
    :type realm_id: str

    :param link_id: The ID of the router link to return.
    :type link_id: str

    :returns: Router link detail information.
    :rtype: dict

    :raises: ApplicationError ("crossbar.error.no_such_object") when the realm
        or the link does not exist.
    """
    # isinstance instead of exact type comparison (accepts str subclasses)
    assert isinstance(realm_id, str)
    assert isinstance(link_id, str)
    assert isinstance(details, CallDetails)

    self.log.info('Get router link {link_id} on realm {realm_id} {method}',
                  link_id=hlid(link_id),
                  realm_id=hlid(realm_id),
                  # FIX: was RouterController.get_router_realm_links (the
                  # plural *list* method) - log now names this method
                  method=hltype(RouterController.get_router_realm_link))

    if realm_id not in self.realms:
        raise ApplicationError("crossbar.error.no_such_object",
                               "No realm with ID '{}'".format(realm_id))

    rlink_manager = self.realms[realm_id].rlink_manager
    if link_id not in rlink_manager:
        raise ApplicationError("crossbar.error.no_such_object",
                               "No link with ID '{}'".format(link_id))

    rlink = rlink_manager[link_id]
    return rlink.marshal()
def _process_Market_MarketCreated(transactionHash, blockHash, args):
    """
    Process an on-chain ``XBRMarket.MarketCreated`` event: validate the terms/meta
    multihashes and persist a new market record to the embedded database
    (idempotent - an already stored market is only logged, not overwritten).
    """
    # /// Event emitted when a new market was created.
    # event MarketCreated (bytes16 indexed marketId, uint32 marketSeq, address owner, string terms, string meta,
    # address maker, uint256 providerSecurity, uint256 consumerSecurity, uint256 marketFee);
    self.log.info(
        '{event}: processing event (tx_hash={tx_hash}, block_hash={block_hash}) - XBR market created with ID {market_id})',
        event=hlcontract('XBRMarket.MarketCreated'),
        tx_hash=hlid('0x' + binascii.b2a_hex(transactionHash).decode()),
        block_hash=hlid('0x' + binascii.b2a_hex(blockHash).decode()),
        market_id=hlid(uuid.UUID(bytes=args.marketId)))

    # NOTE(review): the UUID is computed twice (also inside the log call above)
    market_id = uuid.UUID(bytes=args.marketId)

    # terms/meta are expected to be IPFS b58 multihashes (sha2-256); anything
    # else is only warned about, not rejected
    if args.terms:
        h = multihash.decode(multihash.from_b58_string(args.terms))
        if h.name != 'sha2-256':
            self.log.warn(
                'WARNING: XBRMarket.MarketCreated - terms "{terms}" is not an IPFS (sha2-256) b58-encoded multihash',
                terms=hlval(args.terms))

    if args.meta:
        h = multihash.decode(multihash.from_b58_string(args.meta))
        if h.name != 'sha2-256':
            self.log.warn(
                'WARNING: XBRMarket.MarketCreated - meta "{meta}" is not an IPFS (sha2-256) b58-encoded multihash',
                meta=hlval(args.meta))

    stored = False
    with self._db.begin(write=True) as txn:
        market = self._xbr.markets[txn, market_id]
        if market:
            # duplicate event delivery - keep the existing record untouched
            self.log.warn('{contract}(tx_hash={tx_hash}) record already stored in database.',
                          contract=hlcontract('MarketCreated'),
                          tx_hash=hlid('0x' + binascii.b2a_hex(transactionHash).decode()))
        else:
            market = cfxdb.xbr.market.Market()
            market.market = market_id
            market.timestamp = np.datetime64(time_ns(), 'ns')
            # FIXME
            # market.created = args.created
            market.seq = args.marketSeq
            market.owner = bytes(HexBytes(args.owner))
            market.terms = args.terms
            market.meta = args.meta
            market.maker = bytes(HexBytes(args.maker))
            market.provider_security = args.providerSecurity
            market.consumer_security = args.consumerSecurity
            market.market_fee = args.marketFee
            self._xbr.markets[txn, market_id] = market
            stored = True

    if stored:
        self.log.info('new {contract}(market_id={market_id}) record stored database!',
                      contract=hlcontract('MarketCreated'),
                      market_id=hlid(market_id))
def forward_call(*args, **kwargs):
    """
    Forward a management API call received from CFC to the local procedure
    registered under ``local_uri`` (closure over ``local_uri``/``remote_uri``).
    """
    # caller details from the remote (CFC) leg are not passed on locally
    kwargs.pop('details', None)

    self.log.debug(
        'Forwarding management {forward_type} from CFC .. [remote_uri={remote_uri}, local_uri={local_uri}]',
        forward_type=hl('CALL'),
        local_uri=hlid(local_uri),
        remote_uri=hlid(remote_uri))

    return self.call(local_uri, *args, **kwargs)
def _process_block(self, w3, block_number, Events):
    """
    Scan one blockchain block for XBR contract events, dispatch each matching
    event to its handler, and persist a per-block processing record.

    :param w3: web3 connection (used for getLogs/getTransactionReceipt).
    :param block_number: Number of the block to process.
    :param Events: Iterable of ``(Event, handler)`` pairs - handler is called
        as ``handler(transactionHash, blockHash, args)`` for each decoded event.
    :return: Number of XBR events processed in this block.
    """
    cnt = 0
    # filter by block, and XBR contract addresses
    # FIXME: potentially add filters for global data or market specific data for the markets started in this worker
    filter_params = {
        'address': [
            xbr.xbrtoken.address, xbr.xbrnetwork.address, xbr.xbrcatalog.address, xbr.xbrmarket.address,
            xbr.xbrchannel.address
        ],
        'fromBlock': block_number,
        'toBlock': block_number,
    }
    result = w3.eth.getLogs(filter_params)
    if result:
        for evt in result:
            receipt = w3.eth.getTransactionReceipt(evt['transactionHash'])
            for Event, handler in Events:
                # FIXME: MismatchedABI pops up .. we silence this with errors=web3.logs.DISCARD
                if hasattr(web3, 'logs') and web3.logs:
                    all_res = Event().processReceipt(receipt, errors=web3.logs.DISCARD)
                else:
                    all_res = Event().processReceipt(receipt)
                for res in all_res:
                    self.log.info('{handler} processing block {block_number} / txn {txn} with args {args}',
                                  handler=hl(handler.__name__),
                                  block_number=hlid(block_number),
                                  txn=hlid('0x' + binascii.b2a_hex(evt['transactionHash']).decode()),
                                  args=hlval(res.args))
                    handler(res.transactionHash, res.blockHash, res.args)
                    cnt += 1

    # record that this block has been processed (even when cnt == 0)
    with self._db.begin(write=True) as txn:
        block = cfxdb.xbr.block.Block()
        block.timestamp = np.datetime64(time_ns(), 'ns')
        block.block_number = block_number
        # FIXME
        # block.block_hash = bytes()
        block.cnt_events = cnt
        self._xbr.blocks[txn, pack_uint256(block_number)] = block

    if cnt:
        self.log.info('Processed blockchain block {block_number}: processed {cnt} XBR events.',
                      block_number=hlid(block_number),
                      cnt=hlid(cnt))
    else:
        self.log.info('Processed blockchain block {block_number}: no XBR events found!',
                      block_number=hlid(block_number))

    return cnt
def set_service_session(self, session, realm, authrole=None):
    """
    Store a service session for the given realm under the given authrole.
    """
    self.log.info('{func}(session={session}, realm="{realm}", authrole="{authrole}")',
                  func=hltype(self.set_service_session),
                  session=session,
                  realm=hlid(realm),
                  authrole=hlid(authrole))
    # create the per-realm map on first use, then index by authrole
    self._service_sessions.setdefault(realm, {})[authrole] = session
def connect_success(session):
    # remember the (now joined) authenticator service session, then signal readiness
    self._authenticator_session = session
    self.log.debug(
        'authenticator service session {session_id} attached to realm "{realm}" with authrole "{authrole}" {func}',
        func=hltype(self._init_dynamic_authenticator),
        session_id=hlid(session._session_id),
        authrole=hlid(session._authrole),
        realm=hlid(session._realm))
    d_ready.callback(None)
def kill_by_authid(self, realm_id, authid, reason, message=None, details=None):
    """
    Kill all sessions currently authenticated under the given authid on a realm.

    :param realm_id: The run-time ID of the realm to kill sessions on.
    :param authid: WAMP authid whose sessions shall be killed.
    :param reason: Reason URI passed on to the killed sessions.
    :param message: Optional human readable message passed on to the killed sessions.
    :param details: WAMP call details (provided by the router).

    :raises: ApplicationError ("crossbar.error.no_such_object") when no realm
        with the given ID is running.
    """
    self.log.info('Killing sessions by authid="{authid}" ..',
                  realm_id=hlid(realm_id),
                  authid=hlid(authid),
                  # FIX: was RouterController.start_router_realm (copy/paste) -
                  # the log should name *this* method
                  method=hltype(RouterController.kill_by_authid))

    if realm_id not in self.realms:
        raise ApplicationError("crossbar.error.no_such_object",
                               "No realm with ID '{}'".format(realm_id))

    # forward call directly to service agent
    return self.realms[realm_id].session.session_kill_by_authid(authid, reason, message=message, details=details)
def _configure_native_worker_proxy(self, worker_logname, worker_id, worker):
    """
    Configure a started proxy worker: apply the common native-worker
    configuration, then start all configured (frontend) transports and
    backend transports on the worker.

    :param worker_logname: Log name of the worker (used in log messages).
    :param worker_id: ID of the worker to configure.
    :param worker: Worker configuration item (dict).
    """
    yield self._configure_native_worker_common(worker_logname, worker_id, worker)

    # start transports on proxy
    for i, transport in enumerate(worker.get('transports', [])):
        # generate a stable ID when the config item has none
        if 'id' in transport:
            transport_id = transport['id']
        else:
            transport_id = 'transport{:03d}'.format(i)
            transport['id'] = transport_id

        self.log.info(
            "Order {worker_logname} to start Transport {transport_id}",
            worker_logname=worker_logname,
            transport_id=hlid(transport_id),
        )

        # XXX we're doing startup, and begining proxy workers --
        # want to share the web-transport etc etc stuff between
        # these and otehr kinds of routers / transports
        yield self._controller.call(u'crossbar.worker.{}.start_proxy_transport'.format(worker_id),
                                    transport_id,
                                    transport,
                                    options=CallOptions())

        self.log.info(
            "Ok, {worker_logname} has started Transport {transport_id}",
            worker_logname=worker_logname,
            transport_id=hlid(transport_id),
        )

    for i, backend in enumerate(worker.get('backends', [])):
        if 'id' in backend:
            backend_id = backend['id']
        else:
            backend_id = 'backend{:03d}'.format(i)
            backend['id'] = backend_id

        self.log.info(
            "Order {worker_logname} to start BackendTransport {backend_id}",
            worker_logname=worker_logname,
            backend_id=hlid(backend_id),
        )

        yield self._controller.call(u'crossbar.worker.{}.start_proxy_backend'.format(worker_id),
                                    backend_id,
                                    backend,
                                    options=CallOptions())

        self.log.info(
            "Ok, {worker_logname} has started BackendTransport {backend_id}",
            worker_logname=worker_logname,
            # FIX: kwarg was 'transport_id', but the format string interpolates
            # '{backend_id}' - the key must match for the event to render
            backend_id=hlid(backend_id),
        )
def get_service_session(self, realm, authrole=None):
    """
    Look up the service session stored for the given realm/authrole.
    Returns a Deferred firing with the session, or ``None`` if not found.
    """
    realm_sessions = self._service_sessions.get(realm)
    if realm_sessions is not None and authrole in realm_sessions:
        session = realm_sessions[authrole]
        self.log.info('{func}(session={session}, realm="{realm}", authrole="{authrole}")',
                      func=hltype(self.get_service_session),
                      session=session,
                      realm=hlid(realm),
                      authrole=hlid(authrole))
        return succeed(session)
    return succeed(None)
def _ready(s):
    # this is different from "self.config.controller._realm" !!
    self.log.info(
        'Container component ready: component_id="{component_id}", realm="{realm}", authrole="{authrole}", authid="{authid}", session={session} {func}',
        func=hltype(self.onJoin),
        component_id=hlid(component_id),
        realm=hlid(session._realm),
        authid=hlid(session._authid),
        authrole=hlid(session._authrole),
        session=hlid(session._session_id))
    # fire the joined-deferred exactly once
    if joined_d.called:
        return
    joined_d.callback(None)
def onJoin(self, details):
    """
    Register this component's API on all configured sessions, then resolve
    (or errback) the 'onready' deferred passed in via component config extra.
    """
    # register our API on all configured sessions and then fire onready
    #
    on_ready = self.config.extra.get('onready', None) if self.config.extra else None
    try:
        for session, prefix, _ in self._expose_on_sessions:
            regs = yield session.register(
                self, options=RegisterOptions(details_arg='details'), prefix=prefix)
            # session.register returns a mix of Registration results and
            # Failure objects - log each outcome individually
            for reg in regs:
                if isinstance(reg, Registration):
                    self.log.debug(
                        'Registered WAMP meta procedure <{proc}> on realm "{realm}"',
                        proc=reg.procedure,
                        realm=session._realm)
                elif isinstance(reg, Failure):
                    err = reg.value
                    if isinstance(err, ApplicationError):
                        self.log.warn(
                            'Failed to register WAMP meta procedure on realm "{realm}": {error} ("{message}")',
                            realm=session._realm,
                            error=err.error,
                            message=err.error_message())
                    else:
                        self.log.warn(
                            'Failed to register WAMP meta procedure on realm "{realm}": {error}',
                            realm=session._realm,
                            error=str(err))
                else:
                    self.log.warn(
                        'Failed to register WAMP meta procedure on realm "{realm}": {error}',
                        realm=session._realm,
                        error=str(reg))
    except Exception as e:
        # any unexpected error: propagate via on_ready (if any) and leave the realm
        self.log.failure()
        if on_ready:
            on_ready.errback(e)
        self.leave()
    else:
        self.log.info(
            '{func}: realm service session attached to realm "{realm}" [session_id={session_id}, authid="{authid}", authrole="{authrole}", on_ready={on_ready}]',
            func=hltype(self.onJoin),
            realm=hlid(details.realm),
            session_id=hlid(details.session),
            authid=hlid(details.authid),
            authrole=hlid(details.authrole),
            on_ready=on_ready,
        )
        if on_ready:
            on_ready.callback(self)
def get_service_session(self, realm, authrole):
    """
    Look up the service session for the given realm/authrole (authrole
    defaults to 'trusted' when falsy). Returns a Deferred firing with the
    session or ``None``.
    """
    authrole = authrole or 'trusted'
    # missing realm or authrole simply yields None
    session = self._service_sessions.get(realm, {}).get(authrole)
    self.log.debug('{func}(realm="{realm}", authrole="{authrole}") -> {session}',
                   func=hltype(self.get_service_session),
                   session=session,
                   realm=hlid(realm),
                   authrole=hlid(authrole))
    return succeed(session)
def _process_Network_MemberRegistered(transactionHash, blockHash, args):
    """
    Process an on-chain XBR member registration event: validate the eula/profile
    multihashes and persist a new member record to the embedded database
    (idempotent - an already stored member is only logged, not overwritten).
    """
    # /// Event emitted when a new member joined the XBR Network.
    # event MemberCreated (address indexed member, string eula, string profile, MemberLevel level);
    self.log.info(
        '{event}: processing event (tx_hash={tx_hash}, block_hash={block_hash}) - XBR member created at address {address})',
        event=hlcontract('XBRNetwork.MemberCreated'),
        tx_hash=hlid('0x' + binascii.b2a_hex(transactionHash).decode()),
        block_hash=hlid('0x' + binascii.b2a_hex(blockHash).decode()),
        address=hlid(args.member))

    member_adr = bytes(HexBytes(args.member))

    # eula/profile are expected to be IPFS b58 multihashes (sha2-256);
    # anything else is only warned about, not rejected
    if args.eula:
        h = multihash.decode(multihash.from_b58_string(args.eula))
        if h.name != 'sha2-256':
            self.log.warn(
                'WARNING: XBRNetwork.MemberCreated - eula "{eula}" is not an IPFS (sha2-256) b58-encoded multihash',
                eula=hlval(args.eula))

    if args.profile:
        h = multihash.decode(multihash.from_b58_string(args.profile))
        if h.name != 'sha2-256':
            self.log.warn(
                'WARNING: XBRNetwork.MemberCreated - profile "{profile}" is not an IPFS (sha2-256) b58-encoded multihash',
                # FIX: kwarg was 'eula=' - the format string interpolates
                # '{profile}', so the message could not render
                profile=hlval(args.profile))

    stored = False
    with self._db.begin(write=True) as txn:
        member = self._xbr.members[txn, member_adr]
        if member:
            # FIX: log said 'TokenApproval' - copy/paste from the token
            # handler; this is the MemberCreated handler
            self.log.warn('{contract}(tx_hash={tx_hash}) record already stored in database.',
                          contract=hlcontract('MemberCreated'),
                          tx_hash=hlid('0x' + binascii.b2a_hex(transactionHash).decode()))
        else:
            member = cfxdb.xbr.member.Member()
            member.address = member_adr
            member.timestamp = np.datetime64(time_ns(), 'ns')
            member.registered = args.registered
            member.eula = args.eula
            member.profile = args.profile
            member.level = args.level
            self._xbr.members[txn, member_adr] = member
            stored = True

    if stored:
        self.log.info('new {contract}(member_adr={member_adr}) record stored database!',
                      contract=hlcontract('MemberCreated'),
                      member_adr=hlid('0x' + binascii.b2a_hex(member_adr).decode()))
def start_market_maker(self, maker_id, config, details=None):
    """
    Starts a XBR Market Maker providing services in a specific XBR market.

    :param maker_id: ID (str) under which to run the new market maker.
    :param config: Market maker configuration (mapping).
    :param details: WAMP call details.

    :raises: ApplicationError ("wamp.error.invalid_argument") on invalid
        arguments, ("crossbar.error.already_running") when a maker with the
        given ID is already running.
    """
    if type(maker_id) != str:
        emsg = 'maker_id has invalid type {}'.format(type(maker_id))
        raise ApplicationError('wamp.error.invalid_argument', emsg)

    if not isinstance(config, Mapping):
        # FIX: error message said 'maker_id' although it is 'config' that
        # failed validation here
        emsg = 'config has invalid type {}'.format(type(config))
        raise ApplicationError('wamp.error.invalid_argument', emsg)

    if maker_id in self._makers:
        emsg = 'could not start market maker: a market maker with ID "{}" is already running (or starting)'.format(
            maker_id)
        raise ApplicationError('crossbar.error.already_running', emsg)

    self.personality.check_market_maker(self.personality, config)

    self.log.info('XBR Market Maker "{maker_id}" starting with config:\n{config}',
                  maker_id=hlid(maker_id),
                  config=pformat(config))

    maker = MarketMaker(self, maker_id, config, self._db, self._ipfs_files_dir)
    self._makers[maker_id] = maker
    self._maker_adr2id[maker.address] = maker_id

    yield maker.start()

    status = yield maker.status()
    self.log.info('{msg}: {accounts} local accounts, current block number is {current_block_no}',
                  msg=hl('Blockchain status', color='green', bold=True),
                  current_block_no=hlid(status['current_block_no']),
                  accounts=hlid(len(status['accounts'])))

    started = {
        'id': maker_id,
        'address': maker.address,
    }
    self.publish(u'{}.on_maker_started'.format(self._uri_prefix), started)

    self.log.info(
        'XBR Market Maker "{maker_id}" (address {maker_adr}) started. Now running {maker_cnt} market makers in total in this worker component.',
        maker_id=maker_id,
        maker_adr=maker.address,
        maker_cnt=len(self._makers))

    returnValue(started)
def ok(_):
    # transport is up: register it under its ID
    self.transports[transport_id] = router_transport

    # derive human readable endpoint/transport descriptions for logging
    endpoint_config = config['endpoint']
    if endpoint_config['type'] == 'tcp':
        endpoint = 'TCP port {}'.format(endpoint_config['port'])
        if 'portrange' in endpoint_config:
            transport_type = 'TCP/{} transport'.format(endpoint_config['portrange'])
        else:
            transport_type = 'TCP/{} transport'.format(endpoint_config['port'])
    elif endpoint_config['type'] == 'unix':
        endpoint = 'UDS path "{}"'.format(endpoint_config['path'])
        transport_type = 'Unix domain socket transport'
    else:
        endpoint = 'unknown'
        transport_type = 'unknown'

    self.log.info(
        'Router {transport_type} started as transport "{transport_id}" and listening on {endpoint}',
        transport_type=hlval(transport_type),
        transport_id=hlid(transport_id),
        endpoint=hlval(endpoint))

    # notify subscribers (excluding the caller itself) that the transport started
    topic = '{}.on_router_transport_started'.format(self._uri_prefix)
    self.publish(topic, event, options=PublishOptions(exclude=caller))

    return router_transport.marshal()
def onLeave(self, details):
    """
    Shut down the worker: stop all market makers, stop and join the blockchain
    monitor thread, then disconnect from the router.

    :param details: WAMP session close details.
    """
    self.log.info('XBR Markets Worker shutting down ({market_cnt} markets to shutdown) ..',
                  market_cnt=len(self._makers))

    self._status = self.STATUS_STOPPING

    # shutdown each market ..
    for maker in list(self._makers.values()):
        yield maker.stop()
        self.log.info('Market Maker "{maker_id}" stopped.', maker_id=hlid(maker._id))

    # stop blockchain monitoring background thread, possibly waking up
    # the thread from some blocking activity (like sleeping in a syscall)
    self._stop_monitor = True
    if not self._run_monitor.is_set():
        self._run_monitor.set()

    # make sure to join the background thread, so this main thread running
    # in the process exits - and thus the whole process
    if self._monitor_blockchain_thread:
        self._monitor_blockchain_thread.join()

    # disconnect from router
    self.disconnect()

    self._status = None
    self.log.info('XBR Markets Worker shutdown complete!')
async def _logout(self, call_details: CallDetails):
    """
    Log out the calling client: drop its user key from the database and
    (asynchronously, after this call returns) kill all of its WAMP sessions.

    :param call_details: WAMP call details of the calling session.
    :return: Dict with logout timestamp, session ID and client pubkey.
    """
    caller_session_id = call_details.caller
    caller_pubkey = self._pubkey_by_session.pop(caller_session_id, None)
    # NOTE(review): this assert fires when the caller session has no pubkey
    # mapping (pop returned None) and is stripped under -O; confirm callers
    # guarantee a mapping exists, or raise explicitly instead.
    assert is_cs_pubkey(caller_pubkey)

    self.log.info('{klass}.logout_member with caller pubkey {caller_pubkey})',
                  klass=self.__class__.__name__,
                  caller_pubkey=hlid(binascii.b2a_hex(caller_pubkey).decode()))

    # forget the client's user key
    with self._db.begin(write=True) as txn:
        del self._xbrmm.user_keys[txn, caller_pubkey]

    logout_info = {
        'logged_out': time_ns(),
        'from_session': caller_session_id,
        'pubkey': caller_pubkey,
    }

    def kill():
        # kill all sessions of the caller's authid and log success
        self._market_session.call('wamp.session.kill_by_authid', call_details.caller_authid)
        self.log.info(
            'Ok, session {caller_session} logged out for client with pubkey {caller_pubkey} ',
            caller_session=hlid(caller_session_id),
            caller_pubkey=hlid(binascii.b2a_hex(caller_pubkey).decode()))

    # first return from this call, before killing its session ..
    self._reactor.callLater(0, kill)

    return logout_info
def load_config(self, configfile=None):
    """
    Check and load the node configuration from:

    * from ``.crossbar/config.json`` or
    * from built-in (empty) default configuration

    This is the _second_ function being called after the Node has been instantiated.

    IMPORTANT: this function is run _before_ start of Twisted reactor!
    """
    if not configfile:
        # no config file: fall back to a built-in (empty) default configuration
        self._config = {
            u'version': 2,
            u'controller': {},
            u'workers': []
        }
        self.personality.check_config(self.personality, self._config)
        self.log.info('Node configuration loaded from built-in config.')
        return Node.CONFIG_SOURCE_DEFAULT

    configpath = os.path.abspath(os.path.join(self._cbdir, configfile))
    self.log.debug('Loading node configuration from "{configpath}" ..', configpath=configpath)

    # the following will read the config, check the config and replace
    # environment variable references in configuration values ("${MYVAR}") and
    # finally return the parsed configuration object
    self._config = self.personality.check_config_file(self.personality, configpath)
    self.log.info('Node configuration loaded from {configpath}', configpath=hlid(configpath))
    return Node.CONFIG_SOURCE_LOCALFILE
def load_config(self, configfile=None):
    """
    Check and load the node configuration (usually, from ".crossbar/config.json")
    or load built-in empty config.
    """
    if not configfile:
        # no config file: fall back to a built-in (empty) default configuration
        self._config = {
            u'version': 2,
            u'controller': {},
            u'workers': []
        }
        self.personality.check_config(self.personality, self._config)
        self.log.info('Node configuration loaded from built-in config.')
        return

    configpath = os.path.abspath(os.path.join(self._cbdir, configfile))
    self.log.debug('Loading node configuration from "{configpath}" ..', configpath=configpath)

    # the following will read the config, check the config and replace
    # environment variable references in configuration values ("${MYVAR}") and
    # finally return the parsed configuration object
    self._config = self.personality.check_config_file(self.personality, configpath)
    self.log.info('Node configuration loaded from {configpath}', configpath=hlid(configpath))
def start_worker(self, worker_id, worker_type, worker_options=None, details=None):
    """
    Start a new worker process in the node.
    """
    self.log.info('Starting {worker_type} worker {worker_id} {worker_klass}',
                  worker_type=worker_type,
                  worker_id=hlid(worker_id),
                  worker_klass=hltype(NodeController.start_worker))

    # reject reserved ('controller'), empty and non-str worker IDs
    if type(worker_id) != str or worker_id in [u'controller', u'']:
        raise Exception('invalid worker ID "{}"'.format(worker_id))

    # dispatch on worker type: guest workers vs native workers (router, container, ..)
    if worker_type == u'guest':
        return self._start_guest_worker(worker_id, worker_options, details=details)

    if worker_type in self._node._native_workers:
        return self._start_native_worker(worker_type, worker_id, worker_options, details=details)

    raise Exception('invalid worker type "{}"'.format(worker_type))
def configure_worker(res, worker_logname, worker_type, worker_id, worker): self.log.info( "Ok, node has started {worker_logname}", worker_logname=worker_logname, ) # now configure the worker self.log.info( "Configuring {worker_logname} ..", worker_logname=worker_logname, ) method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_')) try: config_fn = getattr(self, method_name) except AttributeError: raise ValueError("A native worker of type '{}' is configured but " "there is no method '{}' on {}".format( worker_type, method_name, type(self))) try: yield config_fn(worker_logname, worker_id, worker) except ApplicationError as e: if e.error != 'wamp.error.canceled': raise self.log.info( 'Ok, worker "{worker_logname}" configured and ready!', worker_logname=hlid(worker_logname), )
def start_worker(self, worker_id, worker_type, worker_options=None, details=None):
    """
    Start a new worker process in the node.

    :param worker_id: ID for the new worker (must be a non-empty str and
        must not be the reserved ID 'controller').
    :param worker_type: Type of worker to start: 'guest' or a native worker type.
    :param worker_options: Optional worker options.
    :param details: WAMP call details.
    """
    self.log.info('Starting {worker_type} worker {worker_id} {worker_klass}',
                  worker_type=worker_type,
                  worker_id=hlid(worker_id),
                  worker_klass=hltype(NodeController.start_worker))

    # FIX: validate the worker ID - the sibling implementation of start_worker
    # already rejects reserved/invalid IDs; without this check a worker could
    # be started under the reserved ID 'controller' (or an empty/non-str ID)
    if type(worker_id) != str or worker_id in [u'controller', u'']:
        raise Exception('invalid worker ID "{}"'.format(worker_id))

    if worker_type == u'guest':
        return self._start_guest_worker(worker_id, worker_options, details=details)
    elif worker_type in self._node._native_workers:
        return self._start_native_worker(worker_type, worker_id, worker_options, details=details)
    else:
        raise Exception('invalid worker type "{}"'.format(worker_type))
def _maybe_generate_key(cbdir, privfile=u'key.priv', pubfile=u'key.pub'):
    """
    Load the node's Ed25519 key pair from the node directory, generating and
    persisting a fresh pair if no private key file exists yet.

    :param cbdir: Node directory in which the key files live.
    :param privfile: Filename (within ``cbdir``) of the private key file.
    :param pubfile: Filename (within ``cbdir``) of the public key file.

    :returns: Tuple ``(was_new, signing_key)`` — ``was_new`` is True iff a new
        key pair was generated; ``signing_key`` wraps the node private key.
    :raises Exception: if an existing key file is corrupt (missing tags) or the
        public/private key files are inconsistent with each other.
    """
    was_new = True
    privkey_path = os.path.join(cbdir, privfile)
    pubkey_path = os.path.join(cbdir, pubfile)

    if os.path.exists(privkey_path):

        # node private key seems to exist already .. check!

        priv_tags = _parse_key_file(privkey_path, private=True)
        # all of these tags must be present, otherwise the file is corrupt
        for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519', u'private-key-ed25519']:
            if tag not in priv_tags:
                raise Exception("Corrupt node private key file {} - {} tag not found".format(privkey_path, tag))

        privkey_hex = priv_tags[u'private-key-ed25519']
        privkey = signing.SigningKey(privkey_hex, encoder=encoding.HexEncoder)
        pubkey = privkey.verify_key
        pubkey_hex = pubkey.encode(encoder=encoding.HexEncoder).decode('ascii')

        # cross-check: public key recorded in the private key file must match the
        # public key derived from the stored private key
        if priv_tags[u'public-key-ed25519'] != pubkey_hex:
            raise Exception(
                ("Inconsistent node private key file {} - public-key-ed25519 doesn't"
                 " correspond to private-key-ed25519").format(pubkey_path)
            )

        if os.path.exists(pubkey_path):
            # the public key file exists too: verify it is consistent as well
            pub_tags = _parse_key_file(pubkey_path, private=False)
            for tag in [u'creator', u'created-at', u'machine-id', u'public-key-ed25519']:
                if tag not in pub_tags:
                    raise Exception("Corrupt node public key file {} - {} tag not found".format(pubkey_path, tag))

            if pub_tags[u'public-key-ed25519'] != pubkey_hex:
                raise Exception(
                    ("Inconsistent node public key file {} - public-key-ed25519 doesn't"
                     " correspond to private-key-ed25519").format(pubkey_path)
                )
        else:
            # public key file missing: re-create it from the (validated) private key file
            log.info(
                "Node public key file {pub_path} not found - re-creating from node private key file {priv_path}",
                pub_path=pubkey_path,
                priv_path=privkey_path,
            )
            pub_tags = OrderedDict([
                (u'creator', priv_tags[u'creator']),
                (u'created-at', priv_tags[u'created-at']),
                (u'machine-id', priv_tags[u'machine-id']),
                (u'public-key-ed25519', pubkey_hex),
            ])
            msg = u'Crossbar.io node public key\n\n'
            _write_node_key(pubkey_path, pub_tags, msg)

        log.info('Node key files exist and are valid. Node public key is {pubkey}',
                 pubkey=hlid('0x' + pubkey_hex))

        was_new = False

    else:
        # node private key does not yet exist: generate one

        privkey = signing.SigningKey.generate()
        privkey_hex = privkey.encode(encoder=encoding.HexEncoder).decode('ascii')
        pubkey = privkey.verify_key
        pubkey_hex = pubkey.encode(encoder=encoding.HexEncoder).decode('ascii')

        # first, write the public file
        tags = OrderedDict([
            (u'creator', _creator()),
            (u'created-at', utcnow()),
            (u'machine-id', _machine_id()),
            (u'public-key-ed25519', pubkey_hex),
        ])
        msg = u'Crossbar.io node public key\n\n'
        _write_node_key(pubkey_path, tags, msg)

        # now, add the private key and write the private file
        tags[u'private-key-ed25519'] = privkey_hex
        msg = u'Crossbar.io node private key - KEEP THIS SAFE!\n\n'
        _write_node_key(privkey_path, tags, msg)

        log.info('New node key pair generated! Public key is {pubkey}', pubkey=hlid('0x' + pubkey_hex))

    # fix file permissions on node public/private key files
    # note: we use decimals instead of octals as octal literals have changed between Py2/3
    #
    if os.stat(pubkey_path).st_mode & 511 != 420:  # 420 (decimal) == 0644 (octal)
        os.chmod(pubkey_path, 420)
        log.info("File permissions on node public key fixed")

    if os.stat(privkey_path).st_mode & 511 != 384:  # 384 (decimal) == 0600 (octal)
        os.chmod(privkey_path, 384)
        log.info("File permissions on node private key fixed")

    log.info(
        'Node key loaded from {priv_path}',
        priv_path=hlid(privkey_path),
    )
    return was_new, cryptosign.SigningKey(privkey)
def _configure_native_worker_router(self, worker_logname, worker_id, worker):
    """
    Configure a freshly started router worker from its configuration item:
    start realms (with their roles), embedded components and transports
    (with their web service paths) by calling into the worker over WAMP.

    :param worker_logname: Human-readable worker name used in log output.
    :param worker_id: ID of the (already started) router worker to configure.
    :param worker: The worker configuration item (dict); auto-assigned IDs
        are written back into this dict as a side effect.
    """
    # apply configuration common to all native worker types first
    yield self._configure_native_worker_common(worker_logname, worker_id, worker)

    # start realms on router
    for realm in worker.get('realms', []):

        # start realm
        if 'id' in realm:
            realm_id = realm['id']
        else:
            # no ID given: auto-assign a sequential ID and write it back into the config
            realm_id = 'realm{:03d}'.format(self._realm_no)
            realm['id'] = realm_id
            self._realm_no += 1

        self.log.info(
            "Order {worker_logname} to start Realm {realm_id}",
            worker_logname=worker_logname,
            realm_id=hlid(realm_id),
        )

        yield self._controller.call(u'crossbar.worker.{}.start_router_realm'.format(worker_id),
                                    realm_id,
                                    realm,
                                    options=CallOptions())

        self.log.info(
            "Ok, {worker_logname} has started Realm {realm_id}",
            worker_logname=worker_logname,
            realm_id=hlid(realm_id),
        )

        # add roles to realm
        for role in realm.get('roles', []):
            if 'id' in role:
                role_id = role['id']
            else:
                role_id = 'role{:03d}'.format(self._role_no)
                role['id'] = role_id
                self._role_no += 1

            self.log.info(
                "Order Realm {realm_id} to start Role {role_id}",
                realm_id=hlid(realm_id),
                role_id=hlid(role_id),
            )

            yield self._controller.call(u'crossbar.worker.{}.start_router_realm_role'.format(worker_id),
                                        realm_id,
                                        role_id,
                                        role,
                                        options=CallOptions())

            self.log.info(
                "Ok, Realm {realm_id} has started Role {role_id}",
                realm_id=hlid(realm_id),
                role_id=hlid(role_id),
            )

    # start components to run embedded in the router
    for component in worker.get('components', []):

        if 'id' in component:
            component_id = component['id']
        else:
            component_id = 'component{:03d}'.format(self._component_no)
            component['id'] = component_id
            self._component_no += 1

        yield self._controller.call(u'crossbar.worker.{}.start_router_component'.format(worker_id),
                                    component_id,
                                    component,
                                    options=CallOptions())
        self.log.info(
            "{logname}: component '{component}' started",
            logname=worker_logname,
            component=component_id,
        )

    # start transports on router
    for transport in worker.get('transports', []):

        if 'id' in transport:
            transport_id = transport['id']
        else:
            transport_id = 'transport{:03d}'.format(self._transport_no)
            transport['id'] = transport_id
            self._transport_no += 1

        # web service paths are started one-by-one below rather than
        # together with the transport-create call
        add_paths_on_transport_create = False

        self.log.info(
            "Order {worker_logname} to start Transport {transport_id}",
            worker_logname=worker_logname,
            transport_id=hlid(transport_id),
        )

        yield self._controller.call(u'crossbar.worker.{}.start_router_transport'.format(worker_id),
                                    transport_id,
                                    transport,
                                    create_paths=add_paths_on_transport_create,
                                    options=CallOptions())
        self.log.info(
            "Ok, {worker_logname} has started Transport {transport_id}",
            worker_logname=worker_logname,
            transport_id=hlid(transport_id),
        )

        if not add_paths_on_transport_create:

            # determine where the path->webservice map lives for this transport type
            if transport['type'] == 'web':
                paths = transport.get('paths', {})
            elif transport['type'] == 'universal':
                paths = transport.get('web', {}).get('paths', {})
            else:
                paths = None

            # Web service paths
            if paths:
                for path in sorted(paths):
                    # the root path "/" is handled by the transport itself, skip it here
                    if path != '/':
                        webservice = paths[path]

                        if 'id' in webservice:
                            webservice_id = webservice['id']
                        else:
                            webservice_id = 'webservice{:03d}'.format(self._webservice_no)
                            webservice['id'] = webservice_id
                            self._webservice_no += 1

                        self.log.info(
                            "Order Transport {transport_id} to start Web Service {webservice_id}",
                            transport_id=hlid(transport_id),
                            webservice_id=hlid(webservice_id),
                            path=hluserid(path),
                        )

                        yield self._controller.call(u'crossbar.worker.{}.start_web_transport_service'.format(worker_id),
                                                    transport_id,
                                                    path,
                                                    webservice,
                                                    options=CallOptions())
                        self.log.info(
                            "Ok, Transport {transport_id} has started Web Service {webservice_id}",
                            transport_id=hlid(transport_id),
                            webservice_id=hlid(webservice_id),
                            path=hluserid(path),
                        )
def boot_from_config(self, config):
    """
    Startup elements in the node as specified in the provided node configuration.

    :param config: Node configuration (dict) with optional ``"controller"``
        and ``"workers"`` sections; auto-assigned worker IDs are written
        back into the worker items as a side effect.
    """
    # get controller configuration subpart
    controller = config.get('controller', {})
    parallel_worker_start = controller.get('options', {}).get('enable_parallel_worker_start', False)

    self.log.info('{bootmsg} {method}',
                  bootmsg=hl('Booting node from local configuration [parallel_worker_start={}] ..'.format(parallel_worker_start),
                             color='green', bold=True),
                  method=hltype(Node.boot_from_config))

    # start Manhole in node controller
    if 'manhole' in controller:
        yield self._controller.call(u'crossbar.start_manhole', controller['manhole'], options=CallOptions())
        self.log.debug("controller: manhole started")

    # startup all workers
    workers = config.get('workers', [])
    if len(workers):
        self.log.info(hl('Will start {} worker{} ..'.format(len(workers), 's' if len(workers) > 1 else ''),
                         color='green', bold=True))
    else:
        self.log.info(hl('No workers configured, nothing to do', color='green', bold=True))

    # collects per-worker deferreds when parallel start is enabled
    dl = []
    for worker in workers:

        # worker ID
        if 'id' in worker:
            worker_id = worker['id']
        else:
            worker_id = u'worker{:03d}'.format(self._worker_no)
            worker['id'] = worker_id
            self._worker_no += 1

        # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
        worker_type = worker['type']

        # native worker processes setup
        if worker_type in self._native_workers:

            # set logname depending on native worker type
            worker_logname = '{} {}'.format(self._native_workers[worker_type]['logname'], hlid(worker_id))

            # any worker specific options
            worker_options = worker.get('options', {})

            # start the (native) worker
            self.log.info(
                "Order node to start {worker_logname}",
                worker_logname=worker_logname,
            )

            d = self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker_options, options=CallOptions())

            @inlineCallbacks
            def configure_worker(res, worker_logname, worker_type, worker_id, worker):
                self.log.info(
                    "Ok, node has started {worker_logname}",
                    worker_logname=worker_logname,
                )

                # now configure the worker
                self.log.info(
                    "Configuring {worker_logname} ..",
                    worker_logname=worker_logname,
                )
                # resolve the worker-type specific configurator on self
                method_name = '_configure_native_worker_{}'.format(worker_type.replace('-', '_'))
                try:
                    config_fn = getattr(self, method_name)
                except AttributeError:
                    raise ValueError(
                        "A native worker of type '{}' is configured but "
                        "there is no method '{}' on {}".format(worker_type, method_name, type(self))
                    )
                yield config_fn(worker_logname, worker_id, worker)
                self.log.info(
                    "Ok, {worker_logname} configured",
                    worker_logname=worker_logname,
                )

            # note: loop variables are passed as explicit callback args (not
            # captured via closure), avoiding the late-binding-in-loop pitfall
            d.addCallback(configure_worker, worker_logname, worker_type, worker_id, worker)

        # guest worker processes setup
        elif worker_type == u'guest':

            # now actually start the (guest) worker ..

            # FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers
            # only take the options (which is part of the whole config item for the worker)
            d = self._controller.call(u'crossbar.start_worker', worker_id, worker_type, worker, options=CallOptions())

        else:
            raise Exception('logic error: unexpected worker_type="{}"'.format(worker_type))

        if parallel_worker_start:
            dl.append(d)
        else:
            # sequential start: wait for this worker before starting the next
            yield d

    yield gatherResults(dl)

    self.log.info(hl('Ok, local node configuration booted successfully!', color='green', bold=True))
def start_router_realm(self, realm_id, realm_config, details=None):
    """
    Starts a realm on this router worker.

    :param realm_id: The ID of the realm to start.
    :type realm_id: str

    :param realm_config: The realm configuration.
    :type realm_config: dict

    :param details: Call details.
    :type details: autobahn.wamp.types.CallDetails

    :raises ApplicationError: ``crossbar.error.already_running`` if a realm
        with this ID already exists, or ``crossbar.error.invalid_configuration``
        if the realm configuration fails validation.
    """
    self.log.info('Starting router realm {realm_id} {method}',
                  realm_id=hlid(realm_id),
                  method=hltype(RouterController.start_router_realm))

    # prohibit starting a realm twice
    #
    if realm_id in self.realms:
        emsg = "Could not start realm: a realm with ID '{}' is already running (or starting)".format(realm_id)
        self.log.error(emsg)
        raise ApplicationError(u'crossbar.error.already_running', emsg)

    # check configuration
    #
    try:
        self.personality.check_router_realm(self.personality, realm_config)
    except Exception as e:
        emsg = "Invalid router realm configuration: {}".format(e)
        self.log.error(emsg)
        raise ApplicationError(u"crossbar.error.invalid_configuration", emsg)

    # URI of the realm to start
    realm = realm_config['name']

    # router/realm wide options
    options = realm_config.get('options', {})

    enable_meta_api = options.get('enable_meta_api', True)

    # expose router/realm service API additionally on local node management router
    bridge_meta_api = options.get('bridge_meta_api', False)
    if bridge_meta_api:
        # FIXME
        bridge_meta_api_prefix = u'crossbar.worker.{worker_id}.realm.{realm_id}.root.'.format(worker_id=self._worker_id, realm_id=realm_id)
    else:
        bridge_meta_api_prefix = None

    # track realm (both by realm ID and by realm name/URI)
    rlm = self.router_realm_class(realm_id, realm_config)
    self.realms[realm_id] = rlm
    self.realm_to_id[realm] = realm_id

    # create a new router for the realm
    router = self._router_factory.start_realm(rlm)

    # add a router/realm service session
    extra = {
        # the RouterServiceAgent will fire this when it is ready
        'onready': Deferred(),

        # if True, forward the WAMP meta API (implemented by RouterServiceAgent)
        # that is normally only exposed on the app router/realm _additionally_
        # to the local node management router.
        'enable_meta_api': enable_meta_api,
        'bridge_meta_api': bridge_meta_api,
        'bridge_meta_api_prefix': bridge_meta_api_prefix,

        # the management session on the local node management router to which
        # the WAMP meta API is exposed to additionally, when the bridge_meta_api option is set
        'management_session': self,
    }
    cfg = ComponentConfig(realm, extra)
    rlm.session = RouterServiceAgent(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')

    # wait until the realm service session signals readiness before reporting success
    yield extra['onready']

    self.log.info('Realm "{realm_id}" (name="{realm_name}") started', realm_id=realm_id, realm_name=rlm.session._realm)

    # notify subscribers (e.g. management) that the realm is up
    self.publish(u'{}.on_realm_started'.format(self._uri_prefix), realm_id)
def start(self, node_id=None):
    """
    Starts this node. This will start a node controller and then spawn new worker
    processes as needed.

    :param node_id: Optional node ID, overriding both configuration and hostname.
    :type node_id: str or None

    :returns: Dict with key ``'shutdown_complete'`` holding a Deferred that
        fires once the node has shut down.
    """
    self.log.info('Starting {personality} node {method}',
                  personality=self.personality.NAME,
                  method=hltype(Node.start))

    # a configuration must have been loaded before
    if not self._config:
        raise Exception("No node configuration set")

    # a node can only be started once for now
    assert self._shutdown_complete is None
    assert self._node_id is None

    # get controller config/options
    controller_config = self._config.get('controller', {})
    controller_options = controller_config.get('options', {})

    # the node ID: CLI takes precedence over config over hostname
    if node_id:
        self._node_id = node_id
        _node_id_source = 'explicit run-time argument'
    elif 'id' in controller_config:
        self._node_id = controller_config['id']
        _node_id_source = 'explicit configuration'
    else:
        self._node_id = u'{}'.format(socket.gethostname()).lower()
        _node_id_source = 'hostname'
    self.log.info('Node ID {node_id} set from {node_id_source}',
                  node_id=hlid(self._node_id),
                  node_id_source=_node_id_source)

    # set controller process title (best-effort: setproctitle is optional)
    try:
        import setproctitle
    except ImportError:
        self.log.warn("Warning, could not set process title (setproctitle not installed)")
    else:
        setproctitle.setproctitle(controller_options.get('title', 'crossbar-controller'))

    # local node management router
    self._router_factory = RouterFactory(self._node_id, None)
    self._router_session_factory = RouterSessionFactory(self._router_factory)
    rlm_config = {
        'name': self._realm
    }
    rlm = RouterRealm(None, rlm_config)
    router = self._router_factory.start_realm(rlm)

    # setup global static roles
    self._add_global_roles()

    # always add a realm service session
    cfg = ComponentConfig(self._realm)
    rlm.session = (self.ROUTER_SERVICE)(cfg, router)
    self._router_session_factory.add(rlm.session, authrole=u'trusted')
    self.log.debug('Router service session attached [{router_service}]',
                   router_service=qual(self.ROUTER_SERVICE))

    # add the node controller singleton component
    self._controller = self.NODE_CONTROLLER(self)
    self._router_session_factory.add(self._controller, authrole=u'trusted')
    self.log.debug('Node controller attached [{node_controller}]',
                   node_controller=qual(self.NODE_CONTROLLER))

    # add extra node controller components
    self._add_extra_controller_components(controller_options)

    # setup Node shutdown triggers
    self._set_shutdown_triggers(controller_options)

    # setup node shutdown Deferred
    self._shutdown_complete = Deferred()

    # startup the node personality ..
    yield self.personality.Node.boot(self)

    # notify systemd that we are fully up and running
    # (sdnotify is optional; absent on non-systemd platforms)
    try:
        import sdnotify
    except ImportError:
        # do nothing on non-systemd platforms
        pass
    else:
        sdnotify.SystemdNotifier().notify("READY=1")

    # return a shutdown deferred which we will fire to notify the code that
    # called start() - which is the main crossbar boot code
    res = {
        'shutdown_complete': self._shutdown_complete
    }
    returnValue(res)
def _run_command_start(options, reactor, personality):
    """
    Subcommand "crossbar start".

    Writes the node PID file, constructs and starts the node, wires reactor
    lifecycle triggers, runs the reactor, and finally exits the process with
    a code reflecting whether node shutdown was clean.

    :param options: Parsed CLI options (must provide ``cbdir``, ``config``,
        ``argv``, ``debug_lifecycle``, ``debug_programflow``, ``shutdownafter``).
    :param reactor: The Twisted reactor to run.
    :param personality: The node personality class providing Node/NodeOptions/BANNER.
    """
    # do not allow to run more than one Crossbar.io instance
    # from the same Crossbar.io node directory
    #
    pid_data = _check_is_running(options.cbdir)
    if pid_data:
        print("Crossbar.io is already running from node directory {} (PID {}).".format(options.cbdir, pid_data['pid']))
        sys.exit(1)
    else:
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        with open(fp, 'wb') as fd:
            argv = options.argv
            options_dump = vars(options)
            pid_data = {
                'pid': os.getpid(),
                'argv': argv,
                # drop non-serializable / redundant entries
                'options': {x: y for x, y in options_dump.items() if x not in ["func", "argv"]}
            }
            fd.write("{}\n".format(
                json.dumps(
                    pid_data,
                    sort_keys=False,
                    indent=4,
                    separators=(', ', ': '),
                    ensure_ascii=False
                )
            ).encode('utf8'))

    # remove node PID file when reactor exits
    #
    def remove_pid_file():
        fp = os.path.join(options.cbdir, _PID_FILENAME)
        if os.path.isfile(fp):
            os.remove(fp)
    reactor.addSystemEventTrigger('after', 'shutdown', remove_pid_file)

    log = make_logger()

    # represents the running Crossbar.io node
    #
    node_options = personality.NodeOptions(debug_lifecycle=options.debug_lifecycle,
                                           debug_programflow=options.debug_programflow)

    node = personality.Node(personality,
                            options.cbdir,
                            reactor=reactor,
                            options=node_options)

    # print the banner, personality and node directory
    #
    for line in personality.BANNER.splitlines():
        log.info(hl(line, color='yellow', bold=True))
    log.info('')
    log.info('Initializing {node_personality} node from node directory {cbdir} {node_class}',
             node_personality=personality,
             cbdir=hlid(options.cbdir),
             node_class=hltype(personality.Node))

    # possibly generate new node key
    #
    node.load_keys(options.cbdir)

    # check and load the node configuration
    #
    try:
        node.load_config(options.config)
    except InvalidConfigException as e:
        log.failure()
        log.error("Invalid node configuration")
        log.error("{e!s}", e=e)
        sys.exit(1)
    except:
        raise

    # https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IReactorCore.html
    # Each "system event" in Twisted, such as 'startup', 'shutdown', and 'persist', has 3 phases:
    # 'before', 'during', and 'after' (in that order, of course). These events will be fired
    # internally by the Reactor.

    def before_reactor_started():
        term_print('CROSSBAR:REACTOR_STARTING')

    def after_reactor_started():
        term_print('CROSSBAR:REACTOR_STARTED')

    reactor.addSystemEventTrigger('before', 'startup', before_reactor_started)
    reactor.addSystemEventTrigger('after', 'startup', after_reactor_started)

    def before_reactor_stopped():
        term_print('CROSSBAR:REACTOR_STOPPING')

    def after_reactor_stopped():
        # FIXME: we are indeed reaching this line, however,
        # the log output does not work (it also doesnt work using
        # plain old print). Dunno why.

        # my theory about this issue is: by the time this line
        # is reached, Twisted has already closed the stdout/stderr
        # pipes. hence we do an evil trick: we directly write to
        # the process' controlling terminal
        # https://unix.stackexchange.com/a/91716/52500
        term_print('CROSSBAR:REACTOR_STOPPED')

    reactor.addSystemEventTrigger('before', 'shutdown', before_reactor_stopped)
    reactor.addSystemEventTrigger('after', 'shutdown', after_reactor_stopped)

    # now actually start the node ..
    #
    exit_info = {'was_clean': None}

    def start_crossbar():
        term_print('CROSSBAR:NODE_STARTING')

        #
        # ****** main entry point of node ******
        #
        d = node.start()

        # node started successfully, and later ..
        def on_startup_success(_shutdown_complete):
            term_print('CROSSBAR:NODE_STARTED')

            shutdown_complete = _shutdown_complete['shutdown_complete']

            # .. exits, signaling exit status _inside_ the result returned
            def on_shutdown_success(shutdown_info):
                exit_info['was_clean'] = shutdown_info['was_clean']
                # FIX: the format-field value must be passed as a keyword argument
                # (was passed positionally, which the structured logger rejects)
                log.info('on_shutdown_success: was_clean={was_clean}',
                         was_clean=shutdown_info['was_clean'])

            # should not arrive here:
            def on_shutdown_error(err):
                exit_info['was_clean'] = False
                log.error("on_shutdown_error: {tb}", tb=failure_format_traceback(err))

            shutdown_complete.addCallbacks(on_shutdown_success, on_shutdown_error)

        # node could not even start
        def on_startup_error(err):
            term_print('CROSSBAR:NODE_STARTUP_FAILED')
            exit_info['was_clean'] = False
            log.error("Could not start node: {tb}", tb=failure_format_traceback(err))
            if reactor.running:
                reactor.stop()

        d.addCallbacks(on_startup_success, on_startup_error)

    # Call a function when the reactor is running. If the reactor has not started, the callable
    # will be scheduled to run when it does start.
    reactor.callWhenRunning(start_crossbar)

    # Special feature to automatically shutdown the node after this many seconds
    if options.shutdownafter:

        @inlineCallbacks
        def _shutdown():
            term_print('CROSSBAR:SHUTDOWN_AFTER_FIRED')
            shutdown_info = yield node.stop()
            exit_info['was_clean'] = shutdown_info['was_clean']
            term_print('CROSSBAR:SHUTDOWN_AFTER_COMPLETE')

        reactor.callLater(options.shutdownafter, _shutdown)

    # now enter event loop ..
    #
    log.info(hl('Entering event reactor ...', color='cyan', bold=True))
    term_print('CROSSBAR:REACTOR_ENTERED')
    reactor.run()

    # once the reactor has finally stopped, we get here, and at that point,
    # exit_info['was_clean'] MUST have been set before - either to True or to False
    # (otherwise we are missing a code path to handle in above)

    # exit the program with exit code depending on whether the node has been cleanly shut down
    if exit_info['was_clean'] is True:
        term_print('CROSSBAR:EXIT_WITH_SUCCESS')
        sys.exit(0)

    elif exit_info['was_clean'] is False:
        term_print('CROSSBAR:EXIT_WITH_ERROR')
        sys.exit(1)

    else:
        term_print('CROSSBAR:EXIT_WITH_INTERNAL_ERROR')
        sys.exit(1)