def handle(self):
    config = self.request.raw_request

    # Creates a pubsub_tool that will handle this subscription and registers it with pubsub
    pubsub_tool = PubSubTool(self.pubsub, self.server, config['endpoint_type'])

    # Makes this sub_key known to pubsub
    pubsub_tool.add_sub_key(config['sub_key'])

    # Common message for both local server and broker
    msg = {
        'cluster_id': self.server.cluster_id,
        'server_name': self.server.name,
        'server_pid': self.server.pid,
        'sub_key': config['sub_key'],
        'endpoint_type': config['endpoint_type'],
        'task_delivery_interval': config['task_delivery_interval'],
    }

    # Register this delivery task with current server's pubsub but only if we do not have it already.
    # It is possible that we do, for instance:
    #
    # 1) This server had this task when it was starting up
    # 2) The task was migrated to another server
    #
    self.pubsub.set_sub_key_server(msg)

    # Update in-RAM state of workers
    msg['action'] = BROKER_MSG_PUBSUB.SUB_KEY_SERVER_SET.value
    self.broker_client.publish(msg)
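# ################################################################################################################################

# Illustrative only - a minimal sketch of the kind of input the handle method above works with. The concrete values
# below are assumptions made up for this example; only the key names (sub_key, endpoint_type, task_delivery_interval)
# come from the code above, which reads them from self.request.raw_request.

example_config = {
    'sub_key': 'zpsk.0123456789abcdef',  # Hypothetical subscription key
    'endpoint_type': 'rest',             # Hypothetical endpoint type identifier
    'task_delivery_interval': 2,         # Hypothetical delivery interval
}

# ################################################################################################################################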
class WebSocket(_WebSocket):
    """ Encapsulates information about an individual connection from a WebSocket client.
    """
    def __init__(self, container, config, _unusued_sock, _unusued_protocols, _unusued_extensions, wsgi_environ, **kwargs):

        # The object containing this WebSocket
        self.container = container

        # Note: the configuration object is shared by all WebSockets and any writes will be visible to all of them
        self.config = config

        # For later reference
        self.initial_http_wsgi_environ = wsgi_environ

        # Referred to soon enough so created here
        self.pub_client_id = 'ws.{}'.format(new_cid())

        super(WebSocket, self).__init__(_unusued_sock, _unusued_protocols, _unusued_extensions, wsgi_environ, **kwargs)

    def _init(self):

        # Python-level ID contains all the core details, our own ID and that of the thread (greenlet) that creates us
        _current_thread = current_thread()
        python_id = '{}.{}.{}'.format(hex(id(self)), _current_thread.name, hex(_current_thread.ident))

        # Assign core attributes to this object before calling parent class
        self.python_id = python_id

        # Must be set here and then to True later on because our parent class may already want
        # to accept connections, and we need to postpone their processing until we are initialized fully.
        self._initialized = False

        self.has_session_opened = False
        self._token = None
        self.update_lock = RLock()
        self.ext_client_id = None
        self.ext_client_name = None
        self.connection_time = self.last_seen = datetime.utcnow()
        self.sec_type = self.config.sec_type
        self.pings_missed = 0
        self.pings_missed_threshold = self.config.get('pings_missed_threshold', 5)
        self.user_data = Bunch() # Arbitrary user-defined data
        self._disconnect_requested = False # Have we been asked to disconnect this client?

        # The last time we received a ping response (pong) from our peer
        self.ping_last_response_time = None

        #
        # If the peer ever subscribes to a pub/sub topic we will periodically
        # store in the ODB information about the last time the peer either sent
        # or received anything from us. Note that we store it if:
        #
        # * The peer has at least one subscription, and
        # * At least self.pubsub_interact_interval minutes elapsed since the last update
        #
        # And:
        #
        # * The peer received a pub/sub message, or
        # * The peer sent a pub/sub message
        #
        # Or:
        #
        # * The peer did not send or receive anything, but
        # * The peer correctly responds to ping messages
        #
        # Such logic ensures that we do not overwhelm the database with frequent updates
        # if the peer uses pub/sub heavily - it is costly to do it for each message.
        #
        # At the same time, if the peer does not receive or send anything but it is still connected
        # (because it responds to pings) we set its SQL status too.
        #
        # All of this lets background processes clean up WSX clients that subscribe at one
        # point but are never seen again, which may (theoretically) happen if a peer disconnects
        # in a way that does not allow Zato to clean up its subscription status in the ODB.
        #
        self.pubsub_interact_interval = WEB_SOCKET.DEFAULT.INTERACT_UPDATE_INTERVAL
        self.interact_last_updated = None
        self.last_interact_source = None
        self.interact_last_set = None

        # Manages access to service hooks
        if self.config.hook_service:

            self.hook_tool = HookTool(self.config.parallel_server, HookCtx, hook_type_to_method, self.invoke_service)

            self.on_connected_service_invoker = self.hook_tool.get_hook_service_invoker(
                self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_CONNECTED)

            self.on_disconnected_service_invoker = self.hook_tool.get_hook_service_invoker(
                self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_DISCONNECTED)

            self.on_pubsub_response_service_invoker = self.hook_tool.get_hook_service_invoker(
                self.config.hook_service, WEB_SOCKET.HOOK_TYPE.ON_PUBSUB_RESPONSE)

        else:
            self.hook_tool = None
            self.on_connected_service_invoker = None
            self.on_disconnected_service_invoker = None
            self.on_pubsub_response_service_invoker = None

        # For publish/subscribe over WSX
        self.pubsub_tool = PubSubTool(self.config.parallel_server.worker_store.pubsub, self,
            PUBSUB.ENDPOINT_TYPE.WEB_SOCKETS.id, self.deliver_pubsub_msg)

        # Active WebSocket client ID (WebSocketClient model, web_socket_client.id in SQL)
        self._sql_ws_client_id = None

        # For tokens assigned externally independent of our WS-level self.token.
        # Such tokens will be generated by Vault, for instance.
        self.ext_token = None

        # Drop WSGI keys pointing to complex Python objects such as sockets
        for name in _wsgi_drop_keys:
            self.initial_http_wsgi_environ.pop(name, None)

        # Responses to previously sent requests - keyed by request IDs
        self.responses_received = {}

        _local_address = self.sock.getsockname()
        self._local_address = '{}:{}'.format(_local_address[0], _local_address[1])

        _peer_address = self.sock.getpeername()
        self._peer_address = '{}:{}'.format(_peer_address[0], _peer_address[1])

        self.forwarded_for = self.initial_http_wsgi_environ.get('HTTP_X_FORWARDED_FOR')

        if self.forwarded_for:
            self.forwarded_for_fqdn = socket.getfqdn(self.forwarded_for)
        else:
            self.forwarded_for_fqdn = WEB_SOCKET.DEFAULT.FQDN_UNKNOWN

        _peer_fqdn = WEB_SOCKET.DEFAULT.FQDN_UNKNOWN

        try:
            self._peer_host = socket.gethostbyaddr(_peer_address[0])[0]
            _peer_fqdn = socket.getfqdn(self._peer_host)
        except Exception:
            logger.warn(format_exc())
        finally:
            self._peer_fqdn = _peer_fqdn

        self.peer_conn_info_pretty = self.get_peer_info_pretty()

        self._parse_func = {
            DATA_FORMAT.JSON: self.parse_json,
            DATA_FORMAT.XML: self.parse_xml,
        }[self.config.data_format]

        # All set, we can process connections now
        self._initialized = True

# ################################################################################################################################

    @property
    def token(self):
        return self._token

    @token.setter
    def token(self, value):

        if not self._token:
            self._token = TokenInfo(value, self.config.token_ttl)
        else:
            self._token.value = value
            self._token.extend()

# ################################################################################################################################

    # This is a property so as to make it easier to add logging calls to observe what is getting and setting the value
    @property
    def sql_ws_client_id(self):
        return self._sql_ws_client_id

    @sql_ws_client_id.setter
    def sql_ws_client_id(self, value):
        self._sql_ws_client_id = value

# ################################################################################################################################
    def set_last_interaction_data(self, source, _now=datetime.utcnow, _interval=WEB_SOCKET.DEFAULT.INTERACT_UPDATE_INTERVAL):
        """ Updates metadata regarding pub/sub about this WSX connection.
        """
        with self.update_lock:

            # Local aliases
            now = _now()

            # Update last interaction metadata time for our peer
            self.last_interact_source = source

            # It is possible that we set the metadata the first time,
            # in which case we will always invoke the services, having first stored the current timestamp for later use.
            if not self.interact_last_set:
                self.interact_last_set = now
                needs_services = True
            else:
                # We must have been already called before, in which case we execute the services only if it is our time to do it.
                needs_services = True if self.interact_last_updated + timedelta(minutes=_interval) < now else False

            # Are we to invoke the services this time?
            # (An illustrative sketch of this interval gating follows the class body.)
            if needs_services:

                now_formatted = now.isoformat()

                pub_sub_request = {
                    'sub_key': self.pubsub_tool.get_sub_keys(),
                    'last_interaction_time': now_formatted,
                    'last_interaction_type': self.last_interact_source,
                    'last_interaction_details': self.get_peer_info_pretty(),
                }

                wsx_request = {
                    'id': self.sql_ws_client_id,
                    'last_seen': now_formatted,
                }

                logger.info('Setting pub/sub interaction metadata `%s`', pub_sub_request)
                self.invoke_service('zato.pubsub.subscription.update-interaction-metadata', pub_sub_request)

                logger.info('Setting WSX last seen `%s`', wsx_request)
                self.invoke_service('zato.channel.web-socket.client.set-last-seen', wsx_request)

                # Finally, store it for future use
                self.interact_last_updated = now

# ################################################################################################################################

    def deliver_pubsub_msg(self, sub_key, msg):
        """ Delivers one or more pub/sub messages to the connected WSX client.
        """
        ctx = {}

        if isinstance(msg, PubSubMessage):
            len_msg = 1
        else:
            len_msg = len(msg)
            msg = msg[0] if len_msg == 1 else msg

        # A list of messages is given on input so we need to serialize each of them individually
        if isinstance(msg, list):
            cid = new_cid()
            data = []
            for elem in msg:
                data.append(elem.serialized if elem.serialized else elem.to_external_dict())
                if elem.reply_to_sk:
                    ctx_reply_to_sk = ctx.setdefault('reply_to_sk', [])
                    ctx_reply_to_sk.append(elem.reply_to_sk)

        # A single message was given on input
        else:
            cid = msg.pub_msg_id
            data = msg.serialized if msg.serialized else msg.to_external_dict()
            if msg.reply_to_sk:
                ctx['reply_to_sk'] = msg.reply_to_sk

        logger.info('Delivering %d pub/sub message{} to sub_key `%s` (ctx:%s)'.format('s' if len_msg > 1 else ''),
            len_msg, sub_key, ctx)

        # Actually deliver messages
        self.invoke_client(cid, data, ctx=ctx, _Class=InvokeClientPubSubRequest)

        # We get here if there was no exception = we can update pub/sub metadata
        self.set_last_interaction_data('pubsub.deliver_pubsub_msg')

# ################################################################################################################################

    def add_sub_key(self, sub_key):
        self.pubsub_tool.add_sub_key(sub_key)

# ################################################################################################################################

    def remove_sub_key(self, sub_key):
        self.pubsub_tool.remove_sub_key(sub_key)

# ################################################################################################################################

    def add_pubsub_message(self, sub_key, message):
        self.pubsub_tool.add_message(sub_key, message)

# ################################################################################################################################
    def get_peer_info_dict(self):
        return {
            'name': self.ext_client_name,
            'ext_client_id': self.ext_client_id,
            'forwarded_for_fqdn': self.forwarded_for_fqdn,
            'peer_fqdn': self._peer_fqdn,
            'pub_client_id': self.pub_client_id,
            'python_id': self.python_id,
            'sock': str(getattr(self, 'sock', '')),
            'swc': self.sql_ws_client_id,
        }

# ################################################################################################################################

    def get_peer_info_pretty(self):
        return 'name:`{}` id:`{}` fwd_for:`{}` conn:`{}` pub:`{}`, py:`{}`, sock:`{}`, swc:`{}`'.format(
            self.ext_client_name, self.ext_client_id, self.forwarded_for_fqdn, self._peer_fqdn, self.pub_client_id,
            self.python_id, getattr(self, 'sock', ''), self.sql_ws_client_id)

# ################################################################################################################################

    def get_on_connected_hook(self):
        """ Returns a hook triggered when a new connection was made.
        """
        if self.hook_tool:
            return self.on_connected_service_invoker

# ################################################################################################################################

    def get_on_disconnected_hook(self):
        """ Returns a hook triggered when an existing connection was dropped.
        """
        if self.hook_tool:
            return self.on_disconnected_service_invoker

# ################################################################################################################################

    def get_on_pubsub_hook(self):
        """ Returns a hook triggered when a pub/sub response arrives from the connected client.
        """
        if self.hook_tool:
            return self.on_pubsub_response_service_invoker

# ################################################################################################################################

    def parse_json(self, data, _create_session=WEB_SOCKET.ACTION.CREATE_SESSION, _response=WEB_SOCKET.ACTION.CLIENT_RESPONSE):

        # An illustrative sketch of the message layout handled here follows the class body.

        parsed = loads(data)
        msg = ClientMessage()

        meta = parsed.get('meta', {})

        if meta:
            meta = bunchify(meta)

            msg.action = meta.get('action', _response)
            msg.id = meta.id
            msg.timestamp = meta.timestamp
            msg.token = meta.get('token') # Optional because it won't exist during first authentication

            # self.ext_client_id and self.ext_client_name will exist after the create-session action
            # so we use them if they are available but fall back to meta.client_id and meta.client_name
            # during the very create-session action.
            if meta.get('client_id'):
                self.ext_client_id = meta.client_id

            ext_client_name = meta.get('client_name')
            if ext_client_name:
                if isinstance(ext_client_name, dict):
                    _ext_client_name = []
                    for key, value in sorted(ext_client_name.items()):
                        _ext_client_name.append('{}: {}'.format(key, value))
                    ext_client_name = '; '.join(_ext_client_name)

            msg.ext_client_name = ext_client_name
            msg.ext_client_id = self.ext_client_id

            if msg.action == _create_session:
                msg.username = meta.get('username')

                # Secret is optional because WS channels may be without credentials attached
                msg.secret = meta.secret if self.config.needs_auth else ''

                msg.is_auth = True
            else:
                msg.in_reply_to = meta.get('in_reply_to')
                msg.is_auth = False

                ctx = meta.get('ctx')
                if ctx:
                    msg.reply_to_sk = ctx.get('reply_to_sk')
                    msg.deliver_to_sk = ctx.get('deliver_to_sk')

        msg.data = parsed.get('data', {})

        return msg

# ################################################################################################################################

    def parse_xml(self, data):
        raise NotImplementedError('Not supported yet')

# ################################################################################################################################

    def create_session(self, cid, request, _sec_def_type_vault=SEC_DEF_TYPE.VAULT, _VAULT_TOKEN_HEADER=VAULT_TOKEN_HEADER):
        """ Creates a new session in the channel's auth backend and assigns metadata based on the backend's response.
        """
        # This dictionary will be written to
        headers = {}

        if not self.config.needs_auth:
            can_create_session = True
        else:
            can_create_session = self.config.auth_func(
                request.cid, self.sec_type, {'username':request.username, 'secret':request.secret}, self.config.sec_name,
                self.config.vault_conn_default_auth_method, self.initial_http_wsgi_environ, headers)

        if can_create_session:

            with self.update_lock:

                # If we are using Vault, use its own header
                if self.config.sec_type == _sec_def_type_vault:
                    self.ext_token = headers['zato.http.response.headers'][_VAULT_TOKEN_HEADER]
                    self_token = self.ext_token

                # Otherwise, generate our own
                else:
                    self_token = new_cid()

                self.token = 'ws.token.{}'.format(self_token)

            self.has_session_opened = True
            self.ext_client_id = request.ext_client_id
            self.ext_client_name = request.ext_client_name

            # Update the pretty peer name now that we have more details about it
            self.peer_conn_info_pretty = self.get_peer_info_pretty()

            logger.info('Assigning wsx py:`%s` to `%s`', self.python_id, self.peer_conn_info_pretty)

            return AuthenticateResponse(self.token.value, request.cid, request.id).serialize()

# ################################################################################################################################

    def on_forbidden(self, action, data=copy_forbidden):
        cid = new_cid()
        logger.warn(
            'Peer %s (%s) %s, closing its connection to %s (%s), cid:`%s` (%s)', self._peer_address, self._peer_fqdn, action,
            self._local_address, self.config.name, cid, self.peer_conn_info_pretty)
        self.send(Forbidden(cid, data).serialize())

        self.server_terminated = True
        self.client_terminated = True

# ################################################################################################################################
    def send_background_pings(self, ping_extend=30):

        logger.info('Starting WSX background pings for `%s`', self.peer_conn_info_pretty)

        try:
            while self.stream:

                # Sleep for N seconds before sending a ping but check if we are connected upfront because
                # we could have disconnected in between the while and sleep calls.
                sleep(ping_extend)

                # Ok, still connected
                if self.stream:
                    try:
                        response = self.invoke_client(new_cid(), None, use_send=False)
                    except RuntimeError:
                        logger.warn('Closing connection due to `%s`', format_exc())
                        self.on_socket_terminated()

                    with self.update_lock:
                        if response:
                            self.pings_missed = 0
                            self.ping_last_response_time = datetime.utcnow()
                            self.token.extend(ping_extend)
                        else:
                            self.pings_missed += 1
                            if self.pings_missed < self.pings_missed_threshold:
                                logger.warn(
                                    'Peer %s (%s) missed %s/%s ping messages from %s (%s). Last response time: %s{} (%s)'.format(
                                        ' UTC' if self.ping_last_response_time else ''),
                                    self._peer_address, self._peer_fqdn, self.pings_missed, self.pings_missed_threshold,
                                    self._local_address, self.config.name, self.ping_last_response_time,
                                    self.peer_conn_info_pretty)
                            else:
                                self.on_forbidden('missed {}/{} ping messages'.format(
                                    self.pings_missed, self.pings_missed_threshold))

                # No stream = already disconnected, we can quit
                else:
                    return

        except Exception:
            logger.warn(format_exc())

# ################################################################################################################################

    def _get_hook_request(self):
        out = bunchify({
            'peer_address': self._peer_address,
            'peer_host': self._peer_host,
            'peer_fqdn': self._peer_fqdn,
        })

        for name in HookCtx.__slots__:
            if name not in ('hook_type', 'peer_address', 'peer_host', 'peer_fqdn', 'msg'):
                out[name] = getattr(self, name)

        return out

# ################################################################################################################################

    def register_auth_client(self, _assigned_msg='Assigned sws_id:`%s` to `%s` (%s %s %s)'):
        """ Registers the peer in ODB and sets up background pings to keep its connection alive.
        Called only if authentication succeeded.
        """
        self.sql_ws_client_id = self.invoke_service('zato.channel.web-socket.client.create', {
            'pub_client_id': self.pub_client_id,
            'ext_client_id': self.ext_client_id,
            'ext_client_name': self.ext_client_name,
            'is_internal': True,
            'local_address': self.local_address,
            'peer_address': self.peer_address,
            'peer_fqdn': self._peer_fqdn,
            'connection_time': self.connection_time,
            'last_seen': self.last_seen,
            'channel_name': self.config.name,
            'peer_forwarded_for': self.forwarded_for,
            'peer_forwarded_for_fqdn': self.forwarded_for_fqdn,
        }, needs_response=True).ws_client_id

        logger.info(
            _assigned_msg, self.sql_ws_client_id, self.python_id, self.pub_client_id, self.ext_client_id, self.ext_client_name)

        # Run the relevant on_connected hook, if any is available
        hook = self.get_on_connected_hook()

        if hook:
            hook(**self._get_hook_request())

        spawn(self.send_background_pings)

# ################################################################################################################################

    def unregister_auth_client(self):
        """ Unregisters an already registered peer in ODB.
        """
        if self.has_session_opened:

            # Deletes state from SQL
            self.invoke_service('zato.channel.web-socket.client.delete-by-pub-id', {
                'pub_client_id': self.pub_client_id,
            })

            if self.pubsub_tool.sub_keys:

                # Deletes across all workers the in-RAM pub/sub state about the client that is disconnecting
                self.invoke_service('zato.channel.web-socket.client.unregister-ws-sub-key', {
                    'sub_key_list': list(self.pubsub_tool.sub_keys),
                })

                # Clears out our own delivery tasks
                self.pubsub_tool.remove_all_sub_keys()

        # Run the relevant on_disconnected hook, if any is available (even if the session was never opened)
        hook = self.get_on_disconnected_hook()
        if hook:
            hook(WEB_SOCKET.HOOK_TYPE.ON_DISCONNECTED, self.config.hook_service, **self._get_hook_request())

# ################################################################################################################################

    def handle_create_session(self, cid, request):
        if request.is_auth:
            response = self.create_session(cid, request)
            if response:
                self.register_auth_client()
                self.send(response)
                logger.info(
                    'Client %s logged in successfully to %s (%s) (%s)', self.pub_client_id, self._local_address,
                    self.config.name, self.peer_conn_info_pretty)
            else:
                self.on_forbidden('sent invalid credentials')
        else:
            self.on_forbidden('is not authenticated')

# ################################################################################################################################

    def invoke_service(self, service_name, data, cid=None, needs_response=True, _channel=CHANNEL.WEB_SOCKET,
        _data_format=DATA_FORMAT.DICT, serialize=False):

        # It is possible that this method will be invoked before self.__init__ completes,
        # because self's parent manages the underlying TCP stream, in which case self
        # will not be fully initialized yet so we need to wait a bit until it is.
        while not self._initialized:
            sleep(0.1)

        return self.config.on_message_callback({
            'cid': cid or new_cid(),
            'data_format': _data_format,
            'service': service_name,
            'payload': data,
            'environ': {
                'web_socket': self,
                'sql_ws_client_id': self.sql_ws_client_id,
                'ws_channel_config': self.config,
                'ws_token': self.token,
                'ext_token': self.ext_token,
                'pub_client_id': self.pub_client_id,
                'ext_client_id': self.ext_client_id,
                'ext_client_name': self.ext_client_name,
                'peer_conn_info_pretty': self.peer_conn_info_pretty,
                'connection_time': self.connection_time,
                'pings_missed': self.pings_missed,
                'pings_missed_threshold': self.pings_missed_threshold,
                'peer_host': self._peer_host,
                'peer_fqdn': self._peer_fqdn,
                'forwarded_for': self.forwarded_for,
                'forwarded_for_fqdn': self.forwarded_for_fqdn,
                'initial_http_wsgi_environ': self.initial_http_wsgi_environ,
            },
        }, CHANNEL.WEB_SOCKET, None, needs_response=needs_response, serialize=serialize)

# ################################################################################################################################

    def handle_client_message(self, cid, msg, _action=WEB_SOCKET.ACTION):
        self._handle_client_response(cid, msg) if msg.action == _action.CLIENT_RESPONSE else self._handle_invoke_service(cid, msg)

# ################################################################################################################################

    def _handle_invoke_service(self, cid, msg):

        try:
            service_response = self.invoke_service(self.config.service_name, msg.data, cid=cid)
        except Exception as e:

            logger.warn('Service `%s` could not be invoked, id:`%s` cid:`%s`, conn:`%s`, e:`%s`',
                self.config.service_name, msg.id, cid, self.peer_conn_info_pretty, format_exc())

            # Errors known to map to HTTP ones
            if isinstance(e, Reportable):
                status = e.status
                error_message = e.msg

            # Catch SimpleIO-related errors, i.e. missing input parameters
            elif isinstance(e, ParsingException):
                status = BAD_REQUEST
                error_message = 'I/O processing error'

            # Anything else
            else:
                status = INTERNAL_SERVER_ERROR
                error_message = 'Internal server error'

            response = ErrorResponse(cid, msg.id, status, error_message)

        else:
            response = OKResponse(cid, msg.id, service_response)

        serialized = response.serialize()

        logger.info('Sending response `%s` from `%s` to `%s` `%s` `%s` `%s`',
            serialized, self.python_id, self.pub_client_id, self.ext_client_id, self.ext_client_name, self.peer_conn_info_pretty)

        try:
            self.send(serialized)
        except AttributeError as e:
            if e.message == "'NoneType' object has no attribute 'text_message'":
                _msg = 'Service response discarded (client disconnected), cid:`%s`, msg.meta:`%s`'
                _meta = msg.get_meta()
                logger.warn(_msg, cid, _meta)
                logger_zato.warn(_msg, cid, _meta)

# ################################################################################################################################

    def _wait_for_event(self, wait_time, condition_callable, _now=datetime.utcnow, _delta=timedelta, _sleep=sleep,
        *args, **kwargs):
        now = _now()
        until = now + _delta(seconds=wait_time)

        while now < until:

            response = condition_callable(*args, **kwargs)
            if response:
                return response
            else:
                _sleep(0.01)
                now = _now()

# ################################################################################################################################

    def _handle_client_response(self, cid, msg, _msg_id_prefix=MSG_PREFIX.MSG_ID):
        """ Processes responses from WSX clients - either invokes callbacks for pub/sub responses or adds the message
        to the list of received ones because someone is waiting for it.
        """
        # Pub/sub response
        if msg.in_reply_to.startswith(_msg_id_prefix):
            hook = self.get_on_pubsub_hook()
            if not hook:
                log_msg = 'Ignoring pub/sub response, on_pubsub_response hook not implemented for `%s`, conn:`%s`, msg:`%s`'
                logger.warn(log_msg, self.config.name, self.peer_conn_info_pretty, msg)
                logger_zato.warn(log_msg, self.config.name, self.peer_conn_info_pretty, msg)
            else:
                request = self._get_hook_request()
                request['msg'] = msg
                hook(**request)

        # Regular synchronous response, simply enqueue it and someone else will take care of it
        else:
            self.responses_received[msg.in_reply_to] = msg

    def _has_client_response(self, request_id):
        return self.responses_received.get(request_id)

    def _wait_for_client_response(self, request_id, wait_time=5):
        """ Waits until a response from the client arrives and returns it, or returns None if there is no response
        up to wait_time. (A usage sketch follows the class body.)
        """
        return self._wait_for_event(wait_time, self._has_client_response, request_id=request_id)

# ################################################################################################################################

    def _received_message(self, data, _now=datetime.utcnow, _default_data='', *args, **kwargs):

        # This is one of the methods that can be invoked before self.__init__ completes,
        # because self's parent manages the underlying TCP stream, in which case self
        # will not be fully initialized yet so we need to wait a bit until it is.
        while not self._initialized:
            sleep(0.1)

        try:
            request = self._parse_func(data or _default_data)
            cid = new_cid()
            now = _now()
            self.last_seen = now

            logger.info('Request received cid:`%s`, client:`%s`', cid, self.pub_client_id)

            # If the client is authenticated, allow it to re-authenticate, which grants a new token, or to invoke a service.
            # Otherwise, authentication is required.

            if self.has_session_opened:

                # Reject the request if an already existing token was not given on input - it should have been
                # because the client is authenticated after all.
                if not request.token:
                    self.on_forbidden('did not send token')
                    return

                # Reject the request if a token is provided but it has already expired
                if _now() > self.token.expires_at:
                    self.on_forbidden('used an expired token')
                    return

                # Ok, we can proceed
                try:
                    self.handle_client_message(cid, request) if not request.is_auth else self.handle_create_session(cid, request)
                except RuntimeError as e:
                    if e.message == 'Cannot send on a terminated websocket':
                        msg = 'Ignoring message (client disconnected), cid:`%s`, request:`%s` conn:`%s`'
                        logger.info(msg, cid, request, self.peer_conn_info_pretty)
                        logger_zato.info(msg, cid, request, self.peer_conn_info_pretty)
                    else:
                        raise

            # Unauthenticated - require credentials on input
            else:
                self.handle_create_session(cid, request)

            logger.info('Response returned cid:`%s`, time:`%s`', cid, _now() - now)

        except Exception:
            logger.warn(format_exc())
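# ################################################################################################################################

# Illustrative only - a self-contained sketch of the interval gating used by set_last_interaction_data above: metadata
# is written the first time around and then at most once per interval, so heavy pub/sub traffic does not turn into one
# database write per message. The class and method names below are made up for this example.

from datetime import datetime, timedelta

class _InteractionThrottle(object):
    """ Remembers when metadata was last written and says whether it is time to write it again. """

    def __init__(self, interval_minutes):
        self.interval = timedelta(minutes=interval_minutes)
        self.last_updated = None

    def needs_update(self, now=None):
        now = now or datetime.utcnow()
        if not self.last_updated:
            return True # First time around - always write
        return self.last_updated + self.interval < now

    def mark_updated(self, now=None):
        self.last_updated = now or datetime.utcnow()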
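# ################################################################################################################################

# Illustrative only - a usage sketch of the polling pattern behind _wait_for_event and _wait_for_client_response above:
# a condition callable is re-evaluated every 0.01 s until it returns a truthy value or wait_time elapses. The wsx object
# and request ID below are assumptions made up for this example.

def wait_for_response_example(wsx, request_id):

    # Returns the response object if the client replied within 5 seconds, otherwise None
    response = wsx._wait_for_event(5, wsx._has_client_response, request_id=request_id)

    if response:
        return response
    else:
        raise Exception('No response to `{}` within 5 seconds'.format(request_id))

# ################################################################################################################################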