def handle(self):
    response = []

    with closing(self.odb.session()) as session:
        for item in self.get_data(session):
            self._add_queue_depths(session, item)
            item.creation_time = datetime_from_ms(item.creation_time * 1000.0)

            if item.last_interaction_time:
                item.last_interaction_time = datetime_from_ms(item.last_interaction_time * 1000.0)

            if item.last_interaction_details:
                if not isinstance(item.last_interaction_details, unicode):
                    item.last_interaction_details = item.last_interaction_details.decode('utf8')

            response.append(item)

    self.response.payload[:] = response
def handle(self):
    msg = self.pubsub.sync_backlog.get_message_by_id(self.request.input.msg_id)

    # We need to re-arrange attributes but we don't want to update the original message in place
    msg = deepcopy(msg)

    msg['msg_id'] = msg.pop('pub_msg_id')
    msg['correl_id'] = msg.pop('pub_correl_id', None)
    msg['pub_time'] = datetime_from_ms(msg['pub_time'] * 1000.0)

    expiration_time = msg.pop('expiration_time', None)
    if expiration_time:
        msg['expiration_time'] = datetime_from_ms(expiration_time * 1000.0)

    msg['endpoint_id'] = msg.pop('published_by_id')
    msg['endpoint_name'] = self.pubsub.get_endpoint_by_id(msg['endpoint_id']).name

    self.response.payload = msg
def handle(self, _not_given=object()):
    with closing(self.odb.session()) as session:
        needs_sub_queue_check = self.request.input.get('needs_sub_queue_check', _not_given)
        needs_sub_queue_check = needs_sub_queue_check if needs_sub_queue_check is not _not_given else True

        item = pubsub_message(session, self.request.input.cluster_id, self.request.input.msg_id,
            needs_sub_queue_check).\
            first()

        if item:
            item.pub_time = datetime_from_ms(item.pub_time * 1000)
            item.ext_pub_time = datetime_from_ms(item.ext_pub_time * 1000) if item.ext_pub_time else ''
            item.expiration_time = datetime_from_ms(item.expiration_time * 1000) if item.expiration_time else ''
            self.response.payload = item
        else:
            raise NotFound(self.cid, 'No such message `{}`'.format(self.request.input.msg_id))
def handle(self, _msg='Cleaning up WSX pub/sub, channel:`%s`, now:`%s (%s)`, md:`%s`, ma:`%s` (%s)'):

    # We receive a multi-line list of WSX channel name -> max timeout accepted on input
    config = parse_extra_into_dict(self.request.raw_request)

    with closing(self.odb.session()) as session:

        # Delete stale connections for each subscriber
        for channel_name, max_delta in config.items():

            # Input timeout is in minutes but timestamps in ODB are in seconds,
            # so we convert the minutes to seconds, as expected by the database.
            max_delta = max_delta * 60

            # We compare everything using seconds
            now = utcnow_as_ms()

            # The last interaction time for each connection must not be older than that many seconds ago
            max_allowed = now - max_delta

            now_as_iso = datetime_from_ms(now * 1000)
            max_allowed_as_iso = datetime_from_ms(max_allowed * 1000)

            self.logger.info(_msg, channel_name, now_as_iso, now, max_delta, max_allowed_as_iso, max_allowed)
            logger_pubsub.info(_msg, channel_name, now_as_iso, now, max_delta, max_allowed_as_iso, max_allowed)

            # Delete old connections for that channel
            session.execute(
                SubscriptionDelete().\
                where(SubscriptionTable.c.ws_channel_id==WSXChannelTable.c.id).\
                where(WSXChannelTable.c.name==channel_name).\
                where(SubscriptionTable.c.last_interaction_time < max_allowed)
            )

        # Commit all deletions
        session.commit()
def handle(self, _channel=CHANNEL.IBM_MQ, ts_format='YYYYMMDDHHmmssSS'):
    request = loads(self.request.raw_request)
    msg = request['msg']
    service_name = request['service_name']

    # Make MQ-level attributes easier to handle
    correlation_id = unhexlify(msg['correlation_id']) if msg['correlation_id'] else None
    expiration = datetime_from_ms(msg['expiration']) if msg['expiration'] else None

    timestamp = '{}{}'.format(msg['put_date'], msg['put_time'])
    timestamp = arrow_get(timestamp, ts_format).replace(tzinfo='UTC').datetime

    # Extract MQMD
    mqmd = msg['mqmd']
    mqmd = b64decode(mqmd)
    mqmd = pickle_loads(mqmd)

    # Find the message's CCSID
    request_ccsid = mqmd.CodedCharSetId

    # Try to find an encoding matching the CCSID;
    # if none is found, use the default one.
    try:
        encoding = CCSIDConfig.encoding_map[request_ccsid]
    except KeyError:
        encoding = CCSIDConfig.default_encoding

    # Encode the input Unicode data into bytes
    msg['text'] = msg['text'].encode(encoding)

    # Extract the business payload
    data = payload_from_request(self.server.json_parser, self.cid, msg['text'], request['data_format'], None)

    # Invoke the target service
    self.invoke(service_name, data, _channel, wmq_ctx={
        'msg_id': unhexlify(msg['msg_id']),
        'correlation_id': correlation_id,
        'timestamp': timestamp,
        'put_time': msg['put_time'],
        'put_date': msg['put_date'],
        'expiration': expiration,
        'reply_to': msg['reply_to'],
        'data': data,
        'mqmd': mqmd
    })
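# The handler above looks up a Python codec by the MQMD's CodedCharSetId. CCSIDConfig
# itself is defined elsewhere; the sketch below is only a plausible shape for it, not
# the actual class - the CCSID-to-codec pairs shown are standard IBM assignments, but
# the real map is much larger and its defaults may differ.
class CCSIDConfigSketch(object):
    default_encoding = 'utf-8'
    encoding_map = {
        437:  'cp437',   # US PC code page
        1200: 'utf-16',  # Unicode UTF-16
        1208: 'utf-8',   # Unicode UTF-8
    }

# Example lookup mirroring the try/except in the handler
encoding = CCSIDConfigSketch.encoding_map.get(1208, CCSIDConfigSketch.default_encoding)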
def set_non_gd_msg_list_response(self, msg_list, cur_page, _sort_key=itemgetter('pub_time')):
    """ Paginates a list of non-GD messages (from topics or queues) and returns the results.
    """
    cur_page = cur_page - 1 if cur_page else 0 # We index lists from 0

    # Set it here because later on the list may be shortened to page_size elements
    total = len(msg_list)

    # Proceed only if any data was actually collected
    if msg_list:

        # Sort the output before it is returned - messages published last (youngest) come first
        msg_list.sort(key=_sort_key, reverse=True)

        start = cur_page * _page_size
        end = start + _page_size
        msg_list = msg_list[start:end]

        for msg in msg_list:

            # Convert float timestamps in all the remaining messages to ISO-8601
            msg['pub_time'] = datetime_from_ms(msg['pub_time'] * 1000.0)
            if msg.get('expiration_time'):
                msg['expiration_time'] = datetime_from_ms(msg['expiration_time'] * 1000.0)

            # Return endpoint information in the same format GD messages are returned in
            msg['endpoint_id'] = msg.pop('published_by_id')
            msg['endpoint_name'] = self.pubsub.get_endpoint_by_id(msg['endpoint_id']).name

    search_results = SearchResults(None, None, None, total)
    search_results.set_data(cur_page, _page_size)

    # This goes to the service's response payload object
    self.response.payload.response = msg_list
    self.response.payload._meta = search_results.to_dict()
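# A minimal sketch of the pagination arithmetic used above and in the topic message-list
# handler further down, assuming _page_size is the module-level constant it appears to be
# (the value 50 below is only illustrative, not the actual setting).
_page_size = 50

def _page_slice(cur_page_input):
    # Pages arrive 1-indexed; an input of 0 or None means the first page
    cur_page = cur_page_input - 1 if cur_page_input else 0
    start = cur_page * _page_size
    return start, start + _page_size

# E.g. an input of cur_page=3 selects list elements 100..149
assert _page_slice(3) == (100, 150)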
def handle(self):
    ps_tool = self.pubsub.get_pubsub_tool_by_sub_key(self.request.input.sub_key)
    messages = ps_tool.get_messages(self.request.input.sub_key, False)

    data_prefix_len = self.pubsub.data_prefix_len
    data_prefix_short_len = self.pubsub.data_prefix_short_len

    self.response.payload[:] = [
        make_short_msg_copy_from_msg(elem, data_prefix_len, data_prefix_short_len) for elem in messages]

    for elem in self.response.payload:
        elem['recv_time'] = datetime_from_ms(elem['recv_time'] * 1000.0)
        elem['published_by_name'] = self.pubsub.get_endpoint_by_id(elem['published_by_id']).name
def handle(self):
    with closing(self.odb.session()) as session:
        item = pubsub_queue_message(session, self.request.input.cluster_id, self.request.input.msg_id).\
            first()

        if item:
            item.expiration = item.expiration or None
            item_dict = item._asdict()

            for name in ('expiration_time', 'recv_time', 'ext_pub_time', 'last_delivery_time'):
                value = item_dict.get(name)
                if value:
                    item_dict[name] = datetime_from_ms(value * 1000.0)

            self.response.payload = item_dict
            self.response.payload['published_by_name'] = self.pubsub.get_endpoint_by_id(
                item_dict['published_by_id']).name
        else:
            raise NotFound(self.cid, 'No such message `{}`'.format(self.request.input.msg_id))
def handle(self):
    input = self.request.input
    self.response.payload.msg_id = input.msg_id
    session = self.odb.session() if self._message_update_has_gd else None

    try:
        # Get the message from its storage, no matter what that storage is
        item = self._get_item(input, session)

        if session and (not item):
            self.response.payload.found = False
            return

        item.data = input.data.encode('utf8')
        item.data_prefix = input.data[:self.pubsub.data_prefix_len].encode('utf8')
        item.data_prefix_short = input.data[:self.pubsub.data_prefix_short_len].encode('utf8')
        item.size = len(input.data)
        item.expiration = get_expiration(self.cid, input)
        item.priority = get_priority(self.cid, input)

        item.msg_id = input.msg_id
        item.pub_correl_id = input.correl_id
        item.in_reply_to = input.in_reply_to
        item.mime_type = input.mime_type

        if item.expiration:
            if self.request.input.exp_from_now:
                from_ = utcnow_as_ms()
            else:
                from_ = item.pub_time
            item.expiration_time = from_ + (item.expiration / 1000.0)
        else:
            item.expiration_time = None

        # Save data to its storage, SQL for GD and RAM for non-GD messages
        found = self._save_item(item, input, session)

        self.response.payload.found = found
        self.response.payload.size = item.size
        self.response.payload.expiration_time = datetime_from_ms(
            item.expiration_time * 1000.0) if item.expiration_time else None

    finally:
        if session:
            session.close()
def handle(self, _utcnow=datetime.utcnow):
    input = self.request.input

    with closing(self.odb.session()) as session:
        item = session.query(PubSubMessage).\
            filter(PubSubMessage.cluster_id==input.cluster_id).\
            filter(PubSubMessage.pub_msg_id==input.msg_id).\
            first()

        if not item:
            self.response.payload.found = False
            return

        item.data = input.data.encode('utf8')
        item.data_prefix = input.data[:self.pubsub.data_prefix_len].encode('utf8')
        item.data_prefix_short = input.data[:self.pubsub.data_prefix_short_len].encode('utf8')
        item.size = len(input.data)
        item.expiration = get_expiration(self.cid, input)
        item.priority = get_priority(self.cid, input)

        item.pub_correl_id = input.correl_id
        item.in_reply_to = input.in_reply_to
        item.mime_type = input.mime_type

        if item.expiration:
            if self.request.input.exp_from_now:
                from_ = utcnow_as_ms()
            else:
                from_ = item.pub_time
            item.expiration_time = from_ + (item.expiration / 1000.0)
        else:
            item.expiration_time = None

        session.add(item)
        session.commit()

        self.response.payload.found = True
        self.response.payload.size = item.size
        self.response.payload.expiration_time = datetime_from_ms(
            item.expiration_time * 1000.0) if item.expiration_time else None
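# A worked example of the unit handling in the two update handlers above, under the
# assumption (consistent with the rest of this code) that pub_time is a float of
# seconds since the epoch while expiration arrives as a number of milliseconds:
#
#   pub_time        = 1546300800.0                     # 2019-01-01 00:00:00 UTC, in seconds
#   expiration      = 15000                            # 15 seconds, expressed in milliseconds
#   expiration_time = 1546300800.0 + 15000 / 1000.0    # = 1546300815.0, i.e. seconds again
#
# The response then hands datetime_from_ms the value expiration_time * 1000.0,
# converting back to milliseconds for display as ISO-8601.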
def handle(self):
    with closing(self.odb.session()) as session:
        item = pubsub_queue_message(session, self.request.input.cluster_id, self.request.input.msg_id).\
            first()

        if item:
            item.expiration = item.expiration or None

            for name in ('expiration_time', 'recv_time', 'ext_pub_time', 'last_delivery_time'):
                value = getattr(item, name, None)
                if value:
                    setattr(item, name, datetime_from_ms(value))

            self.response.payload = item
        else:
            raise NotFound(self.cid, 'No such message `{}`'.format(self.request.input.msg_id))
def handle(self):
    pubsub_tool = self.pubsub.get_pubsub_tool_by_sub_key(self.request.input.sub_key)
    msg = pubsub_tool.get_message(self.request.input.sub_key, self.request.input.msg_id)

    if msg:
        msg = msg.to_dict()

        msg['msg_id'] = msg.pop('pub_msg_id')
        msg['correl_id'] = msg.pop('pub_correl_id', None)

        for name in ('pub_time', 'ext_pub_time', 'expiration_time', 'recv_time'):
            value = msg.pop(name, None)
            if value:
                msg[name] = datetime_from_ms(value * 1000.0)

        msg['published_by_name'] = self.pubsub.get_endpoint_by_id(msg['published_by_id']).name

        subscriber_id = self.pubsub.get_subscription_by_sub_key(self.request.input.sub_key).endpoint_id
        subscriber_name = self.pubsub.get_endpoint_by_id(subscriber_id).name

        msg['subscriber_id'] = subscriber_id
        msg['subscriber_name'] = subscriber_name

        self.response.payload = msg
def handle(self, _channel=CHANNEL.WEBSPHERE_MQ, ts_format='YYYYMMDDHHmmssSS'):
    request = loads(self.request.raw_request)
    msg = request['msg']
    service_name = request['service_name']

    # Make MQ-level attributes easier to handle
    correlation_id = unhexlify(msg['correlation_id']) if msg['correlation_id'] else None
    expiration = datetime_from_ms(msg['expiration']) if msg['expiration'] else None

    timestamp = '{}{}'.format(msg['put_date'], msg['put_time'])
    timestamp = arrow_get(timestamp, ts_format).replace(tzinfo='UTC').datetime

    # Extract the business payload
    data = payload_from_request(self.cid, msg['text'], request['data_format'], None)

    # Decode the MQMD so it can be unpickled below
    msg['mqmd'] = b64decode(msg['mqmd'])

    # Invoke the target service
    self.invoke(service_name, data, _channel, wmq_ctx={
        'msg_id': unhexlify(msg['msg_id']),
        'correlation_id': correlation_id,
        'timestamp': timestamp,
        'put_time': msg['put_time'],
        'put_date': msg['put_date'],
        'expiration': expiration,
        'reply_to': msg['reply_to'],
        'data': data,
        'mqmd': pickle_loads(msg['mqmd'])
    })
def get_last_pub_data(conn, cluster_id, topic_id, _topic_key=COMMON_PUBSUB.REDIS.META_TOPIC_LAST_KEY):
    last_data = conn.hgetall(_topic_key % (cluster_id, topic_id))
    if last_data:
        last_data['pub_time'] = datetime_from_ms(float(last_data['pub_time']) * 1000)
        return last_data
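# Every function in this section leans on the datetime_from_ms helper, which is defined
# elsewhere. The sketch below is only a plausible stand-in, assuming the helper turns a
# milliseconds-since-epoch timestamp into an ISO-8601 string in UTC; the real
# implementation may format the result differently.
from datetime import datetime, timedelta

def datetime_from_ms_sketch(ms):
    # Build the timestamp via a timedelta to sidestep platform-specific
    # range limits of datetime.utcfromtimestamp for large inputs
    return (datetime(1970, 1, 1) + timedelta(milliseconds=ms)).isoformat()

# E.g. datetime_from_ms_sketch(1546300800000) -> '2019-01-01T00:00:00'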
def handle(self):
    with closing(self.odb.session()) as session:
        data = self.get_data(session)

        for item in data:
            item.creation_time = datetime_from_ms(item.creation_time * 1000)

        self.response.payload[:] = data
def handle(self):
    with closing(self.odb.session()) as session:
        item = pubsub_endpoint_queue(session, self.request.input.cluster_id, self.request.input.id)
        item.creation_time = datetime_from_ms(item.creation_time * 1000.0)
        self.response.payload = item
        self._add_queue_depths(session, self.response.payload)
def handle(self):
    total_tasks = 0
    tasks_running = 0
    tasks_stopped = 0

    total_messages = 0
    messages_gd = 0
    messages_non_gd = 0

    total_sub_keys = 0
    topics_seen = set()

    max_last_gd_run = 0
    max_last_task_run = 0

    for item in self.pubsub.pubsub_tools:
        total_tasks += len(item.delivery_tasks)
        total_sub_keys += len(item.sub_keys)

        item_last_gd_run = item.last_gd_run
        item_last_gd_run_values = item_last_gd_run.values() if item_last_gd_run else []
        max_item_last_gd_run = max(item_last_gd_run_values) if item_last_gd_run_values else 0
        max_last_gd_run = max(max_last_gd_run, max_item_last_gd_run)

        for task in item.get_delivery_tasks():
            max_last_task_run = max(max_last_task_run, task.last_iter_run)
            topics_seen.add(task.topic_name)

            if task.is_running():
                tasks_running += 1
            else:
                tasks_stopped += 1

            gd_depth, non_gd_depth = task.get_queue_depth()
            total_messages += gd_depth
            total_messages += non_gd_depth
            messages_gd += gd_depth
            messages_non_gd += non_gd_depth

    self.response.payload.tasks = total_tasks
    self.response.payload.tasks_running = tasks_running
    self.response.payload.tasks_stopped = tasks_stopped

    self.response.payload.messages = total_messages
    self.response.payload.messages_gd = messages_gd
    self.response.payload.messages_non_gd = messages_non_gd

    self.response.payload.topics = len(topics_seen)
    self.response.payload.sub_keys = total_sub_keys

    if max_last_gd_run:
        max_last_gd_run = datetime_from_ms(max_last_gd_run * 1000)

    if max_last_task_run:
        max_last_task_run = datetime_from_ms(max_last_task_run * 1000)

    self.response.payload.last_gd_run = max_last_gd_run
    self.response.payload.last_task_run = max_last_task_run
def handle(self):

    # How far back we reach to find old connections
    max_allowed = self._get_max_allowed()

    with closing(self.odb.session()) as session:

        # Find the old connections now
        result = self._find_old_wsx_connections(session, max_allowed)

    # Nothing to do, we can return
    if not result:
        return

    # At least one old connection was found

    wsx_clients = {} # Maps pub_client_id -> _CleanupWSX object
    wsx_sub_key = {} # Maps pub_client_id -> a list of its sub_keys

    for item in result:

        wsx = wsx_clients.setdefault(item.pub_client_id, _CleanupWSX())
        wsx.pub_client_id = item.pub_client_id

        sk_list = wsx_sub_key.setdefault(item.pub_client_id, [])
        sk_list.append(item.sub_key)

    len_found = len(wsx_clients)
    suffix = '' if len_found == 1 else 's'

    self._issue_log_msg(_msg.found, len_found, suffix)

    for idx, (pub_client_id, wsx) in enumerate(iteritems(wsx_clients), 1):

        # All subscription keys for that WSX - we add them here so that below,
        # for logging purposes, we are able to say which subscriptions are actually being deleted.
        wsx.sk_dict = {}.fromkeys(wsx_sub_key[pub_client_id])

        # For each subscription of that WSX, add its details to the sk_dict
        for sub_key in wsx.sk_dict:
            sub = self.pubsub.get_subscription_by_sub_key(sub_key)
            if sub:
                wsx.sk_dict[sub_key] = {
                    'creation_time': datetime_from_ms(sub.creation_time),
                    'topic_id': sub.topic_id,
                    'topic_name': sub.topic_name,
                    'ext_client_id': sub.ext_client_id,
                    'endpoint_type': sub.config['endpoint_type'],
                    'sub_pattern_matched': sub.sub_pattern_matched,
                }

        # Log what we are about to do
        self._issue_log_msg(_msg.cleaning, idx, len_found, wsx.to_dict())

        # Unsubscribe the WebSocket first
        for sub_key, info in wsx.sk_dict.items():

            # Object 'info' may be None if we are called while the WSX connection
            # is still alive but did not respond to pings, in which case it cannot be cleaned up.
            if info:
                self._issue_log_msg(_msg.unsubscribing, sub_key, info['ext_client_id'], info['topic_name'])
                self.invoke('zato.pubsub.pubapi.unsubscribe', {
                    'sub_key': sub_key,
                    'topic_name': info['topic_name'],
                })

        # Now delete the WebSocket's state in SQL
        self._issue_log_msg(_msg.deleting, wsx.pub_client_id)

        with closing(self.odb.session()) as session:
            session.execute(
                WSXClientDelete().\
                where(WSXClientTable.c.pub_client_id==wsx.pub_client_id)
            )
            session.commit()

        # Log information that this particular connection is done with
        # (note that, for clarity, this part does not reiterate the subscription's details)
        self._issue_log_msg(_msg.cleaned_up, idx, len_found, wsx.pub_client_id)
def handle(self, _sort_key=itemgetter('pub_time')):

    # Local aliases
    topic_id = self.request.input.topic_id
    paginate = self.request.input.paginate
    cur_page = self.request.input.cur_page
    cur_page = cur_page - 1 if cur_page else 0 # We index lists from 0

    # Response to produce
    msg_list = []

    # Collect responses from all server processes
    is_all_ok, all_data = self.servers.invoke_all('zato.pubsub.topic.get-server-message-list', {
        'topic_id': topic_id,
        'query': self.request.input.query,
    }, timeout=30)

    # Check if everything is OK on each level - overall, per server and then per process
    if is_all_ok:
        for server_name, server_data in all_data.iteritems():
            if server_data['is_ok']:
                for server_pid, server_pid_data in server_data['server_data'].iteritems():
                    if server_pid_data['is_ok']:
                        pid_data = server_pid_data['pid_data']['response']['data']
                        msg_list.extend(pid_data)
                    else:
                        self.logger.warn('Caught an error (server_pid_data) %s', server_pid_data['error_info'])
            else:
                self.logger.warn('Caught an error (server_data) %s', server_data['error_info'])
    else:
        self.logger.warn('Caught an error (all_data) %s', all_data)

    # Set it here because later on the list may be shortened to page_size elements
    total = len(msg_list)

    # Proceed only if any data was actually collected
    if msg_list:

        # Sort the output before it is returned - messages published last (youngest) come first
        msg_list.sort(key=_sort_key, reverse=True)

        # If pagination is requested, return only the desired page
        if paginate:
            start = cur_page * _page_size
            end = start + _page_size
            msg_list = msg_list[start:end]

        for msg in msg_list:

            # Convert float timestamps in all the remaining messages to ISO-8601
            msg['pub_time'] = datetime_from_ms(msg['pub_time'] * 1000.0)
            if msg.get('expiration_time'):
                msg['expiration_time'] = datetime_from_ms(msg['expiration_time'] * 1000.0)

            # Return endpoint information in the same format GD messages are returned in
            msg['endpoint_id'] = msg.pop('published_by_id')
            msg['endpoint_name'] = self.pubsub.get_endpoint_by_id(msg['endpoint_id']).name

    search_results = SearchResults(None, None, None, total)
    search_results.set_data(cur_page, _page_size)

    # Actual data
    self.response.payload.response = msg_list

    # Search metadata
    self.response.payload._meta = search_results.to_dict()
def response_hook(service, input, instance, attrs, service_type):
    if service_type == 'get_list':
        for item in service.response.payload:
            if item.last_pub_time:
                item.last_pub_time = datetime_from_ms(item.last_pub_time)
def __repr__(self):
    return '<Msg sk:{} id:{} ext:{} exp:{} gd:{}>'.format(
        self.sub_key, self.pub_msg_id, self.ext_client_id,
        datetime_from_ms(self.expiration_time), self.has_gd)