Example 1
def response_hook(self, input, instance, attrs, service_type):
    if service_type == 'get_list':

        # Details are needed when topics are shown on their own main screen but if only basic information
        # is needed, like a list of topic IDs and names, there is no need to look up additional details.
        # The latter is the case of the message publication screen, which needs only a list of topic IDs/names.
        if input.get('needs_details', True):

            with closing(self.odb.session()) as session:
                for item in self.response.payload:

                    # Checks current non-GD depth on all servers
                    item.current_depth_non_gd = self.invoke(
                        'zato.pubsub.topic.collect-non-gd-depth', {
                            'topic_name': item.name,
                        })['response']['current_depth_non_gd']

                    # Checks current GD depth in SQL
                    item.current_depth_gd = get_gd_depth_topic(
                        session, input.cluster_id, item.id)

                    last_data = get_last_pub_data(self.kvdb.conn,
                                                  self.server.cluster_id,
                                                  item.id)
                    if last_data:
                        item.last_pub_time = last_data['pub_time']
                        item.last_pub_has_gd = last_data['has_gd']
                        item.last_pub_msg_id = last_data['pub_msg_id']
                        item.last_endpoint_id = last_data['endpoint_id']
                        item.last_endpoint_name = last_data['endpoint_name']
                        item.last_pub_server_pid = last_data.get('server_pid')
                        item.last_pub_server_name = last_data.get(
                            'server_name')
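
A minimal sketch of how a caller could make use of the needs_details flag handled above, assuming it runs inside another Zato service. The input keys cluster_id and needs_details are taken from the hook itself, but the service name 'zato.pubsub.topic.get-list' is an assumption made for illustration only:

    def handle(self):
        # Hypothetical caller - requests only basic data (topic IDs and names),
        # which lets the response hook above skip the per-topic depth
        # and last-publication lookups.
        response = self.invoke('zato.pubsub.topic.get-list', {
            'cluster_id': self.server.cluster_id,
            'needs_details': False,
        })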
Example 2
    def handle(self):
        with closing(self.odb.session()) as session:
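            # Look up the topic's basic data and its current GD depth in SQL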
            topic = pubsub_topic(session, self.request.input.cluster_id, self.request.input.id)
            topic['current_depth_gd'] = get_gd_depth_topic(session, self.request.input.cluster_id, self.request.input.id)

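        # Retrieve information about the topic's last publication, if any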
        last_data = get_last_pub_data(self.kvdb.conn, self.server.cluster_id, self.request.input.id)
        if last_data:
            topic['last_pub_time'] = last_data['pub_time']

        self.response.payload = topic
Example 3
    def _publish(self, ctx):
        # type: (PubCtx)
        """ Publishes GD and non-GD messages to topics and, if subscribers exist, moves them to their queues / notifies them.
        """
        len_gd_msg_list = len(ctx.gd_msg_list)
        has_gd_msg_list = bool(len_gd_msg_list)

        # Just so it is not overlooked, log information that no subscribers are found for this topic
        if not ctx.subscriptions_by_topic:

            log_msg = 'No matching subscribers found for topic `%s` (cid:%s, rr:%d)'
            log_msg_args = ctx.topic.name, self.cid, ctx.is_re_run

            # There are no subscribers and, depending on configuration, we are to either drop messages
            # for which no one is waiting or continue and place them in the topic directly.
            if ctx.topic.config.get('on_no_subs_pub') == PUBSUB.ON_NO_SUBS_PUB.DROP.id:
                log_msg_drop = 'Dropping messages. ' + log_msg
                self.logger.info(log_msg_drop, *log_msg_args)
                logger_pubsub.info(log_msg_drop, *log_msg_args)
                return
            else:
                self.logger.info(log_msg, *log_msg_args)
                logger_pubsub.info(log_msg, *log_msg_args)

        # Local aliases
        has_pubsub_audit_log = self.server.has_pubsub_audit_log

        # Increase message counters for this pub/sub server and endpoint
        ctx.pubsub.incr_pubsub_msg_counter(ctx.endpoint_id)

        # Increase message counter for this topic
        ctx.topic.incr_topic_msg_counter(has_gd_msg_list,
                                         bool(ctx.non_gd_msg_list))

        # We don't always have GD messages on input so there is no point in running an SQL transaction otherwise.
        if has_gd_msg_list:

            with closing(self.odb.session()) as session:

                # No matter if we can publish or not, we may possibly need to clean up old messages first.
                if ctx.topic.needs_msg_cleanup():
                    self._cleanup_sql_data(session, ctx.cluster_id,
                                           ctx.topic.id, ctx.now)

                # .. test first if we should check the depth in this iteration.
                if ctx.topic.needs_depth_check():

                    # Get current depth of this topic ..
                    ctx.current_depth = get_gd_depth_topic(
                        session, ctx.cluster_id, ctx.topic.id)

                    # .. and abort if max depth is already reached.
                    if ctx.current_depth + len_gd_msg_list > ctx.topic.max_depth_gd:
                        self.reject_publication(ctx.topic.name, True)
                    else:

                        # This only updates the local ctx variable
                        ctx.current_depth = ctx.current_depth + len_gd_msg_list

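                # IDs of all the GD messages - needed only for the debug log below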
                pub_msg_list = [elem['pub_msg_id'] for elem in ctx.gd_msg_list]

                if has_logger_pubsub_debug:
                    logger_pubsub.debug(_inserting_gd_msg, ctx.topic.name,
                                        pub_msg_list, ctx.endpoint_name,
                                        ctx.ext_client_id, self.cid)

                # This is the call that runs SQL INSERT statements with messages for topics and subscriber queues
                sql_publish_with_retry(session, self.cid, ctx.cluster_id,
                                       ctx.topic.id,
                                       ctx.subscriptions_by_topic,
                                       ctx.gd_msg_list, ctx.now)

                # Run an SQL commit for all queries above ..
                session.commit()

            # .. and set a flag to signal that there are some GD messages available
            ctx.pubsub.set_sync_has_msg(ctx.topic.id, True, True,
                                        'Publish.publish', ctx.now)

        # Either the commit succeeded or there were no GD messages on input, but in both cases we can now,
        # optionally, store data in the pub/sub audit log.
        if has_pubsub_audit_log:

            msg = 'Message published. CID:`%s`, topic:`%s`, from:`%s`, ext_client_id:`%s`, pattern:`%s`, new_depth:`%s`' \
                  ', GD data:`%s`, non-GD data:`%s`'

            logger_audit.info(msg, self.cid, ctx.topic.name,
                              self.pubsub.endpoints[ctx.endpoint_id].name,
                              ctx.ext_client_id, ctx.pub_pattern_matched,
                              ctx.current_depth, ctx.gd_msg_list,
                              ctx.non_gd_msg_list)

        # If this is the very first time we are running during this invocation, try to deliver non-GD messages
        if not ctx.is_re_run:

            if ctx.subscriptions_by_topic:

                # Place all the non-GD messages in the in-RAM sync backlog
                if ctx.non_gd_msg_list:
                    ctx.pubsub.store_in_ram(
                        self.cid, ctx.topic.id, ctx.topic.name,
                        [item.sub_key for item in ctx.subscriptions_by_topic],
                        ctx.non_gd_msg_list)

            # .. however, if there are no subscriptions at the moment while there are non-GD messages,
            # we need to re-run and publish all such messages as GD ones. This is because, with no
            # subscriptions, we do not know to what delivery server they should go, so it is safest
            # to store them in SQL.
            else:
                if ctx.non_gd_msg_list:

                    # Turn all non-GD messages into GD ones.
                    for msg in ctx.non_gd_msg_list:
                        msg['has_gd'] = True

                        logger_pubsub.info(
                            _log_turning_gd_msg.format('no subscribers'),
                            msg['pub_msg_id'])

                        data_prefix, data_prefix_short = self._get_data_prefixes(
                            msg['data'])
                        msg['data_prefix'] = data_prefix
                        msg['data_prefix_short'] = data_prefix_short

                    # Note the reversed order - now non-GD messages are sent as GD ones and the list of non-GD messages is empty.
                    ctx.gd_msg_list = ctx.non_gd_msg_list[:]
                    ctx.non_gd_msg_list[:] = []
                    ctx.is_re_run = True

                    # Re-run with GD and non-GD reversed now
                    self._publish(ctx)

        # Update topic and endpoint metadata in background if configured to - we have a series of if's to confirm
        # whether it is needed because it is not a given that each publication will require the update, and we also
        # want to ensure that if there are two things to be updated at a time, only one greenlet is spawned,
        # which will in turn use a single Redis pipeline to cut down on the number of Redis calls needed.
        if ctx.pubsub.has_meta_topic or ctx.pubsub.has_meta_endpoint:

            if ctx.pubsub.has_meta_topic and ctx.topic.needs_meta_update():
                has_topic = True
            else:
                has_topic = False

            if ctx.pubsub.has_meta_endpoint and ctx.pubsub.needs_endpoint_meta_update:
                has_endpoint = True
            else:
                has_endpoint = False

            if has_topic or has_endpoint:
                spawn(self._update_pub_metadata, ctx, has_topic, has_endpoint,
                      ctx.pubsub.endpoint_meta_data_len,
                      ctx.pubsub.endpoint_meta_max_history)

        # Return either a single msg_id if there was only one message published or a list of message IDs,
        # one for each message published.
        len_msg_list = len_gd_msg_list + len(ctx.non_gd_msg_list)

        if len_msg_list == 1:
            self.response.payload.msg_id = ctx.msg_id_list[0]
        else:
            self.response.payload.msg_id_list = ctx.msg_id_list
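
A short sketch of how a caller could normalize the two response shapes produced at the end of _publish - a single msg_id when one message was published, a msg_id_list otherwise. The helper name and the dict-like payload access are assumptions made for illustration:

def extract_msg_ids(payload):
    # Hypothetical helper - 'payload' is assumed to behave like a dict
    # holding either 'msg_id' or 'msg_id_list', as set by _publish above.
    msg_id = payload.get('msg_id')
    if msg_id:
        return [msg_id]
    return list(payload.get('msg_id_list') or [])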