def start(self):
        try:
            # Create the kafka client
            # assert self.kafka_host is not None
            # assert self.kafka_port is not None
            # kafka_host_port = ":".join((self.kafka_host, self.kafka_port))
            self.kafka_client = KafkaClient(self.kafka_host_port)

            # Get the kafka proxy instance.  If it does not exist then
            # create it
            self.kafka_proxy = get_kafka_proxy()
            if self.kafka_proxy is None:
                KafkaProxy(kafka_endpoint=self.kafka_host_port).start()
                self.kafka_proxy = get_kafka_proxy()

            # Subscribe the default topic and target_cls
            self.topic_target_cls_map[self.default_topic] = self.target_cls

            # Start the queue to handle incoming messages
            reactor.callLater(0, self._received_message_processing_loop)

            # Start listening for incoming messages
            reactor.callLater(0,
                              self.subscribe,
                              self.default_topic,
                              target_cls=self.target_cls)

            # Setup the singleton instance
            IKafkaMessagingProxy._kafka_messaging_instance = self
        except Exception as e:
            log.exception("Failed-to-start-proxy", e=e)
Example 2
    def __init__(self, consul_endpoint, grafana_url, topic="voltha.heartbeat"):
        #logging.basicConfig(
        # format='%(asctime)s:%(name)s:' +
        #        '%(levelname)s:%(process)d:%(message)s',
        # level=logging.INFO
        #)
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = None
        self.consul_endpoint = consul_endpoint
        retrys = 10
        while True:
            try:
                self.kafka_endpoint = get_endpoint_from_consul(
                    self.consul_endpoint, 'kafka')
                break
            except:
                log.error("unable-to-communicate-with-consul")
                self.stop()
            retrys -= 1
            if retrys == 0:
                log.error("unable-to-communicate-with-consul")
                self.stop()
            time.sleep(10)
        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []
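
The bounded retry above can also be factored into a small helper. A minimal sketch, assuming the same get_endpoint_from_consul, log, and time names the snippet already uses; the helper name and return contract are illustrative only:

def lookup_kafka_endpoint(consul_endpoint, service='kafka', retries=10, delay=10):
    # Ask Consul for the named service, retrying a bounded number of
    # times before giving up.
    for attempt in range(retries):
        try:
            return get_endpoint_from_consul(consul_endpoint, service)
        except Exception:
            log.error("unable-to-communicate-with-consul", attempt=attempt + 1)
            time.sleep(delay)
    raise RuntimeError("consul-unreachable after %d attempts" % retries)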
Example 3
    def __init__(self, consul_endpoint, topic="voltha.heartbeat", runtime=60):
        self.topic = topic
        self.runtime = runtime
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint,
                                                       'kafka')

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []
Example 4
    def __init__(self, config):

        self._topics = config['kafka']['topics']

        kafka_client = KafkaClient(
            Config.get('kafka')['server'] + ':' +
            str(Config.get('kafka')['port']))
        self._producer = Producer(kafka_client)
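
A minimal sketch of publishing through the producer created above. It assumes an afkak-style Producer whose send_messages() returns a Deferred, and a structlog-style log object as in the other snippets; the function name and payload are placeholders:

def publish_event(producer, topic, payload):
    # send_messages() returns a Deferred that fires once the broker has
    # acknowledged the message; log failures instead of raising.
    d = producer.send_messages(topic, msgs=[payload])
    d.addErrback(lambda failure: log.error('publish-failed', failure=failure))
    return d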
Example 5
@defer.inlineCallbacks
def ready_client(reactor, netloc, topic):
    """
    Connect to a Kafka broker and wait for the named topic to exist.
    This assumes that ``auto.create.topics.enable`` is set in the broker
    configuration.
    :raises: `KafkaUnavailableError` if unable to connect.
    """
    client = KafkaClient(netloc, reactor=reactor)

    e = True
    while e:
        yield client.load_metadata_for_topics(topic)
        e = client.metadata_error_for_topic(topic)
        if e:
            log.info("Error getting metadata for topic %r: %s (will retry)",
                     topic, e)

    defer.returnValue(client)
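
A short illustrative driver for ready_client, assuming the Consumer class, OFFSET_LATEST, and log used elsewhere on this page; the broker address and topic are placeholders:

@defer.inlineCallbacks
def consume_when_ready(reactor):
    # Wait (asynchronously) until the topic exists, then attach a consumer
    # to partition 0 and run until that consumer is stopped.
    client = yield ready_client(reactor, "localhost:9092", "voltha.heartbeat")
    consumer = Consumer(client, "voltha.heartbeat", 0,
                        lambda consumer, msglist: log.info(msglist))
    yield consumer.start(OFFSET_LATEST)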
Example 6
    def __init__(self, consul_endpoint, grafana_url, topic="voltha.heartbeat"):
        #logging.basicConfig(
        # format='%(asctime)s:%(name)s:' +
        #        '%(levelname)s:%(process)d:%(message)s',
        # level=logging.INFO
        #)
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint,
                                                       'kafka')
        # print('kafka endpoint: ', self.kafka_endpoint)
        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []
Example 7
    def __init__(self, consul_endpoint, kafka_endpoint, grafana_url, topic="voltha.heartbeat"):
        #logging.basicConfig(
        # format='%(asctime)s:%(name)s:' +
        #        '%(levelname)s:%(process)d:%(message)s',
        # level=logging.INFO
        #)
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = kafka_endpoint
        self.consul_endpoint = consul_endpoint

        if kafka_endpoint.startswith('@'):
            retrys = 10
            while True:
                try:
                    self.kafka_endpoint = get_endpoint_from_consul(
                        self.consul_endpoint, kafka_endpoint[1:])
                    break
                except:
                    log.error("unable-to-communicate-with-consul")
                    self.stop()
                retrys -= 1
                if retrys == 0:
                    log.error("unable-to-communicate-with-consul")
                    self.stop()
                time.sleep(10)

        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []
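
The kafka_endpoint argument accepts either a literal host:port or a Consul service name prefixed with '@', in which case the name after the '@' is resolved through Consul. An illustrative sketch, assuming this __init__ belongs to the DashDaemon class shown later on this page; all addresses are placeholders:

# Direct broker address:
dashd = DashDaemon('localhost:8500', '10.0.2.15:9092',
                   'http://localhost:8882/api')

# Resolve the 'kafka' service through Consul instead:
dashd = DashDaemon('localhost:8500', '@kafka',
                   'http://localhost:8882/api')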
Example 8
    def __init__(self, config):

        self._topics = config['kafka']['topics']

        kafka_client = KafkaClient(
            Config.get('kafka')['server'] + ':' +
            str(Config.get('kafka')['port']))
        self._producer = Producer(kafka_client)

        self._dbpools = {}

        for database_name in config['databases']:

            db_config = Config.get('databases')[database_name]

            self._dbpools[database_name] = ReconnectingConnectionPool(
                "pymysql",
                host=db_config['host'],
                user=db_config['user'],
                password=db_config['password'],
                db=db_config['db'],
                charset=db_config['charset'],
                autocommit=True,
                cursorclass=pymysql.cursors.DictCursor)
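
The keys read by this constructor imply a configuration shaped roughly as below. This is a hedged sketch: only the fields the snippet actually dereferences are known, all values are placeholders, and note that the Kafka server and port come from the global Config helper while the topics and databases come from the config argument:

config = {
    'kafka': {
        'server': 'localhost',        # Config.get('kafka')['server']
        'port': 9092,                 # Config.get('kafka')['port']
        'topics': ['voltha.events'],  # config['kafka']['topics']
    },
    'databases': {
        'voltha': {                   # one connection pool per entry
            'host': 'localhost',
            'user': 'voltha',
            'password': 'secret',
            'db': 'voltha',
            'charset': 'utf8',
        },
    },
}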
Example 9
class ConsumerExample(object):
    def __init__(self, consul_endpoint, topic="voltha.heartbeat", runtime=60):
        self.topic = topic
        self.runtime = runtime
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint,
                                                       'kafka')

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []

    @inlineCallbacks
    def start(self):
        partitions = []
        try:
            while not partitions:
                yield self._client.load_metadata_for_topics(self.topic)
                e = self._client.metadata_error_for_topic(self.topic)
                if e:
                    log.warning('no-metadata-for-topic', error=e,
                                topic=self.topic)
                else:
                    partitions = self._client.topic_partitions[self.topic]
        except KafkaUnavailableError:
            log.error("unable-to-communicate-with-Kafka-brokers")
            self.stop()

        def _note_consumer_stopped(result, consumer):
            log.info('consumer-stopped', consumer=consumer,
                     result=result)

        for partition in partitions:
            c = Consumer(self._client, self.topic, partition,
                         self.msg_processor)
            self._consumer_list.append(c)
            # log.info('consumer-started', topic=self.topic, partition=partition)
            d = c.start(OFFSET_LATEST)
            d.addBoth(_note_consumer_stopped, c)
            self._consumer_d_list.append(d)

        # Stop ourselves after we've run the allotted time
        reactor.callLater(self.runtime, self.stop)

    def stop(self):
        log.info("\n")
        log.info('end-of-execution-stopping-consumers')
        # Ask each of our consumers to stop. When a consumer fully stops, it
        # fires the deferred returned from its start() method. We saved all
        # those deferreds away (above, in start()) in self._consumer_d_list,
        # so now we'll use a DeferredList to wait for all of them...
        for consumer in self._consumer_list:
            consumer.stop()
        dl = DeferredList(self._consumer_d_list)

        # Once the consumers are all stopped, then close our client
        def _stop_client(result):
            if isinstance(result, Failure):
                log.error('error', result=result)
            else:
                log.info('all-consumers-stopped', client=self._client)
            self._client.close()
            return result

        dl.addBoth(_stop_client)

        # And once the client is shutdown, stop the reactor
        def _stop_reactor(result):
            reactor.stop()
            return result

        dl.addBoth(_stop_reactor)

    def msg_processor(self, consumer, msglist):
        for msg in msglist:
            log.info(msg)
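
A minimal sketch of running ConsumerExample under the Twisted reactor; the Consul address is a placeholder and log configuration is omitted:

if __name__ == '__main__':
    consumer_example = ConsumerExample('localhost:8500',
                                       topic='voltha.heartbeat',
                                       runtime=60)
    # start() is an inlineCallbacks coroutine, so schedule it once the
    # reactor is running; stop() eventually calls reactor.stop().
    reactor.callWhenRunning(consumer_example.start)
    reactor.run()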
Example 10
class DashDaemon(object):
    def __init__(self, consul_endpoint, kafka_endpoint, grafana_url, topic="voltha.heartbeat"):
        #logging.basicConfig(
        # format='%(asctime)s:%(name)s:' +
        #        '%(levelname)s:%(process)d:%(message)s',
        # level=logging.INFO
        #)
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = kafka_endpoint
        self.consul_endpoint = consul_endpoint

        if kafka_endpoint.startswith('@'):
            retrys = 10
            while True:
                try:
                    self.kafka_endpoint = get_endpoint_from_consul(
                        self.consul_endpoint, kafka_endpoint[1:])
                    break
                except:
                    log.error("unable-to-communicate-with-consul")
                    self.stop()
                retrys -= 1
                if retrys == 0:
                    log.error("unable-to-communicate-with-consul")
                    self.stop()
                time.sleep(10)

        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []

    def set_on_start_callback(self, on_start_callback):
        # This function is currently unused, future requirements.
        self.on_start_callback = on_start_callback
        return self

    @inlineCallbacks
    def start(self):
        partitions = []
        try:
            while not partitions:
                yield self._client.load_metadata_for_topics(self.topic)
                #self._client.load_metadata_for_topics(self.topic)
                e = self._client.metadata_error_for_topic(self.topic)
                if e:
                    log.warning('no-metadata-for-topic', error=e,
                                topic=self.topic)
                else:
                    partitions = self._client.topic_partitions[self.topic]
                    break
                time.sleep(20)
        except KafkaUnavailableError:
            log.error("unable-to-communicate-with-Kafka-brokers")
            self.stop()

        def _note_consumer_stopped(result, consumer):
            log.info('consumer-stopped', consumer=consumer,
                     result=result)

        for partition in partitions:
            c = Consumer(self._client, self.topic, partition,
                         self.msg_processor)
            self._consumer_list.append(c)
            log.info('consumer-started', topic=self.topic, partition=partition)
            d = c.start(OFFSET_LATEST)
            d.addBoth(_note_consumer_stopped, c)
            self._consumer_d_list.append(d)

        # Now read the list of existing dashboards from Grafana and create the
        # dictionary of dashboard timers. If we've crashed there will be
        # dashboards there. Just add them and if they're no longer valid
        # they'll be deleted. If they are valid then they'll persist.
        #print("Starting main loop")
        try:
            retrys = 10
            while True:
                r = requests.get(self.grafana_url + "/datasources")
                if r.status_code == requests.codes.ok:
                    break
                else:
                    retrys -= 1
                    if retrys == 0:
                        log.error("unable-to-communicate-with-grafana")
                        self.stop()
                    time.sleep(10)
            j = r.json()
            data_source = False
            for i in j:
                if i["name"] == "Voltha Stats":
                     data_source = True
                     break
            if not data_source:
                r = requests.post(self.grafana_url + "/datasources",
                                  data={"name": "Voltha Stats",
                                        "type": "graphite",
                                        "access": "proxy",
                                        "url": "http://localhost:81"})
                log.info('data-source-added',status=r.status_code, text=r.text)

            retrys = 10
            while True:
                r = requests.get(self.grafana_url + "/search?")
                if r.status_code == requests.codes.ok:
                    break
                else:
                    retrys -= 1
                    if retrys == 0:
                        log.error("unable-to-communicate-with-grafana")
                        self.stop()
                    time.sleep(10)
            j = r.json()
            for i in j:
                # Look for dashboards that have a title of *olt.[[:hexdigit:]].
                # These will be the ones of interest. Others should just be left
                # alone.
                #print(i['title'])
                match = re.search(r'(.*olt)\.([0-9a-zA-Z]+)',i['title'])
                if match and match.lastindex > 0:
                    #print(match.group(1), match.group(2))
                    self.dash_meta[match.group(2)] = {}
                    self.dash_meta[match.group(2)]['timer'] = self.timer_duration # 10 min
                    self.dash_meta[match.group(2)]['device'] = match.group(1)
                    self.dash_meta[match.group(2)]['created'] = False
                    self.dash_meta[match.group(2)]['ports'] = {}
                    # TODO: We should really capture all of the chart data
                    # including the rows, panels, and data points being logged.
                    # This is good enough for now though to determine if
                    # there's already a dashboard for a given device.


            def countdown_processor():
                # Called every X (timer_resolution) seconds to count down each of the
                # dash timers. If a timer reaches 0 the corresponding
                # dashboard is removed.
                #log.info("Counting down.")
                try:
                    for dashboard in self.dash_meta.keys():
                        #print("Counting down %s." %dashboard)
                        # Issue a log if the counter decrement is somewhat relevant
                        if(self.dash_meta[dashboard]['timer'] % 100 == 0 and \
                           self.dash_meta[dashboard]['timer'] != self.timer_duration):
                            log.info("counting-down",dashboard=dashboard,
                                     timer=self.dash_meta[dashboard]['timer'])
                        self.dash_meta[dashboard]['timer'] -= self.timer_resolution
                        if self.dash_meta[dashboard]['timer'] <= 0:
                            # Delete the dashboard here
                            log.info("FIXME:-Should-delete-the-dashboard-here",
                                     dashboard=dashboard)
                            pass
                except:
                    e = sys.exc_info()
                    log.error("error", error=e)
            # Start the dashboard countdown processor
            log.info("starting-countdown-processor")
            lc = LoopingCall(countdown_processor)
            lc.start(self.timer_resolution)

            @inlineCallbacks
            def template_checker():
                try:
                    # Called every so often (timer_resolution seconds because it's
                    # convenient) to check if a template dashboard has been defined
                    # in Grafana. If it has been, replace the built in template
                    # with the one provided
                    r = requests.get(self.grafana_url + "/search?query=template")
                    db = r.json()
                    if len(db) == 1:
                        # Apply the template
                        yield self.dash_template.apply_template(db[0])
                    elif len(db) != 0:
                        # This is an error, log it.
                        log.warning("More-than-one-template-provided-ignoring")
                except:
                    e = sys.exc_info()
                    log.error("error", error=e)

            log.info("starting-template-checker")
            lc = LoopingCall(template_checker)
            lc.start(self.timer_resolution)

        except:
            e = sys.exc_info()
            log.error("error", error=e)

    def stop(self):
        log.info("\n")
        log.info('end-of-execution-stopping-consumers')
        # Ask each of our consumers to stop. When a consumer fully stops, it
        # fires the deferred returned from its start() method. We saved all
        # those deferreds away (above, in start()) in self._consumer_d_list,
        # so now we'll use a DeferredList to wait for all of them...
        for consumer in self._consumer_list:
            consumer.stop()
        dl = DeferredList(self._consumer_d_list)

        # Once the consumers are all stopped, then close our client
        def _stop_client(result):
            if isinstance(result, Failure):
                log.error('error', result=result)
            else:
                log.info('all-consumers-stopped', client=self._client)
            self._client.close()
            return result

        dl.addBoth(_stop_client)

        # And once the client is shutdown, stop the reactor
        def _stop_reactor(result):
            reactor.stop()
            return result

        dl.addBoth(_stop_reactor)

    def check_for_dashboard(self, msg):
        need_dash = {}
        done = {}
        # Extract the ids for all olt(s) in the message and do one of 2
        # things. If it exists, reset the meta_data timer for the dashboard and
        # if it doesn't exist add it to the array of needed dashboards.
        metrics = json.loads(getattr(msg.message,'value'))['prefixes']
        for key in metrics.keys():
            match = re.search(r'voltha\.(.*olt)\.([0-9a-zA-Z]+)\.(.*)',key)
            if match and match.lastindex > 1:
                if match.group(2) in self.dash_meta and match.group(2) not in done:
                    # Update the delete countdown timer
                    self.dash_meta[match.group(2)]['timer'] = self.timer_duration
                    done[match.group(2)] = True
                    # Issue a log if the reset is somewhat relevant.
                    if self.dash_meta[match.group(2)]['timer'] < \
                    self.timer_duration - self.timer_resolution:
                        log.info("reset-timer",device=match.group(2))
                    #print("reset timer for: %s" %match.group(2))
                else:
                    # No dashboard exists, add it to the needed list.
                    need_dash[key] = metrics[key]
        return need_dash

    def create_dashboards(self, createList):
        dataIds = "ABCDEFGHIJKLMNOP"
        for dash in createList:
            #log.info("creating a dashboard for: %s" % self.dash_meta[dash])
            # Create one row per "interface"
            # Create one panel per metric type for the time being it's one
            # panel for byte stats and one panel for packet stats.
            newDash = json.loads(self.dash_template.dashBoard)
            newDash['dashboard']['title'] = self.dash_meta[dash]['device'] + \
                    '.' + dash
            # The port is the main grouping attribute
            for port in self.dash_meta[dash]['ports']:
                # Add in the rows for the port specified by the template
                for row in self.dash_template.rows:
                    r = json.loads(self.dash_template.dashRow)
                    r['title'] = re.sub(r'%port%',port, row['title'])
                    p = {}
                    # Add the panels to the row per the template
                    panelId = 1
                    for panel in self.dash_template.panels:
                        p = json.loads(self.dash_template.dashPanel)
                        p['id'] = panelId
                        panelId += 1
                        p['title'] = re.sub(r'%port%', port.upper(), panel['title'])
                        t = {}
                        dataId = 0
                        # Add the targets to the panel
                        for dpoint in sorted(self.dash_meta[dash]['ports'][port]):
                            if dpoint in panel:
                                t['refId'] = dataIds[dataId]
                                db = re.sub(r'%port%',port,panel[dpoint])
                                db = re.sub(r'%device%',
                                            self.dash_meta[dash]['device'],db)
                                db = re.sub(r'%deviceId%', dash,db)
                                t['target'] = db
                                p['targets'].append(t.copy())
                                dataId += 1
                        r['panels'].append(p.copy())
                    newDash['dashboard']['rows'].append(r.copy())
            #print("NEW DASHBOARD: ",json.dumps(newDash))
            #print(r.json())
            r = requests.post(self.grafana_url + "/dashboards/db",
                              json=newDash)
            self.dash_meta[dash]['slug'] = r.json()['slug']
            self.dash_meta[dash]['created'] = True
            log.info("created-dashboard", slug=self.dash_meta[dash]['slug'])

    def msg_processor(self, consumer, msglist):
        try:
            createList = []
            for msg in msglist:
                # Reset the timer for existing dashboards and get back a dict
                # of dashboards to create, if any.
                need_dash = self.check_for_dashboard(msg)
                # Now populate the meta data for all missing dashboards
                for key in need_dash.keys():
                    match = re.search(r'voltha\.(.*olt)\.([0-9a-zA-Z]+)\.(.*)',key)
                    if match and match.lastindex > 2:
                        if match.group(2) in self.dash_meta:
                            # The entry will have been created when the first
                            # port in the record was encountered so just
                            # populate the metrics and port info.
                            # TODO: The keys below are the names of the metrics
                            # that are in the Kafka record. This auto-discovery
                            # is fine if all that's needed are raw metrics. If
                            # metrics are "cooked" by a downstream process and
                            # subsequently fed to graphite/carbon without being
                            # re-posted to Kafka, discovery becomes impossible.
                            # In those cases and in cases where finer grain
                            # control of what's displayed is required, a config
                            # file would be necessary.
                            self.dash_meta[match.group(2)]['ports'][match.group(3)] = \
                            need_dash[key]['metrics'].keys()
                        else:
                            # Not there, create a meta-data record for the
                            # device and add this port.
                            #print("Adding meta data for", match.group(1),
                            #      match.group(2))
                            createList.append(match.group(2))
                            self.dash_meta[match.group(2)] = {}
                            self.dash_meta[match.group(2)]['timer'] = 600
                            self.dash_meta[match.group(2)]['device'] = match.group(1)
                            self.dash_meta[match.group(2)]['created'] = False
                            self.dash_meta[match.group(2)]['ports'] = {}
                            #print("Adding port", match.group(3), "to", match.group(1),
                            #      match.group(2))
                            self.dash_meta[match.group(2)]['ports'][match.group(3)] = \
                            need_dash[key]['metrics'].keys()
            # Now go ahead and create the dashboards using the meta data that
            # was just populated for them.
            if len(createList) != 0:  # Create any missing dashboards.
                self.create_dashboards(createList)
        except:
            e = sys.exc_info()
            log.error("error", error=e)
Example 11
class ConsumerExample(object):
    def __init__(self, consul_endpoint, topic="voltha.heartbeat", runtime=60):
        self.topic = topic
        self.runtime = runtime
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint,
                                                       'kafka')

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []

    @inlineCallbacks
    def start(self):
        partitions = []
        try:
            while not partitions:
                yield self._client.load_metadata_for_topics(self.topic)
                e = self._client.metadata_error_for_topic(self.topic)
                if e:
                    log.warning('no-metadata-for-topic',
                                error=e,
                                topic=self.topic)
                else:
                    partitions = self._client.topic_partitions[self.topic]
        except KafkaUnavailableError:
            log.error("unable-to-communicate-with-Kafka-brokers")
            self.stop()

        def _note_consumer_stopped(result, consumer):
            log.info('consumer-stopped', consumer=consumer, result=result)

        for partition in partitions:
            c = Consumer(self._client, self.topic, partition,
                         self.msg_processor)
            self._consumer_list.append(c)
            # log.info('consumer-started', topic=self.topic, partition=partition)
            d = c.start(OFFSET_LATEST)
            d.addBoth(_note_consumer_stopped, c)
            self._consumer_d_list.append(d)

        # Stop ourselves after we've run the allotted time
        reactor.callLater(self.runtime, self.stop)

    def stop(self):
        log.info("\n")
        log.info('end-of-execution-stopping-consumers')
        # Ask each of our consumers to stop. When a consumer fully stops, it
        # fires the deferred returned from its start() method. We saved all
        # those deferreds away (above, in start()) in self._consumer_d_list,
        # so now we'll use a DeferredList to wait for all of them...
        for consumer in self._consumer_list:
            consumer.stop()
        dl = DeferredList(self._consumer_d_list)

        # Once the consumers are all stopped, then close our client
        def _stop_client(result):
            if isinstance(result, Failure):
                log.error('error', result=result)
            else:
                log.info('all-consumers-stopped', client=self._client)
            self._client.close()
            return result

        dl.addBoth(_stop_client)

        # And once the client is shutdown, stop the reactor
        def _stop_reactor(result):
            reactor.stop()
            return result

        dl.addBoth(_stop_reactor)

    def msg_processor(self, consumer, msglist):
        for msg in msglist:
            log.info(msg)
Example 12
class DashDaemon(object):
    def __init__(self, consul_endpoint, grafana_url, topic="voltha.heartbeat"):
        #logging.basicConfig(
        # format='%(asctime)s:%(name)s:' +
        #        '%(levelname)s:%(process)d:%(message)s',
        # level=logging.INFO
        #)
        self.dash_meta = {}
        self.timer_resolution = 10
        self.timer_duration = 600
        self.topic = topic
        self.dash_template = DashTemplate(grafana_url)
        self.grafana_url = grafana_url
        self.kafka_endpoint = get_endpoint_from_consul(consul_endpoint,
                                                       'kafka')
        # print('kafka endpoint: ', self.kafka_endpoint)
        self.on_start_callback = None

        self._client = KafkaClient(self.kafka_endpoint)
        self._consumer_list = []  # List of consumers
        # List of deferreds returned from consumers' start() methods
        self._consumer_d_list = []

    def set_on_start_callback(self, on_start_callback):
        # This function is currently unused, future requirements.
        self.on_start_callback = on_start_callback
        return self

    @inlineCallbacks
    def start(self):
        partitions = []
        try:
            while not partitions:
                yield self._client.load_metadata_for_topics(self.topic)
                e = self._client.metadata_error_for_topic(self.topic)
                if e:
                    log.warning('no-metadata-for-topic',
                                error=e,
                                topic=self.topic)
                else:
                    partitions = self._client.topic_partitions[self.topic]
        except KafkaUnavailableError:
            log.error("unable-to-communicate-with-Kafka-brokers")
            self.stop()

        def _note_consumer_stopped(result, consumer):
            log.info('consumer-stopped', consumer=consumer, result=result)

        for partition in partitions:
            c = Consumer(self._client, self.topic, partition,
                         self.msg_processor)
            self._consumer_list.append(c)
            log.info('consumer-started', topic=self.topic, partition=partition)
            d = c.start(OFFSET_LATEST)
            d.addBoth(_note_consumer_stopped, c)
            self._consumer_d_list.append(d)

        # Now read the list of existing dashboards from Grafana and create the
        # dictionary of dashboard timers. If we've crashed there will be
        # dashboards there. Just add them and if they're no longer valid
        # they'll be deleted. If they are valid then they'll persist.
        #print("Starting main loop")
        try:
            r = requests.get(self.grafana_url + "/search?")
            j = r.json()
            for i in j:
                # Look for dashboards that have a title of *olt.[[:hexdigit:]].
                # These will be the ones of interest. Others should just be left
                # alone.
                #print(i['title'])
                match = re.search(r'(.*olt)\.([0-9a-zA-Z]+)', i['title'])
                if match and match.lastindex > 0:
                    #print(match.group(1), match.group(2))
                    self.dash_meta[match.group(2)] = {}
                    self.dash_meta[match.group(
                        2)]['timer'] = self.timer_duration  # 10 min
                    self.dash_meta[match.group(2)]['device'] = match.group(1)
                    self.dash_meta[match.group(2)]['created'] = False
                    self.dash_meta[match.group(2)]['ports'] = {}
                    # TODO: We should really capture all of the chart data
                    # including the rows, panels, and data points being logged.
                    # This is good enough for now though to determine if
                    # there's already a dashboard for a given device.

            def countdown_processor():
                # Called every X (timer_resolution) seconds to count down each of the
                # dash timers. If a timer reaches 0 the corresponding
                # dashboard is removed.
                #log.info("Counting down.")
                try:
                    for dashboard in self.dash_meta.keys():
                        #print("Counting down %s." %dashboard)
                        # Issue a log if the counter decrement is somewhat relevant
                        if(self.dash_meta[dashboard]['timer'] % 100 == 0 and \
                           self.dash_meta[dashboard]['timer'] != self.timer_duration):
                            log.info("counting-down",
                                     dashboard=dashboard,
                                     timer=self.dash_meta[dashboard]['timer'])
                        self.dash_meta[dashboard][
                            'timer'] -= self.timer_resolution
                        if self.dash_meta[dashboard]['timer'] <= 0:
                            # Delete the dashboard here
                            log.info("FIXME:-Should-delete-the-dashboard-here",
                                     dashboard=dashboard)
                            pass
                except:
                    e = sys.exc_info()
                    log.error("error", error=e)

            # Start the dashboard countdown processor
            log.info("starting-countdown-processor")
            lc = LoopingCall(countdown_processor)
            lc.start(self.timer_resolution)

            @inlineCallbacks
            def template_checker():
                try:
                    # Called every so often (timer_resolution seconds because it's
                    # convenient) to check if a template dashboard has been defined
                    # in Grafana. If it has been, replace the built in template
                    # with the one provided
                    r = requests.get(self.grafana_url +
                                     "/search?query=template")
                    db = r.json()
                    if len(db) == 1:
                        # Apply the template
                        yield self.dash_template.apply_template(db[0])
                    elif len(db) != 0:
                        # This is an error, log it.
                        log.warning("More-than-one-template-provided-ignoring")
                except:
                    e = sys.exc_info()
                    log.error("error", error=e)

            log.info("starting-template-checker")
            lc = LoopingCall(template_checker)
            lc.start(self.timer_resolution)

        except:
            e = sys.exc_info()
            log.error("error", error=e)

    def stop(self):
        log.info("\n")
        log.info('end-of-execution-stopping-consumers')
        # Ask each of our consumers to stop. When a consumer fully stops, it
        # fires the deferred returned from its start() method. We saved all
        # those deferreds away (above, in start()) in self._consumer_d_list,
        # so now we'll use a DeferredList to wait for all of them...
        for consumer in self._consumer_list:
            consumer.stop()
        dl = DeferredList(self._consumer_d_list)

        # Once the consumers are all stopped, then close our client
        def _stop_client(result):
            if isinstance(result, Failure):
                log.error('error', result=result)
            else:
                log.info('all-consumers-stopped', client=self._client)
            self._client.close()
            return result

        dl.addBoth(_stop_client)

        # And once the client is shutdown, stop the reactor
        def _stop_reactor(result):
            reactor.stop()
            return result

        dl.addBoth(_stop_reactor)

    def check_for_dashboard(self, msg):
        need_dash = {}
        done = {}
        # Extract the ids for all olt(s) in the message and do one of 2
        # things. If it exists, reset the meta_data timer for the dashboard and
        # if it doesn't exist add it to the array of needed dashboards.
        metrics = json.loads(getattr(msg.message, 'value'))['prefixes']
        for key in metrics.keys():
            match = re.search(r'voltha\.(.*olt)\.([0-9a-zA-Z]+)\.(.*)', key)
            if match and match.lastindex > 1:
                if match.group(2) in self.dash_meta and match.group(
                        2) not in done:
                    # Update the delete countdown timer
                    self.dash_meta[match.group(
                        2)]['timer'] = self.timer_duration
                    done[match.group(2)] = True
                    # Issue a log if the reset is somewhat relevant.
                    if self.dash_meta[match.group(2)]['timer'] < \
                    self.timer_duration - self.timer_resolution:
                        log.info("reset-timer", device=match.group(2))
                    #print("reset timer for: %s" %match.group(2))
                else:
                    # No dashboard exists, add it to the needed list.
                    need_dash[key] = metrics[key]
        return need_dash

    def create_dashboards(self, createList):
        dataIds = "ABCDEFGHIJKLMNOP"
        for dash in createList:
            #log.info("creating a dashboard for: %s" % self.dash_meta[dash])
            # Create one row per "interface"
            # Create one panel per metric type for the time being it's one
            # panel for byte stats and one panel for packet stats.
            newDash = json.loads(self.dash_template.dashBoard)
            newDash['dashboard']['title'] = self.dash_meta[dash]['device'] + \
                    '.' + dash
            # The port is the main grouping attribute
            for port in self.dash_meta[dash]['ports']:
                # Add in the rows for the port specified by the template
                for row in self.dash_template.rows:
                    r = json.loads(self.dash_template.dashRow)
                    r['title'] = re.sub(r'%port%', port, row['title'])
                    p = {}
                    # Add the panels to the row per the template
                    panelId = 1
                    for panel in self.dash_template.panels:
                        p = json.loads(self.dash_template.dashPanel)
                        p['id'] = panelId
                        panelId += 1
                        p['title'] = re.sub(r'%port%', port.upper(),
                                            panel['title'])
                        t = {}
                        dataId = 0
                        # Add the targets to the panel
                        for dpoint in sorted(
                                self.dash_meta[dash]['ports'][port]):
                            if dpoint in panel:
                                t['refId'] = dataIds[dataId]
                                db = re.sub(r'%port%', port, panel[dpoint])
                                db = re.sub(r'%device%',
                                            self.dash_meta[dash]['device'], db)
                                db = re.sub(r'%deviceId%', dash, db)
                                t['target'] = db
                                p['targets'].append(t.copy())
                                dataId += 1
                        r['panels'].append(p.copy())
                    newDash['dashboard']['rows'].append(r.copy())
            #print("NEW DASHBOARD: ",json.dumps(newDash))
            #print(r.json())
            r = requests.post(self.grafana_url + "/dashboards/db",
                              json=newDash)
            self.dash_meta[dash]['slug'] = r.json()['slug']
            self.dash_meta[dash]['created'] = True
            log.info("created-dashboard", slug=self.dash_meta[dash]['slug'])

    def msg_processor(self, consumer, msglist):
        try:
            createList = []
            for msg in msglist:
                # Reset the timer for existing dashboards and get back a dict
                # of dashboards to create, if any.
                need_dash = self.check_for_dashboard(msg)
                # Now populate the meta data for all missing dashboards
                for key in need_dash.keys():
                    match = re.search(r'voltha\.(.*olt)\.([0-9a-zA-Z]+)\.(.*)',
                                      key)
                    if match and match.lastindex > 2:
                        if match.group(2) in self.dash_meta:
                            # The entry will have been created when the first
                            # port in the record was encountered so just
                            # populate the metrics and port info.
                            # TODO: The keys below are the names of the metrics
                            # that are in the Kafka record. This auto-discovery
                            # is fine if all that's needed are raw metrics. If
                            # metrics are "cooked" by a downstream process and
                            # subsequently fed to graphite/carbon without being
                            # re-posted to Kafka, discovery becomes impossible.
                            # In those cases and in cases where finer grain
                            # control of what's displayed is required, a config
                            # file would be necessary.
                            self.dash_meta[match.group(2)]['ports'][match.group(3)] = \
                            need_dash[key]['metrics'].keys()
                        else:
                            # Not there, create a meta-data record for the
                            # device and add this port.
                            #print("Adding meta data for", match.group(1),
                            #      match.group(2))
                            createList.append(match.group(2))
                            self.dash_meta[match.group(2)] = {}
                            self.dash_meta[match.group(2)]['timer'] = 600
                            self.dash_meta[match.group(
                                2)]['device'] = match.group(1)
                            self.dash_meta[match.group(2)]['created'] = False
                            self.dash_meta[match.group(2)]['ports'] = {}
                            #print("Adding port", match.group(3), "to", match.group(1),
                            #      match.group(2))
                            self.dash_meta[match.group(2)]['ports'][match.group(3)] = \
                            need_dash[key]['metrics'].keys()
            # Now go ahead and create the dashboards using the meta data that
            # was just populated for them.
            if len(createList) != 0:  # Create any missing dashboards.
                self.create_dashboards(createList)
        except:
            e = sys.exc_info()
            log.error("error", error=e)
Example 13

class IKafkaMessagingProxy(object):
    _kafka_messaging_instance = None

    def __init__(self, kafka_host_port, kv_store, default_topic, target_cls):
        """
        Initialize the kafka proxy.  This is a singleton (may change to
        non-singleton if performance is better)
        :param kafka_host_port: Kafka host and port
        :param kv_store: Key-Value store
        :param default_topic: Default topic to subscribe to
        :param target_cls: target class - method of that class is invoked
        when a message is received on the default_topic
        """
        # Raise an exception if the singleton instance already exists
        if IKafkaMessagingProxy._kafka_messaging_instance:
            raise Exception(
                'singleton-exists: {}'.format(IKafkaMessagingProxy))

        log.debug("Initializing-KafkaProxy")
        self.kafka_host_port = kafka_host_port
        self.kv_store = kv_store
        self.default_topic = default_topic
        self.target_cls = target_cls
        self.topic_target_cls_map = {}
        self.topic_consumer_map = {}
        self.topic_callback_map = {}
        self.subscribers = {}
        self.kafka_client = None
        self.kafka_proxy = None
        self.transaction_id_deferred_map = {}
        self.received_msg_queue = DeferredQueue()

        self.init_time = 0
        self.init_received_time = 0

        self.init_resp_time = 0
        self.init_received_resp_time = 0

        self.num_messages = 0
        self.total_time = 0
        self.num_responses = 0
        self.total_time_responses = 0
        log.debug("KafkaProxy-initialized")

    def start(self):
        try:
            # Create the kafka client
            # assert self.kafka_host is not None
            # assert self.kafka_port is not None
            # kafka_host_port = ":".join((self.kafka_host, self.kafka_port))
            self.kafka_client = KafkaClient(self.kafka_host_port)

            # Get the kafka proxy instance.  If it does not exist then
            # create it
            self.kafka_proxy = get_kafka_proxy()
            if self.kafka_proxy is None:
                KafkaProxy(kafka_endpoint=self.kafka_host_port).start()
                self.kafka_proxy = get_kafka_proxy()

            # Subscribe the default topic and target_cls
            self.topic_target_cls_map[self.default_topic] = self.target_cls

            # Start the queue to handle incoming messages
            reactor.callLater(0, self._received_message_processing_loop)

            # Start listening for incoming messages
            reactor.callLater(0,
                              self.subscribe,
                              self.default_topic,
                              target_cls=self.target_cls)

            # Setup the singleton instance
            IKafkaMessagingProxy._kafka_messaging_instance = self
        except Exception as e:
            log.exception("Failed-to-start-proxy", e=e)

    def stop(self):
        """
        Invoked to stop the kafka proxy
        :return: None on success, Exception on failure
        """
        log.debug("Stopping-messaging-proxy ...")
        try:
            # Stop all the consumers
            deferred_list = []
            for key, values in self.topic_consumer_map.iteritems():
                deferred_list.extend([c.stop() for c in values])

            if deferred_list:
                d = gatherResults(deferred_list)
                d.addCallback(lambda result: self.kafka_client.close())
            log.debug("Messaging-proxy-stopped.")
        except Exception as e:
            log.exception("Exception-when-stopping-messaging-proxy:", e=e)

    @inlineCallbacks
    def create_topic(self, topic):
        yield self._wait_until_topic_is_ready(self.kafka_client, topic)

    @inlineCallbacks
    def _wait_until_topic_is_ready(self, client, topic):
        e = True
        while e:
            yield client.load_metadata_for_topics(topic)
            e = client.metadata_error_for_topic(topic)
            if e:
                log.debug("Topic-not-ready-retrying...", topic=topic)

    def _clear_backoff(self):
        if self.retries:
            log.info('reconnected-to-consul', after_retries=self.retries)
            self.retries = 0

    def get_target_cls(self):
        return self.target_cls

    def get_default_topic(self):
        return self.default_topic

    @inlineCallbacks
    def _subscribe(self, topic, offset, callback=None, target_cls=None):
        try:
            log.debug("subscribing-to-topic-start", topic=topic)
            yield self._wait_until_topic_is_ready(self.kafka_client, topic)
            partitions = self.kafka_client.topic_partitions[topic]
            consumers = []

            # First setup the generic callback - all received messages will
            # go through that queue
            if topic not in self.topic_consumer_map:
                log.debug("topic-not-in-consumer-map", topic=topic)
                consumers = [
                    Consumer(self.kafka_client, topic, partition,
                             self._enqueue_received_message)
                    for partition in partitions
                ]
                self.topic_consumer_map[topic] = consumers

            log.debug("_subscribe",
                      topic=topic,
                      consumermap=self.topic_consumer_map)

            if target_cls is not None and callback is None:
                # Scenario #1
                if topic not in self.topic_target_cls_map:
                    self.topic_target_cls_map[topic] = target_cls
            elif target_cls is None and callback is not None:
                # Scenario #2
                log.debug("custom-callback",
                          topic=topic,
                          callback_map=self.topic_callback_map)
                if topic not in self.topic_callback_map:
                    self.topic_callback_map[topic] = [callback]
                else:
                    self.topic_callback_map[topic].extend([callback])
            else:
                log.warn("invalid-parameters")

            def cb_closed(result):
                """
                Called when a consumer cleanly stops.
                """
                log.debug("Consumers-cleanly-stopped")

            def eb_failed(failure):
                """
                Called when a consumer fails due to an uncaught exception in the
                processing callback or a network error on shutdown. In this case we
                simply log the error.
                """
                log.warn("Consumers-failed", failure=failure)

            for c in consumers:
                c.start(offset).addCallbacks(cb_closed, eb_failed)

            log.debug("subscribed-to-topic", topic=topic)

            returnValue(True)
        except Exception as e:
            log.exception("Exception-during-subscription", e=e)
            returnValue(False)

    @inlineCallbacks
    def subscribe(self,
                  topic,
                  callback=None,
                  target_cls=None,
                  max_retry=3,
                  offset=OFFSET_LATEST):
        """
        Scenario 1:  invoked to subscribe to a specific topic with a
        target_cls to invoke when a message is received on that topic.  This
        handles the request/response case where this library performs the
        heavy lifting. In this case the callback must be None.

        Scenario 2:  invoked to subscribe to a specific topic with a
        specific callback to invoke when a message is received on that topic.
        This handles the case where the caller wants to process the received
        message itself. In this case the target_cls must be None.

        :param topic: topic to subscribe to
        :param callback: Callback to invoke when a message is received on
        the topic. Exactly one of callback or target_cls must be provided;
        the other must be None.
        :param target_cls: Target class to use when a message is
        received on the topic. There can only be one target_cls per topic.
        Exactly one of callback or target_cls must be provided; the other
        must be None.
        :param max_retry: the number of retries before reporting failure
        to subscribe. This caters for the scenario where the kafka topic is
        not ready.
        :return: True on success, False on failure
        """
        RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]

        def _backoff(msg, retries):
            wait_time = RETRY_BACKOFF[min(retries, len(RETRY_BACKOFF) - 1)]
            log.info(msg, retry_in=wait_time)
            return asleep(wait_time)

        retry = 0
        subscribed = False
        while not subscribed:
            subscribed = yield self._subscribe(topic,
                                               callback=callback,
                                               target_cls=target_cls,
                                               offset=offset)
            if subscribed:
                returnValue(True)
            elif retry > max_retry:
                returnValue(False)
            else:
                _backoff("subscription-not-complete", retry)
                retry += 1

        # while not self._subscribe(topic, callback=callback,
        #                           target_cls=target_cls):
        #     if retry > max_retry:
        #         return False
        #     else:
        #         _backoff("subscription-not-complete", retry)
        #         retry += 1
        # return True
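
    # Illustrative calls for the two scenarios above (topic names and the
    # handler objects are assumptions):
    #   Scenario 1 - the library dispatches request/response to target_cls:
    #       yield proxy.subscribe('openolt', target_cls=adapter_request_handler)
    #   Scenario 2 - the caller processes raw messages itself:
    #       yield proxy.subscribe('voltha.kpis', callback=my_msg_handler)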

    def unsubscribe(self, topic):
        """
        Invoked when unsubscribing from a topic
        :param topic: topic to unsubscribe from
        :return: None on success or Exception on failure
        """
        log.debug("Unsubscribing-to-topic", topic=topic)

        def remove_topic(result, topic):
            if topic in self.topic_consumer_map:
                del self.topic_consumer_map[topic]

        try:
            if topic in self.topic_consumer_map:
                consumers = self.topic_consumer_map[topic]
                d = gatherResults([c.stop() for c in consumers])
                d.addCallback(remove_topic, topic)
                log.debug("Unsubscribed-to-topic.", topic=topic)
            else:
                log.debug("Topic-does-not-exist.", topic=topic)
        except Exception as e:
            log.exception("Exception-when-stopping-messaging-proxy:", e=e)

    @inlineCallbacks
    def _enqueue_received_message(self, reactor, message_list):
        """
        Internal method to continuously queue all received messages
        irrespective of topic
        :param reactor: A requirement by the Twisted Python kafka library
        :param message_list: Received list of messages
        :return: None on success, Exception on failure
        """
        try:
            for m in message_list:
                log.debug("received-msg", msg=m)
                yield self.received_msg_queue.put(m)
        except Exception as e:
            log.exception("Failed-enqueueing-received-message", e=e)

    @inlineCallbacks
    def _received_message_processing_loop(self):
        """
        Internal method to continuously process all received messages one
        at a time
        :return: None on success, Exception on failure
        """
        while True:
            try:
                message = yield self.received_msg_queue.get()
                yield self._process_message(message)
            except Exception as e:
                log.exception("Failed-dequeueing-received-message", e=e)
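    # The two methods above form a simple producer/consumer pair: the consumer
    # callback enqueues raw Kafka messages and this loop dequeues and
    # dispatches them one at a time.  A minimal standalone sketch of the same
    # pattern, assuming received_msg_queue is a
    # twisted.internet.defer.DeferredQueue (process() is a placeholder):
    #
    #   from twisted.internet.defer import DeferredQueue, inlineCallbacks
    #
    #   queue = DeferredQueue()
    #
    #   @inlineCallbacks
    #   def consume():
    #       while True:
    #           item = yield queue.get()   # waits until an item is available
    #           process(item)
    #
    #   queue.put("some-message")          # wakes up the waiting consumer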

    def _to_string(self, unicode_str):
        if unicode_str is not None:
            if isinstance(unicode_str, unicode):
                return unicode_str.encode('ascii', 'ignore')
            else:
                return unicode_str
        else:
            return None

    def _format_request(self, rpc, to_topic, reply_topic, **kwargs):
        """
        Format a request to send over kafka
        :param rpc: Requested remote API
        :param to_topic: Topic to send the request
        :param reply_topic: Topic to receive the resulting response, if any
        :param kwargs: Dictionary of key-value pairs to pass as arguments to
        the remote rpc API.
        :return: An InterContainerMessage message type on success or None on
        failure
        """
        try:
            transaction_id = uuid4().hex
            request = InterContainerMessage()
            request_body = InterContainerRequestBody()
            request.header.id = transaction_id
            request.header.type = MessageType.Value("REQUEST")
            request.header.from_topic = reply_topic
            request.header.to_topic = to_topic

            response_required = False
            if reply_topic:
                request_body.reply_to_topic = reply_topic
                request_body.response_required = True
                response_required = True

            request.header.timestamp = int(round(time.time() * 1000))
            request_body.rpc = rpc
            for a, b in kwargs.iteritems():
                arg = Argument()
                arg.key = a
                try:
                    arg.value.Pack(b)
                    request_body.args.extend([arg])
                except Exception as e:
                    log.exception("Failed-parsing-value", e=e)
            request.body.Pack(request_body)
            return request, transaction_id, response_required
        except Exception as e:
            log.exception("formatting-request-failed",
                          rpc=rpc,
                          to_topic=to_topic,
                          reply_topic=reply_topic,
                          args=kwargs,
                          e=e)
            return None, None, None
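    # Example (a minimal sketch; "get_device", ID and the topic names are
    # hypothetical and only illustrate that every kwarg value must be a
    # protobuf Message so that Any.Pack() succeeds):
    #
    #   request, tx_id, resp_required = self._format_request(
    #       rpc="get_device",
    #       to_topic="adapter-requests",
    #       reply_topic="core-responses",
    #       device_id=ID(id="0001"))
    #   if request is not None:
    #       self._send_kafka_message("adapter-requests", request)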

    def _format_response(self, msg_header, msg_body, status):
        """
        Format a response
        :param msg_header: The header portion of a received request
        :param msg_body: The response body
        :param status: True if this represents a successful response
        :return: an InterContainerMessage message type
        """
        try:
            assert isinstance(msg_header, Header)
            response = InterContainerMessage()
            response_body = InterContainerResponseBody()
            response.header.id = msg_header.id
            response.header.timestamp = int(round(time.time() * 1000))
            response.header.type = MessageType.Value("RESPONSE")
            response.header.from_topic = msg_header.to_topic
            response.header.to_topic = msg_header.from_topic
            if msg_body is not None:
                response_body.result.Pack(msg_body)
            response_body.success = status
            response.body.Pack(response_body)
            return response
        except Exception as e:
            log.exception("formatting-response-failed",
                          header=msg_header,
                          body=msg_body,
                          status=status,
                          e=e)
            return None

    def _parse_response(self, msg):
        try:
            message = InterContainerMessage()
            message.ParseFromString(msg)
            resp = InterContainerResponseBody()
            if message.body.Is(InterContainerResponseBody.DESCRIPTOR):
                message.body.Unpack(resp)
            else:
                log.debug("unsupported-msg", msg_type=type(message.body))
                return None
            log.debug("parsed-response", input=message, output=resp)
            return resp
        except Exception as e:
            log.exception("parsing-response-failed", msg=msg, e=e)
            return None

    @inlineCallbacks
    def _process_message(self, m):
        """
        Default internal method invoked for each message received from Kafka,
        dispatched one at a time by the received-message processing loop.
        """
        def _toDict(args):
            """
            Convert a repeatable Argument type into a python dictionary
            :param args: Repeatable core_adapter.Argument type
            :return: a python dictionary
            """
            if args is None:
                return None
            result = {}
            for arg in args:
                assert isinstance(arg, Argument)
                result[arg.key] = arg.value
            return result

        current_time = int(round(time.time() * 1000))
        # log.debug("Got Message", message=m)
        try:
            val = m.message.value

            # Go over customized callbacks first
            if m.topic in self.topic_callback_map:
                for c in self.topic_callback_map[m.topic]:
                    yield c(val)

            #  Check whether we need to process request/response scenario
            if m.topic not in self.topic_target_cls_map:
                return

            # Process request/response scenario
            message = InterContainerMessage()
            message.ParseFromString(val)

            if message.header.type == MessageType.Value("REQUEST"):
                # Get the target class for that specific topic
                targetted_topic = self._to_string(message.header.to_topic)
                msg_body = InterContainerRequestBody()
                if message.body.Is(InterContainerRequestBody.DESCRIPTOR):
                    message.body.Unpack(msg_body)
                else:
                    log.debug("unsupported-msg", msg_type=type(message.body))
                    return
                if targetted_topic in self.topic_target_cls_map:
                    if msg_body.args:
                        log.debug("message-body-args-present", body=msg_body)
                        (status, res) = yield getattr(
                            self.topic_target_cls_map[targetted_topic],
                            self._to_string(
                                msg_body.rpc))(**_toDict(msg_body.args))
                    else:
                        log.debug("message-body-args-absent",
                                  body=msg_body,
                                  rpc=msg_body.rpc)
                        (status, res) = yield getattr(
                            self.topic_target_cls_map[targetted_topic],
                            self._to_string(msg_body.rpc))()
                    if msg_body.response_required:
                        response = self._format_response(
                            msg_header=message.header,
                            msg_body=res,
                            status=status,
                        )
                        if response is not None:
                            res_topic = self._to_string(
                                response.header.to_topic)
                            self._send_kafka_message(res_topic, response)
                            log.debug("Response-sent",
                                      response=response.body,
                                      to_topic=res_topic)
            elif message.header.type == MessageType.Value("RESPONSE"):
                trns_id = self._to_string(message.header.id)
                if trns_id in self.transaction_id_deferred_map:
                    resp = self._parse_response(val)

                    self.transaction_id_deferred_map[trns_id].callback(resp)
            else:
                log.error("invalid-message-type", type=message.header.type)

        except Exception as e:
            log.exception("Failed-to-process-message", message=m, e=e)
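    # The request dispatch above assumes that each rpc handler on the
    # registered target_cls returns a (status, result) tuple, where result is
    # a protobuf message (or None).  A minimal sketch of such a handler class
    # (the rpc name and the _find_device helper are hypothetical):
    #
    #   class RequestHandler(object):
    #       @inlineCallbacks
    #       def get_device(self, device_id=None):
    #           device = yield self._find_device(device_id)
    #           returnValue((True, device))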

    @inlineCallbacks
    def _send_kafka_message(self, topic, msg):
        try:
            yield self.kafka_proxy.send_message(topic, msg.SerializeToString())
        except Exception as e:
            log.exception("Failed-sending-message", message=msg, e=e)