Example #1
class Main(object):
    def __init__(self):
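        """Parse the command line, configure logging, and start the adapter components."""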

        self.args = args = parse_args()
        self.config = load_config(args)

        # Python log levels and the verbosity_adjust value each one maps to
        # (see string_to_int below):
        # 1 - DEBUG    => verbosity_adjust = 0
        # 2 - INFO     => verbosity_adjust = 1
        # 3 - WARN     => verbosity_adjust = 2
        # 4 - ERROR    => verbosity_adjust = 3
        # 5 - CRITICAL => verbosity_adjust = 4
        # The level comes from args.log_level; any other value is rejected
        # below and the process exits.

        verbosity_adjust = self.string_to_int(str(args.log_level))
        if verbosity_adjust == -1:
            print("Invalid loglevel is given: " + str(args.log_level))
            sys.exit(0)

        self.log = setup_logging(self.config.get('logging', {}),
                                 args.instance_id,
                                 verbosity_adjust=verbosity_adjust)
        self.log.info('container-number-extractor',
                      regex=args.container_name_regex)

        self.build_info = get_build_info()
        self.log.info('OpenONU-Adapter-Version', build_version=self.build_info)

        if not args.no_banner:
            print_banner(self.log)

        self.adapter = None
        # Create a unique instance id using the passed-in instance id and
        # UTC timestamp
        current_time = arrow.utcnow().timestamp
        self.instance_id = self.args.instance_id + '_' + str(current_time)

        self.core_topic = str(args.core_topic)
        self.event_topic = str(args.event_topic)
        self.listening_topic = str(args.name)
        self.startup_components()

        if not args.no_heartbeat:
            self.start_kafka_cluster_heartbeat(self.instance_id)

    def start(self):
        self.start_reactor()  # will not return except on keyboard interrupt

    def string_to_int(self, loglevel):
        # Map a log-level name to its verbosity_adjust value; -1 for unknown names.
        levels = {"DEBUG": 0, "INFO": 1, "WARN": 2, "ERROR": 3, "CRITICAL": 4}
        return levels.get(loglevel.upper(), -1)

    def stop(self):
        pass

    def get_args(self):
        """Allow access to command line args"""
        return self.args

    def get_config(self):
        """Allow access to content of config file"""
        return self.config

    def _get_adapter_config(self):
        cfg = AdapterConfig()
        return cfg

    @inlineCallbacks
    def startup_components(self):
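        """Bring up the Kafka proxies, the ONU adapter, and core registration."""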
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            # Update the logger to output the vcore id.
            self.log = update_logging(instance_id=self.instance_id,
                                      vcore_id=None)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(self.args.consul,
                           self.args.kafka_cluster,
                           config=self.config.get('kafka-cluster-proxy',
                                                  {}))).start()
            Probe.kafka_cluster_proxy_running = True
            Probe.kafka_proxy_faulty = False

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                default_event_topic=self.event_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = BrcmOpenomciOnuAdapter(
                core_proxy=self.core_proxy,
                adapter_proxy=self.adapter_proxy,
                config=config,
                build_info=self.build_info)

            self.adapter.start()

            openonu_request_handler = AdapterRequestFacade(
                adapter=self.adapter, core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    target_cls=openonu_request_handler)).start()
            Probe.kafka_adapter_proxy_running = True

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry forever (a negative retry count means no limit)
            res = yield self._register_with_core(-1)
            Probe.adapter_registered_with_core = True

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)

    @inlineCallbacks
    def shutdown_components(self):
        """Execute before the reactor is shut down"""
        self.log.info('exiting-on-keyboard-interrupt')
        for component in reversed(registry.iterate()):
            yield component.stop()

        self.server.shutdown()

        import threading
        self.log.info('THREADS:')
        main_thread = threading.current_thread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            if not t.isDaemon():
                continue
            self.log.info('joining thread {} {}'.format(
                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
            t.join()

    def start_reactor(self):
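        """Install the shutdown hook, start the probe server in a thread,
        and run the Twisted reactor (blocking)."""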
        from twisted.internet import reactor
        reactor.callWhenRunning(
            lambda: self.log.info('twisted-reactor-started'))
        reactor.addSystemEventTrigger('before', 'shutdown',
                                      self.shutdown_components)
        reactor.callInThread(self.start_probe)
        reactor.run()

    def start_probe(self):
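        """Serve the readiness/liveness Probe over TCP; blocking, so it is
        run in a reactor thread."""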
        args = self.args
        host = args.probe.split(':')[0]
        port = args.probe.split(':')[1]
        socketserver.TCPServer.allow_reuse_address = True
        self.server = socketserver.TCPServer((host, int(port)), Probe)
        self.server.serve_forever()

    @inlineCallbacks
    def _register_with_core(self, retries):
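        """Register this adapter with the core; a negative retries value
        means retry forever."""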
        while 1:
            try:
                resp = yield self.core_proxy.register(
                    self.adapter.adapter_descriptor(),
                    self.adapter.device_types())
                if resp:
                    self.log.info('registered-with-core',
                                  coreId=resp.instance_id)

                returnValue(resp)
            except TimeOutError as e:
                self.log.warn("timeout-when-registering-with-core", e=e)
                if retries == 0:
                    self.log.exception("no-more-retries", e=e)
                    raise
                else:
                    retries = retries if retries < 0 else retries - 1
                    yield asleep(defs['retry_interval'])
            except Exception as e:
                self.log.exception("failed-registration", e=e)
                raise

    # Temporary function to send a heartbeat message to the external kafka
    # broker
    def start_kafka_cluster_heartbeat(self, instance_id):
        # For the heartbeat we send a message to a specific "voltha-heartbeat"
        # topic.  The message is a plain dict serialized with dumps().
        message = dict(type='heartbeat',
                       adapter=self.args.name,
                       instance=instance_id,
                       ip=get_my_primary_local_ipv4())
        topic = defs['heartbeat_topic']

        def send_heartbeat_msg():
            try:
                kafka_cluster_proxy = get_kafka_proxy()
                if kafka_cluster_proxy:
                    message['ts'] = arrow.utcnow().timestamp
                    self.log.debug('sending-kafka-heartbeat-message')

                    # Creating a handler to receive the message callbacks
                    df = Deferred()
                    df.addCallback(self.process_kafka_alive_state_update)
                    kafka_cluster_proxy.register_alive_state_update(df)
                    kafka_cluster_proxy.send_heartbeat_message(
                        topic, dumps(message))
                else:
                    Probe.kafka_cluster_proxy_running = False
                    self.log.error('kafka-proxy-unavailable')
            except Exception as e:
                self.log.exception('failed-sending-message-heartbeat', e=e)

        def check_heartbeat_delivery():
            try:
                kafka_cluster_proxy = get_kafka_proxy()
                if kafka_cluster_proxy:
                    kafka_cluster_proxy.check_heartbeat_delivery()
            except Exception as e:
                self.log.exception('failed-checking-heartbeat-delivery', e=e)

        def schedule_periodic_heartbeat():
            try:
                # Sending the heartbeat message in a loop
                lc_heartbeat = LoopingCall(send_heartbeat_msg)
                lc_heartbeat.start(10)
                # Polling the delivery status more frequently to get early notification
                lc_poll = LoopingCall(check_heartbeat_delivery)
                lc_poll.start(2)
            except Exception as e:
                self.log.exception('failed-kafka-heartbeat-startup', e=e)

        from twisted.internet import reactor
        # Delaying heartbeat initially to let kafka connection be established
        reactor.callLater(5, schedule_periodic_heartbeat)

    # Receiving the callback and updating the probe accordingly
    def process_kafka_alive_state_update(self, alive_state):
        self.log.debug('process-kafka-alive-state-update',
                       alive_state=alive_state)
        Probe.kafka_cluster_proxy_running = alive_state

        kafka_cluster_proxy = get_kafka_proxy()
        if kafka_cluster_proxy:
            Probe.kafka_proxy_faulty = kafka_cluster_proxy.is_faulty()
Example #2
    @inlineCallbacks
    def startup_components(self):
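        """Bring up the Kafka proxies, the ONU adapter, and core registration."""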
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            # Update the logger to output the vcore id.
            self.log = update_logging(instance_id=self.instance_id,
                                      vcore_id=None)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(self.args.consul,
                           self.args.kafka_cluster,
                           config=self.config.get('kafka-cluster-proxy',
                                                  {}))).start()
            Probe.kafka_cluster_proxy_running = True
            Probe.kafka_proxy_faulty = False

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                default_event_topic=self.event_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = BrcmOpenomciOnuAdapter(
                core_proxy=self.core_proxy,
                adapter_proxy=self.adapter_proxy,
                config=config,
                build_info=self.build_info)

            self.adapter.start()

            openonu_request_handler = AdapterRequestFacade(
                adapter=self.adapter, core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    target_cls=openonu_request_handler)).start()
            Probe.kafka_adapter_proxy_running = True

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry forever (a negative retry count means no limit)
            res = yield self._register_with_core(-1)
            Probe.adapter_registered_with_core = True

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)
Example #3
class Main(object):

    def __init__(self):
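        """Parse the command line, configure logging and the etcd-based log
        controller, and start the adapter components."""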

        self.args = args = parse_args()
        self.config = load_config(args)

        # log levels in python are:
        # 1 - DEBUG => verbosity_adjust = 1
        # 2 - INFO => verbosity_adjust = 2
        # 3 - WARNING => verbosity_adjust = 3
        # 4 - ERROR => verbosity_adjust = 4
        # 5 - CRITICAL => verbosity_adjust = 5

        verbosity_adjust = string_to_int(str(args.log_level))
        if verbosity_adjust == -1:
            print("Invalid loglevel is given: " + str(args.log_level))
            sys.exit(0)

        self.log = setup_logging(self.config.get('logging', {}),
                                 args.instance_id,
                                 verbosity_adjust=verbosity_adjust)
        self.log.info('container-number-extractor',
                      regex=args.container_name_regex)

        self.build_info = get_build_info()
        self.log.info('OpenONU-Adapter-Version', build_version=self.build_info)

        if not args.no_banner:
            print_banner(self.log)

        self.etcd_host = str(args.etcd).split(':')[0]
        self.etcd_port = str(args.etcd).split(':')[1]

        self.controller = LogController(self.etcd_host, self.etcd_port)

        self.adapter = None
        # Create a unique instance id using the passed-in instance id and
        # UTC timestamp
        current_time = arrow.utcnow().timestamp
        self.instance_id = self.args.instance_id + '_' + str(current_time)

        self.core_topic = str(args.core_topic)
        self.event_topic = str(args.event_topic)
        self.listening_topic = str(args.name)
        self.startup_components()

        if not args.no_heartbeat:
            self.start_heartbeat()
            self.start_kafka_cluster_heartbeat(self.instance_id)

    def start(self):
        self.start_reactor()  # will not return except on keyboard interrupt

    def stop(self):
        pass

    def get_args(self):
        """Allow access to command line args"""
        return self.args

    def get_config(self):
        """Allow access to content of config file"""
        return self.config

    def _get_adapter_config(self):
        cfg = AdapterConfig()
        return cfg

    @inlineCallbacks
    def startup_components(self):
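        """Bring up the Kafka proxies, the ONU adapter, and core registration."""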
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(
                    self.args.consul,
                    self.args.kafka_cluster,
                    config=self.config.get('kafka-cluster-proxy', {})
                )
            ).start()
            Probe.kafka_cluster_proxy_running = True

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                default_event_topic=self.event_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = BrcmOpenomciOnuAdapter(
                core_proxy=self.core_proxy, adapter_proxy=self.adapter_proxy,
                config=config,
                build_info=self.build_info)

            self.adapter.start()

            openonu_request_handler = AdapterRequestFacade(adapter=self.adapter,
                                                           core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    target_cls=openonu_request_handler
                )
            ).start()
            Probe.kafka_adapter_proxy_running = True

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry forever (a negative retry count means no limit)
            res = yield self._register_with_core(-1)
            Probe.register_adapter_with_core = True

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)

    @inlineCallbacks
    def shutdown_components(self):
        """Execute before the reactor is shut down"""
        self.log.info('exiting-on-keyboard-interrupt')
        for component in reversed(registry.iterate()):
            yield component.stop()

        import threading
        self.log.info('THREADS:')
        main_thread = threading.current_thread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            if not t.isDaemon():
                continue
            self.log.info('joining thread {} {}'.format(
                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
            t.join()

    def start_reactor(self):
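        """Install the shutdown hook, start the probe and log-config watcher
        threads, and run the Twisted reactor (blocking)."""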
        from twisted.internet import reactor, threads
        reactor.callWhenRunning(
            lambda: self.log.info('twisted-reactor-started'))
        reactor.addSystemEventTrigger('before', 'shutdown',
                                      self.shutdown_components)
        reactor.callInThread(self.start_probe)
        threads.deferToThread(self.controller.start_watch_log_config_change,
                              self.args.instance_id,
                              str(self.args.log_level))
        reactor.run()

    def start_probe(self):
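        """Serve the readiness/liveness Probe over TCP; blocking, so it is
        run in a reactor thread."""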
        args = self.args
        host = args.probe.split(':')[0]
        port = args.probe.split(':')[1]
        server = socketserver.TCPServer((host, int(port)), Probe)
        server.serve_forever()

    @inlineCallbacks
    def _register_with_core(self, retries):
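        """Register this adapter with the core; a negative retries value
        means retry forever."""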
        while 1:
            try:
                resp = yield self.core_proxy.register(
                    self.adapter.adapter_descriptor(),
                    self.adapter.device_types())
                if resp:
                    self.log.info('registered-with-core',
                                  coreId=resp.instance_id)

                returnValue(resp)
            except TimeOutError as e:
                self.log.warn("timeout-when-registering-with-core", e=e)
                if retries == 0:
                    self.log.exception("no-more-retries", e=e)
                    raise
                else:
                    retries = retries if retries < 0 else retries - 1
                    yield asleep(defs['retry_interval'])
            except Exception as e:
                self.log.exception("failed-registration", e=e)
                raise

    def start_heartbeat(self):
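        """Log a local liveness heartbeat (status and uptime) every 10 seconds."""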

        t0 = time.time()
        t0s = time.ctime(t0)

        def heartbeat():
            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)

        lc = LoopingCall(heartbeat)
        lc.start(10)

    # Temporary function to send a heartbeat message to the external kafka
    # broker
    def start_kafka_cluster_heartbeat(self, instance_id):
        # For the heartbeat we send a message to a specific "voltha-heartbeat"
        # topic.  The message is a plain dict serialized with dumps().
        message = dict(
            type='heartbeat',
            adapter=self.args.name,
            instance=instance_id,
            ip=get_my_primary_local_ipv4()
        )
        topic = defs['heartbeat_topic']

        def send_msg(start_time):
            try:
                kafka_cluster_proxy = get_kafka_proxy()
                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
                    # self.log.debug('kafka-proxy-available')
                    message['ts'] = arrow.utcnow().timestamp
                    message['uptime'] = time.time() - start_time
                    # self.log.debug('start-kafka-heartbeat')
                    kafka_cluster_proxy.send_message(topic, dumps(message))
                else:
                    Probe.kafka_cluster_proxy_running = False
                    self.log.error('kafka-proxy-unavailable')
            except Exception as e:
                self.log.exception('failed-sending-message-heartbeat', e=e)

        try:
            t0 = time.time()
            lc = LoopingCall(send_msg, t0)
            lc.start(10)
        except Exception as e:
            self.log.exception('failed-kafka-heartbeat', e=e)