Example #1
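A VOLTHA PONSIM OLT adapter entry point: the Main class parses arguments, sets up structured logging, starts the Kafka cluster and adapter messaging proxies, registers the adapter with the core (retrying indefinitely), and runs heartbeat loops under the Twisted reactor.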
class Main(object):
    def __init__(self):

        self.args = args = parse_args()
        self.config = load_config(args)

        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
        self.log = setup_logging(self.config.get('logging', {}),
                                 args.instance_id,
                                 verbosity_adjust=verbosity_adjust)
        self.log.info('container-number-extractor',
                      regex=args.container_name_regex)

        self.ponsim_olt_adapter_version = self.get_version()
        self.log.info('Ponsim-OLT-Adapter-Version',
                      version=self.ponsim_olt_adapter_version)

        if not args.no_banner:
            print_banner(self.log)

        self.adapter = None
        # Create a unique instance id using the passed-in instance id and
        # UTC timestamp
        current_time = arrow.utcnow().timestamp
        self.instance_id = self.args.instance_id + '_' + str(current_time)

        self.core_topic = args.core_topic
        self.listening_topic = args.name
        self.startup_components()

        if not args.no_heartbeat:
            self.start_heartbeat()
            self.start_kafka_cluster_heartbeat(self.instance_id)

    def get_version(self):
        path = defs['version_file']
        if not path.startswith('/'):
            dir = os.path.dirname(os.path.abspath(__file__))
            path = os.path.join(dir, path)

        path = os.path.abspath(path)
        with open(path, 'r') as version_file:
            v = version_file.read()

        # Use Version to validate the version string - an exception will be
        # raised if the version is invalid
        Version(v)

        return v

    def start(self):
        self.start_reactor()  # will not return except on keyboard interrupt

    def stop(self):
        pass

    def get_args(self):
        """Allow access to command line args"""
        return self.args

    def get_config(self):
        """Allow access to content of config file"""
        return self.config

    def _get_adapter_config(self):
        cfg = AdapterConfig()
        return cfg

    @inlineCallbacks
    def startup_components(self):
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            # Update the logger to output the vcore id.
            self.log = update_logging(instance_id=self.instance_id,
                                      vcore_id=None)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(self.args.consul,
                           self.args.kafka_cluster,
                           config=self.config.get('kafka-cluster-proxy',
                                                  {}))).start()

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = PonSimOltAdapter(core_proxy=self.core_proxy,
                                            adapter_proxy=self.adapter_proxy,
                                            config=config)

            ponsim_request_handler = AdapterRequestFacade(
                adapter=self.adapter, core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    target_cls=ponsim_request_handler)).start()

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry for ever
            res = yield self._register_with_core(-1)

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)

    @inlineCallbacks
    def shutdown_components(self):
        """Execute before the reactor is shut down"""
        self.log.info('exiting-on-keyboard-interrupt')
        for component in reversed(registry.iterate()):
            yield component.stop()

        import threading
        self.log.info('THREADS:')
        main_thread = threading.current_thread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            if not t.isDaemon():
                continue
            self.log.info('joining thread {} {}'.format(
                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
            t.join()

    def start_reactor(self):
        from twisted.internet import reactor
        reactor.callWhenRunning(
            lambda: self.log.info('twisted-reactor-started'))
        reactor.addSystemEventTrigger('before', 'shutdown',
                                      self.shutdown_components)
        reactor.run()

    @inlineCallbacks
    def _register_with_core(self, retries):
        while 1:
            try:
                resp = yield self.core_proxy.register(
                    self.adapter.adapter_descriptor(),
                    self.adapter.device_types())
                if resp:
                    self.log.info('registered-with-core',
                                  coreId=resp.instance_id)
                returnValue(resp)
            except TimeOutError as e:
                self.log.warn("timeout-when-registering-with-core", e=e)
                if retries == 0:
                    self.log.exception("no-more-retries", e=e)
                    raise
                else:
                    retries = retries if retries < 0 else retries - 1
                    yield asleep(defs['retry_interval'])
            except Exception as e:
                self.log.exception("failed-registration", e=e)
                raise

    def start_heartbeat(self):

        t0 = time.time()
        t0s = time.ctime(t0)

        def heartbeat():
            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)

        lc = LoopingCall(heartbeat)
        lc.start(10)

    # Temporary function to send a heartbeat message to the external kafka
    # broker
    def start_kafka_cluster_heartbeat(self, instance_id):
        # For the heartbeat we send a message to a dedicated "voltha-heartbeat"
        # topic. The message is a dict serialized with dumps() before sending.
        message = dict(type='heartbeat',
                       adapter=self.args.name,
                       instance=instance_id,
                       ip=get_my_primary_local_ipv4())
        topic = defs['heartbeat_topic']

        def send_msg(start_time):
            try:
                kafka_cluster_proxy = get_kafka_proxy()
                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
                    # self.log.debug('kafka-proxy-available')
                    message['ts'] = arrow.utcnow().timestamp
                    message['uptime'] = time.time() - start_time
                    # self.log.debug('start-kafka-heartbeat')
                    kafka_cluster_proxy.send_message(topic, dumps(message))
                else:
                    self.log.error('kafka-proxy-unavailable')
            except Exception as e:
                self.log.exception('failed-sending-message-heartbeat', e=e)

        try:
            t0 = time.time()
            lc = LoopingCall(send_msg, t0)
            lc.start(10)
        except Exception as e:
            self.log.exception('failed-kafka-heartbeat', e=e)
Example #2
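Test scaffolding for ONU device events: a CoreProxy and an AdapterEvents manager are built with default identifiers, and TestOnuActivationFailEvent verifies the context data of an OnuActivationFailEvent (the snippet is cut off mid-test).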
from unittest import TestCase

import arrow

# The imports below were missing from the original snippet; the module paths
# are assumed to follow pyvoltha's package layout.
from pyvoltha.adapters.common.core_proxy import CoreProxy
from pyvoltha.adapters.extensions.events.adapter_events import AdapterEvents
from pyvoltha.adapters.extensions.events.device_events.onu.onu_activation_fail_event import OnuActivationFailEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_startup_event import OnuStartupEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_temp_red_event import OnuTempRedEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_temp_yellow_event import OnuTempYellowEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_voltage_red_event import OnuVoltageRedEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_voltage_yellow_event import OnuVoltageYellowEvent
from pyvoltha.adapters.extensions.events.device_events.onu.onu_window_drift_event import OnuWindowDriftEvent

DEFAULT_ONU_DEVICE_ID = 'default_onu_mock'
DEFAULT_PON_ID = 0
DEFAULT_ONU_ID = 0
DEFAULT_ONU_SN = 'TEST00000001'
DEFAULT_OLT_SN = 'ABCDXXXXYYYY'
DEFAULT_ONU_REG = 'ABCD1234'

core_proxy = CoreProxy(kafka_proxy=None,
                       default_core_topic='rwcore',
                       default_event_topic='voltha.events',
                       my_listening_topic='openonu')

event_mgr = AdapterEvents(core_proxy, DEFAULT_ONU_DEVICE_ID,
                          DEFAULT_ONU_DEVICE_ID, DEFAULT_ONU_SN)


class TestOnuActivationFailEvent(TestCase):
    def setUp(self):
        self.event = OnuActivationFailEvent(event_mgr, DEFAULT_ONU_ID,
                                            DEFAULT_PON_ID, DEFAULT_ONU_SN,
                                            arrow.utcnow().timestamp)

    def test_get_context_data(self):
        expected_dict = {
            'onu-id': DEFAULT_ONU_ID,
Example #3
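The startup_components coroutine of the OpenOLT adapter: it registers the Kafka cluster proxy, creates the core and adapter proxies, starts the OpenoltAdapter, wires an IKafkaMessagingProxy to an AdapterRequestFacade, and registers the adapter with the core.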
    @inlineCallbacks
    def startup_components(self):
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            # Update the logger to output the vcore id.
            self.log = update_logging(instance_id=self.instance_id,
                                      vcore_id=None)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(
                    self.args.consul,
                    self.args.kafka_cluster,
                    config=self.config.get('kafka-cluster-proxy', {})
                )
            ).start()

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = OpenoltAdapter(core_proxy=self.core_proxy,
                                          adapter_proxy=self.adapter_proxy,
                                          config=config)

            self.adapter.start()

            openolt_request_handler = AdapterRequestFacade(adapter=self.adapter,
                                                           core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    # Needs to assign a real class
                    target_cls=openolt_request_handler
                )
            ).start()

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry for ever
            res = yield self._register_with_core(-1)

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)
Example #4
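The OpenONU (Broadcom OpenOMCI) adapter entry point: it maps a log level name to a verbosity adjustment, exposes readiness flags through a Probe TCP server, sends Kafka heartbeats with delivery checks, and registers the adapter with the core.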
class Main(object):
    def __init__(self):

        self.args = args = parse_args()
        self.config = load_config(args)

        # The log level name in args.log_level is mapped to the verbosity
        # adjustment expected by setup_logging (see string_to_int below):
        # DEBUG => 0, INFO => 1, WARN => 2, ERROR => 3, CRITICAL => 4.
        # Any other value is rejected.

        verbosity_adjust = self.string_to_int(str(args.log_level))
        if verbosity_adjust == -1:
            print("Invalid loglevel is given: " + str(args.log_level))
            sys.exit(0)

        self.log = setup_logging(self.config.get('logging', {}),
                                 args.instance_id,
                                 verbosity_adjust=verbosity_adjust)
        self.log.info('container-number-extractor',
                      regex=args.container_name_regex)

        self.build_info = get_build_info()
        self.log.info('OpenONU-Adapter-Version', build_version=self.build_info)

        if not args.no_banner:
            print_banner(self.log)

        self.adapter = None
        # Create a unique instance id using the passed-in instance id and
        # UTC timestamp
        current_time = arrow.utcnow().timestamp
        self.instance_id = self.args.instance_id + '_' + str(current_time)

        self.core_topic = str(args.core_topic)
        self.event_topic = str(args.event_topic)
        self.listening_topic = str(args.name)
        self.startup_components()

        if not args.no_heartbeat:
            self.start_kafka_cluster_heartbeat(self.instance_id)

    def start(self):
        self.start_reactor()  # will not return except on keyboard interrupt

    def string_to_int(self, loglevel):
        """Map a log level name to its verbosity adjustment, or -1 if unknown."""
        levels = {"DEBUG": 0, "INFO": 1, "WARN": 2, "ERROR": 3, "CRITICAL": 4}
        return levels.get(loglevel.upper(), -1)

    def stop(self):
        pass

    def get_args(self):
        """Allow access to command line args"""
        return self.args

    def get_config(self):
        """Allow access to content of config file"""
        return self.config

    def _get_adapter_config(self):
        cfg = AdapterConfig()
        return cfg

    @inlineCallbacks
    def startup_components(self):
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            # Update the logger to output the vcore id.
            self.log = update_logging(instance_id=self.instance_id,
                                      vcore_id=None)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(self.args.consul,
                           self.args.kafka_cluster,
                           config=self.config.get('kafka-cluster-proxy',
                                                  {}))).start()
            Probe.kafka_cluster_proxy_running = True
            Probe.kafka_proxy_faulty = False

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                default_event_topic=self.event_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = BrcmOpenomciOnuAdapter(
                core_proxy=self.core_proxy,
                adapter_proxy=self.adapter_proxy,
                config=config,
                build_info=self.build_info)

            self.adapter.start()

            openonu_request_handler = AdapterRequestFacade(
                adapter=self.adapter, core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    target_cls=openonu_request_handler)).start()
            Probe.kafka_adapter_proxy_running = True

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry for ever
            res = yield self._register_with_core(-1)
            Probe.adapter_registered_with_core = True

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)

    @inlineCallbacks
    def shutdown_components(self):
        """Execute before the reactor is shut down"""
        self.log.info('exiting-on-keyboard-interrupt')
        for component in reversed(registry.iterate()):
            yield component.stop()

        self.server.shutdown()

        import threading
        self.log.info('THREADS:')
        main_thread = threading.current_thread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            if not t.isDaemon():
                continue
            self.log.info('joining thread {} {}'.format(
                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
            t.join()

    def start_reactor(self):
        from twisted.internet import reactor
        reactor.callWhenRunning(
            lambda: self.log.info('twisted-reactor-started'))
        reactor.addSystemEventTrigger('before', 'shutdown',
                                      self.shutdown_components)
        reactor.callInThread(self.start_probe)
        reactor.run()

    def start_probe(self):
        args = self.args
        host = args.probe.split(':')[0]
        port = args.probe.split(':')[1]
        socketserver.TCPServer.allow_reuse_address = True
        self.server = socketserver.TCPServer((host, int(port)), Probe)
        self.server.serve_forever()

    @inlineCallbacks
    def _register_with_core(self, retries):
        while 1:
            try:
                resp = yield self.core_proxy.register(
                    self.adapter.adapter_descriptor(),
                    self.adapter.device_types())
                if resp:
                    self.log.info('registered-with-core',
                                  coreId=resp.instance_id)

                returnValue(resp)
            except TimeOutError as e:
                self.log.warn("timeout-when-registering-with-core", e=e)
                if retries == 0:
                    self.log.exception("no-more-retries", e=e)
                    raise
                else:
                    retries = retries if retries < 0 else retries - 1
                    yield asleep(defs['retry_interval'])
            except Exception as e:
                self.log.exception("failed-registration", e=e)
                raise

    # Temporary function to send a heartbeat message to the external kafka
    # broker
    def start_kafka_cluster_heartbeat(self, instance_id):
        # For the heartbeat we send a message to a dedicated "voltha-heartbeat"
        # topic. The message is a dict serialized with dumps() before sending.
        message = dict(type='heartbeat',
                       adapter=self.args.name,
                       instance=instance_id,
                       ip=get_my_primary_local_ipv4())
        topic = defs['heartbeat_topic']

        def send_heartbeat_msg():
            try:
                kafka_cluster_proxy = get_kafka_proxy()
                if kafka_cluster_proxy:
                    message['ts'] = arrow.utcnow().timestamp
                    self.log.debug('sending-kafka-heartbeat-message')

                    # Creating a handler to receive the message callbacks
                    df = Deferred()
                    df.addCallback(self.process_kafka_alive_state_update)
                    kafka_cluster_proxy.register_alive_state_update(df)
                    kafka_cluster_proxy.send_heartbeat_message(
                        topic, dumps(message))
                else:
                    Probe.kafka_cluster_proxy_running = False
                    self.log.error('kafka-proxy-unavailable')
            except Exception as e:
                self.log.exception('failed-sending-message-heartbeat', e=e)

        def check_heartbeat_delivery():
            try:
                kafka_cluster_proxy = get_kafka_proxy()
                if kafka_cluster_proxy:
                    kafka_cluster_proxy.check_heartbeat_delivery()
            except Exception as e:
                self.log.exception('failed-checking-heartbeat-delivery', e=e)

        def schedule_periodic_heartbeat():
            try:
                # Sending the heartbeat message in a loop
                lc_heartbeat = LoopingCall(send_heartbeat_msg)
                lc_heartbeat.start(10)
                # Polling the delivery status more frequently to get early notification
                lc_poll = LoopingCall(check_heartbeat_delivery)
                lc_poll.start(2)
            except Exception as e:
                self.log.exception('failed-kafka-heartbeat-startup', e=e)

        from twisted.internet import reactor
        # Delaying heartbeat initially to let kafka connection be established
        reactor.callLater(5, schedule_periodic_heartbeat)

    # Receiving the callback and updating the probe accordingly
    def process_kafka_alive_state_update(self, alive_state):
        self.log.debug('process-kafka-alive-state-update',
                       alive_state=alive_state)
        Probe.kafka_cluster_proxy_running = alive_state

        kafka_cluster_proxy = get_kafka_proxy()
        if kafka_cluster_proxy:
            Probe.kafka_proxy_faulty = kafka_cluster_proxy.is_faulty()
Example #5
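A variant of the OpenONU startup_components coroutine that also updates the Probe readiness flags as each component comes up.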
    @inlineCallbacks
    def startup_components(self):
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(
                    self.args.consul,
                    self.args.kafka_cluster,
                    config=self.config.get('kafka-cluster-proxy', {})
                )
            ).start()
            Probe.kafka_cluster_proxy_running = True

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                default_event_topic=self.event_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = BrcmOpenomciOnuAdapter(
                core_proxy=self.core_proxy, adapter_proxy=self.adapter_proxy,
                config=config,
                build_info=self.build_info)

            self.adapter.start()

            openonu_request_handler = AdapterRequestFacade(adapter=self.adapter,
                                                           core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    target_cls=openonu_request_handler
                )
            ).start()
            Probe.kafka_adapter_proxy_running = True

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry for ever
            res = yield self._register_with_core(-1)
            Probe.register_adapter_with_core = True

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)
Example #6
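Another OpenONU Main variant that additionally watches etcd (through LogController) for dynamic log-level changes and starts the probe server from the reactor.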
class Main(object):

    def __init__(self):

        self.args = args = parse_args()
        self.config = load_config(args)

        # log levels in python are:
        # 1 - DEBUG => verbosity_adjust = 1
        # 2 - INFO => verbosity_adjust = 2
        # 3 - WARNING => verbosity_adjust = 3
        # 4 - ERROR => verbosity_adjust = 4
        # 5 - CRITICAL => verbosity_adjust = 5

        verbosity_adjust = string_to_int(str(args.log_level))
        if verbosity_adjust == -1:
            print("Invalid loglevel is given: " + str(args.log_level))
            sys.exit(0)

        self.log = setup_logging(self.config.get('logging', {}),
                                 args.instance_id,
                                 verbosity_adjust=verbosity_adjust)
        self.log.info('container-number-extractor',
                      regex=args.container_name_regex)

        self.build_info = get_build_info()
        self.log.info('OpenONU-Adapter-Version', build_version=self.build_info)

        if not args.no_banner:
            print_banner(self.log)

        self.etcd_host = str(args.etcd).split(':')[0]
        self.etcd_port = str(args.etcd).split(':')[1]

        self.controller = LogController(self.etcd_host, self.etcd_port)

        self.adapter = None
        # Create a unique instance id using the passed-in instance id and
        # UTC timestamp
        current_time = arrow.utcnow().timestamp
        self.instance_id = self.args.instance_id + '_' + str(current_time)

        self.core_topic = str(args.core_topic)
        self.event_topic = str(args.event_topic)
        self.listening_topic = str(args.name)
        self.startup_components()

        if not args.no_heartbeat:
            self.start_heartbeat()
            self.start_kafka_cluster_heartbeat(self.instance_id)

    def start(self):
        self.start_reactor()  # will not return except on keyboard interrupt

    def stop(self):
        pass

    def get_args(self):
        """Allow access to command line args"""
        return self.args

    def get_config(self):
        """Allow access to content of config file"""
        return self.config

    def _get_adapter_config(self):
        cfg = AdapterConfig()
        return cfg

    @inlineCallbacks
    def startup_components(self):
        try:
            self.log.info('starting-internal-components',
                          consul=self.args.consul,
                          etcd=self.args.etcd)

            registry.register('main', self)

            yield registry.register(
                'kafka_cluster_proxy',
                KafkaProxy(
                    self.args.consul,
                    self.args.kafka_cluster,
                    config=self.config.get('kafka-cluster-proxy', {})
                )
            ).start()
            Probe.kafka_cluster_proxy_running = True

            config = self._get_adapter_config()

            self.core_proxy = CoreProxy(
                kafka_proxy=None,
                default_core_topic=self.core_topic,
                default_event_topic=self.event_topic,
                my_listening_topic=self.listening_topic)

            self.adapter_proxy = AdapterProxy(
                kafka_proxy=None,
                core_topic=self.core_topic,
                my_listening_topic=self.listening_topic)

            self.adapter = BrcmOpenomciOnuAdapter(
                core_proxy=self.core_proxy, adapter_proxy=self.adapter_proxy,
                config=config,
                build_info=self.build_info)

            self.adapter.start()

            openonu_request_handler = AdapterRequestFacade(adapter=self.adapter,
                                                           core_proxy=self.core_proxy)

            yield registry.register(
                'kafka_adapter_proxy',
                IKafkaMessagingProxy(
                    kafka_host_port=self.args.kafka_adapter,
                    # TODO: Add KV Store object reference
                    kv_store=self.args.backend,
                    default_topic=self.args.name,
                    group_id_prefix=self.args.instance_id,
                    target_cls=openonu_request_handler
                )
            ).start()
            Probe.kafka_adapter_proxy_running = True

            self.core_proxy.kafka_proxy = get_messaging_proxy()
            self.adapter_proxy.kafka_proxy = get_messaging_proxy()

            # retry for ever
            res = yield self._register_with_core(-1)
            Probe.register_adapter_with_core = True

            self.log.info('started-internal-services')

        except Exception as e:
            self.log.exception('Failure-to-start-all-components', e=e)

    @inlineCallbacks
    def shutdown_components(self):
        """Execute before the reactor is shut down"""
        self.log.info('exiting-on-keyboard-interrupt')
        for component in reversed(registry.iterate()):
            yield component.stop()

        import threading
        self.log.info('THREADS:')
        main_thread = threading.current_thread()
        for t in threading.enumerate():
            if t is main_thread:
                continue
            if not t.isDaemon():
                continue
            self.log.info('joining thread {} {}'.format(
                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
            t.join()

    def start_reactor(self):
        from twisted.internet import reactor, threads
        reactor.callWhenRunning(
            lambda: self.log.info('twisted-reactor-started'))
        reactor.addSystemEventTrigger('before', 'shutdown',
                                      self.shutdown_components)
        reactor.callInThread(self.start_probe)
        threads.deferToThread(self.controller.start_watch_log_config_change, self.args.instance_id, str(self.args.log_level))
        reactor.run()

    def start_probe(self):
        args = self.args
        host = args.probe.split(':')[0]
        port = args.probe.split(':')[1]
        server = socketserver.TCPServer((host, int(port)), Probe)
        server.serve_forever()

    @inlineCallbacks
    def _register_with_core(self, retries):
        while 1:
            try:
                resp = yield self.core_proxy.register(
                    self.adapter.adapter_descriptor(),
                    self.adapter.device_types())
                if resp:
                    self.log.info('registered-with-core',
                                  coreId=resp.instance_id)

                returnValue(resp)
            except TimeOutError as e:
                self.log.warn("timeout-when-registering-with-core", e=e)
                if retries == 0:
                    self.log.exception("no-more-retries", e=e)
                    raise
                else:
                    retries = retries if retries < 0 else retries - 1
                    yield asleep(defs['retry_interval'])
            except Exception as e:
                self.log.exception("failed-registration", e=e)
                raise

    def start_heartbeat(self):

        t0 = time.time()
        t0s = time.ctime(t0)

        def heartbeat():
            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)

        lc = LoopingCall(heartbeat)
        lc.start(10)

    # Temporary function to send a heartbeat message to the external kafka
    # broker
    def start_kafka_cluster_heartbeat(self, instance_id):
        # For the heartbeat we send a message to a dedicated "voltha-heartbeat"
        # topic. The message is a dict serialized with dumps() before sending.
        message = dict(
            type='heartbeat',
            adapter=self.args.name,
            instance=instance_id,
            ip=get_my_primary_local_ipv4()
        )
        topic = defs['heartbeat_topic']

        def send_msg(start_time):
            try:
                kafka_cluster_proxy = get_kafka_proxy()
                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
                    # self.log.debug('kafka-proxy-available')
                    message['ts'] = arrow.utcnow().timestamp
                    message['uptime'] = time.time() - start_time
                    # self.log.debug('start-kafka-heartbeat')
                    kafka_cluster_proxy.send_message(topic, dumps(message))
                else:
                    Probe.kafka_cluster_proxy_running = False
                    self.log.error('kafka-proxy-unavailable')
            except Exception as e:
                self.log.exception('failed-sending-message-heartbeat', e=e)

        try:
            t0 = time.time()
            lc = LoopingCall(send_msg, t0)
            lc.start(10)
        except Exception as e:
            self.log.exception('failed-kafka-heartbeat', e=e)
Example #7
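Unit tests for CoreProxy.register: they mock invoke to verify the defaulted replica fields on the registered Adapter, and check that misconfigured replica counts raise the expected exceptions.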
class TestCoreProxy(TestCase):
    def setUp(self):
        self.core_proxy = CoreProxy(kafka_proxy=None,
                                    default_core_topic='test_core',
                                    default_event_topic='test.events',
                                    my_listening_topic='test_openonu')

        self.supported_device_types = [
            DeviceType(id="brmc_openonu",
                       vendor_ids=['BBSM'],
                       adapter="openonu",
                       accepts_bulk_flow_update=False,
                       accepts_add_remove_flow_updates=True)
        ]

    @defer.inlineCallbacks
    def test_register_defaults(self):
        adapter = Adapter(
            id="testAdapter",
            vendor="ONF",
            version="test",
        )

        expected_adapter = Adapter(id="testAdapter",
                                   vendor="ONF",
                                   version="test",
                                   currentReplica=1,
                                   totalReplicas=1)

        with patch.object(self.core_proxy, "invoke") as mock_invoke:

            mock_invoke.return_value = "success"

            res = yield self.core_proxy.register(adapter,
                                                 self.supported_device_types)
            mock_invoke.assert_called_with(
                rpc="Register",
                adapter=expected_adapter,
                deviceTypes=self.supported_device_types)
            self.assertEqual(mock_invoke.call_count, 1)
            self.assertEqual(res, "success")

    @defer.inlineCallbacks
    def test_register_multiple(self):

        adapter = Adapter(id="testAdapter",
                          vendor="ONF",
                          version="test",
                          currentReplica=4,
                          totalReplicas=8)

        with patch.object(self.core_proxy, "invoke") as mock_invoke:
            mock_invoke.return_value = "success"

            res = yield self.core_proxy.register(adapter,
                                                 self.supported_device_types)
            mock_invoke.assert_called_with(
                rpc="Register",
                adapter=adapter,
                deviceTypes=self.supported_device_types)

    @defer.inlineCallbacks
    def test_register_misconfigured(self):
        """
        If the operator sets inconsistent parameters (e.g. currentReplica=10,
        totalReplicas=2), an exception is raised.
        """
        adapter = Adapter(id="testAdapter",
                          vendor="ONF",
                          version="test",
                          currentReplica=10,
                          totalReplicas=8)

        with self.assertRaises(Exception) as e:
            res = yield self.core_proxy.register(adapter,
                                                 self.supported_device_types)

        self.assertEqual(
            str(e.exception),
            "currentReplica (10) can't be greater than totalReplicas (8)")

        adapter = Adapter(id="testAdapter",
                          vendor="ONF",
                          version="test",
                          totalReplicas=0,
                          currentReplica=1)

        with self.assertRaises(Exception) as e:
            res = yield self.core_proxy.register(adapter,
                                                 self.supported_device_types)

        self.assertEqual(
            str(e.exception),
            "totalReplicas can't be 0, since you're here you have at least one"
        )

        adapter = Adapter(id="testAdapter",
                          vendor="ONF",
                          version="test",
                          totalReplicas=1,
                          currentReplica=0)

        with self.assertRaises(Exception) as e:
            res = yield self.core_proxy.register(adapter,
                                                 self.supported_device_types)

        self.assertEqual(str(e.exception),
                         "currentReplica can't be 0, it has to start from 1")