def setUp(self):
    registry.register(
        'core',
        VolthaCore(
            instance_id=1,
            core_store_id=1,
            grpc_port=50060,
            version="1",
            log_level=LogLevel.INFO
        )
    ).start()

    self.adapter_agent_ont = AdapterAgent("broadcom_onu", "BroadcomOnuAdapter")
    self.adapter_agent_olt = AdapterAgent("asfvolt16_olt", "Asfvolt16Adapter")

    # create and update the core with the Broadcom ONU device type
    self.onu_device_type = DeviceType(
        id='broadcom_onu',
        vendor_id='BRCM',
        adapter='broadcom_onu',
        accepts_bulk_flow_update=True
    )

    # create and update the core with the Asfvolt16 OLT device type
    self.olt_device_type = DeviceType(
        id='asfvolt16_olt',
        vendor_id='Edgecore',
        adapter='asfvolt16_olt',
        accepts_bulk_flow_update=True
    )

    self.adapter_agent_ont._make_up_to_date(
        '/device_types', 'broadcom_onu', self.onu_device_type)
    self.adapter_agent_olt._make_up_to_date(
        '/device_types', 'asfvolt16_olt', self.olt_device_type)
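# Hedged sketch of a follow-on test (not from the source): it relies only on the
# registry(name) lookup pattern used elsewhere in this code base to fetch the
# 'core' component registered in setUp() above; the test name is illustrative.
def test_core_component_is_registered(self):
    core = registry('core')
    self.assertIsNotNone(core)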
def startup_components(self):
    try:
        self.log.info('starting-internal-components')

        registry.register('main', self)

        yield registry.register(
            'coordinator',
            Coordinator(
                internal_host_address=self.args.internal_host_address,
                external_host_address=self.args.external_host_address,
                rest_port=self.args.rest_port,
                instance_id=self.args.instance_id,
                config=self.config,
                consul=self.args.consul)).start()

        init_rest_service(self.args.rest_port)

        yield registry.register('grpc_server', VolthaGrpcServer(
            self.args.grpc_port)).start()

        yield registry.register(
            'kafka_proxy',
            KafkaProxy(self.args.consul, self.args.kafka,
                       config=self.config.get('kafka-proxy', {}))).start()

        yield registry.register(
            'core',
            VolthaCore(instance_id=self.args.instance_id,
                       version=VERSION,
                       log_level=LogLevel.INFO)).start(
            config_backend=load_backend(self.args))

        yield registry.register('frameio', FrameIOManager()).start()

        yield registry.register(
            'adapter_loader',
            AdapterLoader(config=self.config.get('adapter_loader', {}))).start()

        yield registry.register(
            'diag',
            Diagnostics(config=self.config.get('diagnostics', {}))).start()

        if self.args.manhole_port is not None:
            self.start_manhole(self.args.manhole_port)

        self.log.info('started-internal-services')

    except Exception as e:
        self.log.exception('Failure to start all components {}'.format(e))
def startup_components(self):
    try:
        self.log.info('starting-internal-components',
                      internal_host=self.args.internal_host_address,
                      external_host=self.args.external_host_address,
                      interface=self.args.interface,
                      consul=self.args.consul,
                      etcd=self.args.etcd)

        registry.register('main', self)

        if self.args.backend == 'consul':
            yield registry.register(
                'coordinator',
                Coordinator(
                    internal_host_address=self.args.internal_host_address,
                    external_host_address=self.args.external_host_address,
                    rest_port=self.args.rest_port,
                    instance_id=self.instance_id,
                    config=self.config,
                    consul=self.args.consul,
                    container_name_regex=self.args.container_name_regex)
            ).start()
        elif self.args.backend == 'etcd':
            yield registry.register(
                'coordinator',
                CoordinatorEtcd(
                    internal_host_address=self.args.internal_host_address,
                    external_host_address=self.args.external_host_address,
                    rest_port=self.args.rest_port,
                    instance_id=self.instance_id,
                    config=self.config,
                    etcd=self.args.etcd,
                    container_name_regex=self.args.container_name_regex)
            ).start()

        self.log.info('waiting-for-config-assignment')

        # Wait until we get a config id before we proceed
        self.core_store_id, store_prefix = \
            yield registry('coordinator').get_core_store_id_and_prefix()
        self.log.info('store-id', core_store_id=self.core_store_id)

        # Update the logger to output the vcore id.
        self.log = update_logging(instance_id=self.instance_id,
                                  vcore_id=self.core_store_id)

        yield registry.register(
            'kafka_proxy',
            KafkaProxy(self.args.consul, self.args.kafka,
                       config=self.config.get('kafka-proxy', {}))).start()

        yield registry.register('openolt_kafka_proxy', OpenoltKafkaProxy(
            self.args.kafka)).start()

        yield registry.register('grpc_server', VolthaGrpcServer(
            self.args.grpc_port)).start()

        yield registry.register(
            'core',
            VolthaCore(instance_id=self.instance_id,
                       core_store_id=self.core_store_id,
                       grpc_port=self.args.grpc_port,
                       version=self.voltha_version,
                       log_level=LogLevel.INFO)
        ).start(config_backend=load_backend(store_id=self.core_store_id,
                                            store_prefix=store_prefix,
                                            args=self.args))

        init_rest_service(self.args.rest_port)

        yield registry.register('frameio', FrameIOManager()).start()

        yield registry.register(
            'adapter_loader',
            AdapterLoader(config=self.config.get('adapter_loader', {}))).start()

        yield registry.register(
            'diag',
            Diagnostics(config=self.config.get('diagnostics', {}))).start()

        if self.args.manhole_port is not None:
            self.start_manhole(self.args.manhole_port)

        # Now that all components are loaded, in the scenario where this
        # voltha instance is picking up an existing set of data (from a
        # voltha instance that died or was stopped) we need to set up this
        # instance from where the previous one left off.
        yield registry('core').reconcile_data()

        # Now that the data is in memory and the reconcile process within
        # the core has completed (the reconciliation may still be in
        # progress with the adapters) we expose the NBI of the voltha core.
        yield registry('core').register_grpc_service()

        self.log.info('started-internal-services')

    except Exception as e:
        self.log.exception('Failure-to-start-all-components', e=e)
def startup_components(self):
    try:
        self.log.info('starting-internal-components',
                      internal_host=self.args.internal_host_address,
                      external_host=self.args.external_host_address,
                      interface=self.args.interface,
                      consul=self.args.consul)

        registry.register('main', self)

        yield registry.register(
            'coordinator',
            Coordinator(
                internal_host_address=self.args.internal_host_address,
                external_host_address=self.args.external_host_address,
                rest_port=self.args.rest_port,
                instance_id=self.instance_id,
                config=self.config,
                consul=self.args.consul)).start()

        self.log.info('waiting-for-config-assignment')

        # Wait until we get a config id before we proceed
        self.core_store_id, store_prefix = \
            yield registry('coordinator').get_core_store_id_and_prefix()
        self.log.info('store-id', core_store_id=self.core_store_id)

        yield registry.register('grpc_server', VolthaGrpcServer(
            self.args.grpc_port)).start()

        yield registry.register(
            'core',
            VolthaCore(instance_id=self.instance_id,
                       core_store_id=self.core_store_id,
                       grpc_port=self.args.grpc_port,
                       version=VERSION,
                       log_level=LogLevel.INFO)
        ).start(config_backend=load_backend(store_id=self.core_store_id,
                                            store_prefix=store_prefix,
                                            args=self.args))

        init_rest_service(self.args.rest_port)

        yield registry.register(
            'kafka_proxy',
            KafkaProxy(self.args.consul, self.args.kafka,
                       config=self.config.get('kafka-proxy', {}))).start()

        yield registry.register('frameio', FrameIOManager()).start()

        yield registry.register(
            'adapter_loader',
            AdapterLoader(config=self.config.get('adapter_loader', {}))).start()

        yield registry.register(
            'diag',
            Diagnostics(config=self.config.get('diagnostics', {}))).start()

        if self.args.manhole_port is not None:
            self.start_manhole(self.args.manhole_port)

        # Now that all components are loaded, in the scenario where this
        # voltha instance is picking up an existing set of data (from a
        # voltha instance that died or was stopped) we need to set up this
        # instance from where the previous one left off.
        yield registry('core').reconcile_data()

        self.log.info('started-internal-services')

    except Exception as e:
        self.log.exception('Failure-to-start-all-components', e=e)
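# Usage sketch (not from the source): once startup_components() has completed,
# other parts of the process can fetch the started singletons by name through
# the same registry(name) lookup the code above uses for 'coordinator' and
# 'core'. The variable names below are illustrative only.
core = registry('core')
grpc_server = registry('grpc_server')
kafka_proxy = registry('kafka_proxy')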
# Fragment from the receive loop of process_indications() (invoked in the
# __main__ block below): the first branch marks the OLT operational state down,
# publishes that indication to the default topic and exits the loop; otherwise
# the received indication is forwarded to the packet-in or default Kafka topic.
            ind.olt_ind.oper_state = 'down'
            kafka_send_pb(default_topic, ind)
            break
        else:
            log.debug("openolt grpc rx indication", indication=ind)
            if ind.HasField('pkt_ind'):
                kafka_send_pb(pktin_topic, ind)
            else:
                kafka_send_pb(default_topic, ind)


if __name__ == '__main__':
    # The script needs both a Kafka broker and an OLT address (sys.argv[1] and
    # sys.argv[2] are read below), so require two arguments.
    if len(sys.argv) < 3:
        sys.stderr.write('Usage: %s <kafka broker> <olt hostname or ip>\n\n'
                         % sys.argv[0])
        sys.exit(1)
    broker = sys.argv[1]
    host = sys.argv[2]

    log = setup_logging(yaml.load(open('./logconfig.yml', 'r')),
                        host,
                        verbosity_adjust=0,
                        cache_on_use=True)

    kafka_proxy = registry.register(
        'openolt_kafka_proxy',
        OpenoltKafkaProxy(broker)
    ).start()

    while True:
        process_indications(host)
        time.sleep(5)
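# A minimal sketch of the kafka_send_pb() helper used above, assuming the
# OpenoltKafkaProxy registered under 'openolt_kafka_proxy' exposes a
# send_message(topic, payload) call in the style of VOLTHA's KafkaProxy; that
# method name and the serialization step are assumptions, not from the source.
def kafka_send_pb(topic, ind):
    # Serialize the protobuf indication and hand it to the registered proxy.
    registry('openolt_kafka_proxy').send_message(topic, ind.SerializeToString())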
def startup_components(self):
    try:
        self.log.info('starting-internal-components',
                      internal_host=self.args.internal_host_address,
                      external_host=self.args.external_host_address,
                      interface=self.args.interface,
                      consul=self.args.consul,
                      etcd=self.args.etcd)

        registry.register('main', self)

        if self.args.backend == 'consul':
            yield registry.register(
                'coordinator',
                Coordinator(
                    internal_host_address=self.args.internal_host_address,
                    external_host_address=self.args.external_host_address,
                    rest_port=self.args.rest_port,
                    instance_id=self.instance_id,
                    config=self.config,
                    consul=self.args.consul)
            ).start()
        elif self.args.backend == 'etcd':
            yield registry.register(
                'coordinator',
                CoordinatorEtcd(
                    internal_host_address=self.args.internal_host_address,
                    external_host_address=self.args.external_host_address,
                    rest_port=self.args.rest_port,
                    instance_id=self.instance_id,
                    config=self.config,
                    consul=self.args.consul,
                    etcd=self.args.etcd)
            ).start()

        self.log.info('waiting-for-config-assignment')

        # Wait until we get a config id before we proceed
        self.core_store_id, store_prefix = \
            yield registry('coordinator').get_core_store_id_and_prefix()
        self.log.info('store-id', core_store_id=self.core_store_id)

        # Update the logger to output the vcore id.
        self.log = update_logging(instance_id=self.instance_id,
                                  vcore_id=self.core_store_id)

        yield registry.register(
            'grpc_server',
            VolthaGrpcServer(self.args.grpc_port)
        ).start()

        yield registry.register(
            'core',
            VolthaCore(
                instance_id=self.instance_id,
                core_store_id=self.core_store_id,
                grpc_port=self.args.grpc_port,
                version=VERSION,
                log_level=LogLevel.INFO
            )
        ).start(config_backend=load_backend(store_id=self.core_store_id,
                                            store_prefix=store_prefix,
                                            args=self.args))

        init_rest_service(self.args.rest_port)

        yield registry.register(
            'kafka_proxy',
            KafkaProxy(
                self.args.consul,
                self.args.kafka,
                config=self.config.get('kafka-proxy', {})
            )
        ).start()

        yield registry.register(
            'frameio',
            FrameIOManager()
        ).start()

        yield registry.register(
            'adapter_loader',
            AdapterLoader(config=self.config.get('adapter_loader', {}))
        ).start()

        yield registry.register(
            'diag',
            Diagnostics(config=self.config.get('diagnostics', {}))
        ).start()

        if self.args.manhole_port is not None:
            self.start_manhole(self.args.manhole_port)

        # Now that all components are loaded, in the scenario where this
        # voltha instance is picking up an existing set of data (from a
        # voltha instance that died or was stopped) we need to set up this
        # instance from where the previous one left off.
        yield registry('core').reconcile_data()

        # Now that the data is in memory and the reconcile process within
        # the core has completed (the reconciliation may still be in
        # progress with the adapters) we expose the NBI of the voltha core.
        yield registry('core').register_grpc_service()

        self.log.info('started-internal-services')

    except Exception as e:
        self.log.exception('Failure-to-start-all-components', e=e)
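# Hedged sketch of a symmetric teardown (not from the source). It assumes each
# component registered above exposes a stop() matching the start() calls in
# startup_components(), and that the registry can return a list of its
# components in registration order via iterate(); both names are assumptions.
def shutdown_components(self):
    # Stop components in the reverse of the order they were started.
    for component in reversed(registry.iterate()):
        yield component.stop()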