def main():
    """Entry point for the gateway state replication service."""
    service = MagmaService('state', mconfigs_pb2.State())

    # Report unhandled errors to Sentry when configured
    sentry_init()

    # Recycles the gRPC client after `max_client_reuse` uses
    client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Propagates local state deletions back to the Orchestrator
    collector = GarbageCollector(service, client_manager)

    # Kick off the replication loop before entering the event loop
    replicator = StateReplicator(service, collector, client_manager)
    replicator.start()

    # Block on the service event loop, then tear down on exit
    service.run()
    service.close()
def setUp(self):
    """Spin up a dummy state service and the GarbageCollector under test."""
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(self.loop)

    mock_service = MagicMock()
    mock_service.config = {
        # Replicate arbitrary orc8r protos
        'state_protos': [
            {
                'proto_file': 'orc8r.protos.common_pb2',
                'proto_msg': 'NetworkID',
                'redis_key': NID_TYPE,
                'state_scope': 'network'
            },
            {
                'proto_file': 'orc8r.protos.service303_pb2',
                'proto_msg': 'LogVerbosity',
                'redis_key': LOG_TYPE,
                'state_scope': 'gateway'
            },
        ],
        'json_state': [{
            'redis_key': FOO_TYPE,
            'state_scope': 'gateway'
        }]
    }
    mock_service.loop = self.loop

    # Serve a dummy state servicer on an ephemeral port and open a
    # channel to it for the tests
    self._rpc_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
    )
    port = self._rpc_server.add_insecure_port('0.0.0.0:0')
    self._servicer = DummyStateServer()
    self._servicer.add_to_server(self._rpc_server)
    self._rpc_server.start()
    self.channel = grpc.insecure_channel('0.0.0.0:{}'.format(port))

    # One redis-backed dict per replicated state type
    nid_serde = RedisSerde(
        NID_TYPE, get_proto_serializer(), get_proto_deserializer(NetworkID),
    )
    foo_serde = RedisSerde(
        FOO_TYPE, get_json_serializer(), get_json_deserializer(),
    )
    log_serde = RedisSerde(
        LOG_TYPE, get_proto_serializer(), get_proto_deserializer(LogVerbosity),
    )
    self.nid_client = RedisFlatDict(get_default_client(), nid_serde)
    self.foo_client = RedisFlatDict(get_default_client(), foo_serde)
    self.log_client = RedisFlatDict(get_default_client(), log_serde)

    # Garbage collector under test, backed by a recycling gRPC client
    client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )
    self.garbage_collector = GarbageCollector(mock_service, client_manager)
def setUp(self):
    """Initialize client tests.

    Starts a mock SubscriberDB gRPC server on an ephemeral port and a
    SubscriberDBCloudClient pointed at a fresh sqlite-backed store.
    """
    # Create sqlite3 database for testing inside a throwaway directory
    self._tmpfile = tempfile.TemporaryDirectory()
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(self.loop)
    # Fix: the previous format string contained no {filename} placeholder,
    # so the `filename` kwarg (the tempdir name) was silently ignored and
    # the store path was a constant literal shared across runs. Build the
    # path from the tempdir so each test run gets an isolated database.
    store = SqliteStore(
        '{filename}{slash}'.format(
            filename=self._tmpfile.name,
            slash='/',
        ),
    )
    ServiceRegistry.add_service('test', '0.0.0.0', 0)  # noqa: S104
    # Point the registry at a dummy proxy config / service map so client
    # construction does not read real gateway configuration
    ServiceRegistry._PROXY_CONFIG = {
        'local_port': 1234,
        'cloud_address': '',
        'proxy_cloud_connections': False,
    }
    ServiceRegistry._REGISTRY = {
        "services": {
            "s6a_service": {
                "ip_address": "0.0.0.0",  # noqa: S104
                "port": 2345,
            },
        },
    }
    self.service = MagicMock()
    self.service.loop = self.loop
    # Bind the rpc server to a free port
    self._rpc_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
    )
    port = self._rpc_server.add_insecure_port('0.0.0.0:0')
    # Add the servicer
    self._servicer = MockSubscriberDBServer()
    self._servicer.add_to_server(self._rpc_server)
    self._rpc_server.start()
    # Create a rpc stub against the ephemeral port
    self.channel = grpc.insecure_channel(
        '0.0.0.0:{port}'.format(
            port=port,
        ),
    )
    grpc_client_manager = GRPCClientManager(
        service_name="subscriberdb",
        service_stub=SubscriberDBCloudStub,
        max_client_reuse=60,
    )
    # Sync client under test: pages subscribers from the mock server
    self.subscriberdb_cloud_client = SubscriberDBCloudClient(
        loop=self.service.loop,
        store=store,
        subscriber_page_size=2,
        sync_interval=10,
        grpc_client_manager=grpc_client_manager,
    )
    self.subscriberdb_cloud_client.start()
def main():
    """Entry point for the gateway state replication service."""
    service = MagmaService('state', mconfigs_pb2.State())

    # Recycles the gRPC client after `max_client_reuse` uses
    client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Kick off the replication loop before entering the event loop
    replicator = StateReplicator(service, client_manager)
    replicator.start()

    # Block on the service event loop, then tear down on exit
    service.run()
    service.close()
def setUp(self):
    """Wire a StateReporter against a dummy state service and mocks."""
    ServiceRegistry.add_service('test1', '0.0.0.0', 0)
    ServiceRegistry.add_service('test2', '0.0.0.0', 0)
    ServiceRegistry.add_service('test3', '0.0.0.0', 0)

    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(self.loop)

    mock_service = MagicMock()
    mock_service.config = {
        'magma_services': ['test1', 'test2', 'test3'],
        'non_service303_services': ['test2'],
        'skip_checkin_if_missing_meta_services': ['test3'],
    }
    mock_service.mconfig.checkin_interval = 60
    mock_service.mconfig.checkin_timeout = 30
    mock_service.mconfig_metadata.created_at = 0
    mock_service.version = "1.1.1.1"
    mock_service.loop = self.loop

    # Dummy state servicer on an ephemeral port plus a channel to it
    self._rpc_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
    )
    port = self._rpc_server.add_insecure_port('0.0.0.0:0')
    self._servicer = DummpyStateServer()
    self._servicer.add_to_server(self._rpc_server)
    self._rpc_server.start()
    self.channel = grpc.insecure_channel('0.0.0.0:{}'.format(port))

    # Service poller that reports no per-service info
    self.service_poller = unittest.mock.Mock()
    self.service_poller.service_info = unittest.mock.Mock()
    self.service_poller.service_info.return_value = {}

    status_factory = GatewayStatusFactory(
        mock_service, self.service_poller, None,
    )
    # _system_status reads from /data/flash, so stub it out
    status_factory._system_status = unittest.mock.Mock()
    status_factory._system_status.return_value = \
        self.default_system_status()

    client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Bootstrap manager that does nothing when poked
    bootstrapper = unittest.mock.Mock()
    bootstrapper.schedule_bootstrap_now = unittest.mock.Mock()
    bootstrapper.schedule_bootstrap_now.return_value = None

    self.state_reporter = StateReporter(
        config=mock_service.config,
        mconfig=mock_service.mconfig,
        loop=mock_service.loop,
        bootstrap_manager=bootstrapper,
        gw_status_factory=status_factory,
        grpc_client_manager=client_manager,
    )
    self.state_reporter.FAIL_THRESHOLD = 0
    self.state_reporter.start()
def main():
    """
    Main magmad function

    Boots the magmad orchestrating service: starts the managed magma
    services, metrics collection, cloud streaming/sync-RPC clients (gated
    on bootstrap success), state reporting, health watchdogs, upgrade
    loops, and the magmad RPC servicer, then blocks on the event loop.
    """
    service = MagmaService('magmad', mconfigs_pb2.MagmaD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    logging.info('Starting magmad for UUID: %s', snowflake.make_snowflake())

    # Create service manager from the configured service list; dynamic
    # services may additionally be enabled via the managed config (mconfig)
    services = service.config.get('magma_services')
    init_system = service.config.get('init_system', 'systemd')
    registered_dynamic_services = service.config.get(
        'registered_dynamic_services', [],
    )
    enabled_dynamic_services = []
    if service.mconfig is not None:
        enabled_dynamic_services = service.mconfig.dynamic_services

    # Poll the services' Service303 interface
    service_poller = ServicePoller(
        service.loop,
        service.config,
        enabled_dynamic_services,
    )
    service_poller.start()

    service_manager = ServiceManager(
        services,
        init_system,
        service_poller,
        registered_dynamic_services,
        enabled_dynamic_services,
    )

    # Get metrics service config
    metrics_config = service.config.get('metricsd')
    metrics_services = metrics_config['services']
    collect_interval = metrics_config['collect_interval']
    sync_interval = metrics_config['sync_interval']
    grpc_timeout = metrics_config['grpc_timeout']
    grpc_msg_size = metrics_config.get('max_grpc_msg_size_mb', 4)
    metrics_post_processor_fn = metrics_config.get('post_processing_fn')
    metric_scrape_targets = [
        ScrapeTarget(t['url'], t['name'], t['interval'])
        for t in metrics_config.get('metric_scrape_targets', [])
    ]

    # Create local metrics collector
    metrics_collector = MetricsCollector(
        services=metrics_services,
        collect_interval=collect_interval,
        sync_interval=sync_interval,
        grpc_timeout=grpc_timeout,
        grpc_max_msg_size_mb=grpc_msg_size,
        loop=service.loop,
        post_processing_fn=get_metrics_postprocessor_fn(
            metrics_post_processor_fn,
        ),
        scrape_targets=metric_scrape_targets,
    )

    # Poll and sync the metrics collector loops
    metrics_collector.run()

    # Start a background thread to stream updates from the cloud
    # (client is created here but only started once bootstrap succeeds)
    stream_client = None
    if service.config.get('enable_config_streamer', False):
        stream_client = StreamerClient(
            {
                CONFIG_STREAM_NAME: ConfigManager(
                    services,
                    service_manager,
                    service,
                    MconfigManagerImpl(),
                ),
            },
            service.loop,
        )

    # Create sync rpc client with a heartbeat of 30 seconds (timeout = 60s)
    sync_rpc_client = None
    if service.config.get('enable_sync_rpc', False):
        sync_rpc_client = SyncRPCClient(
            service.loop,
            30,
            service.config.get('print_grpc_payload', False),
        )

    first_time_bootstrap = True

    # This is called when bootstrap succeeds and when _bootstrap_check is
    # invoked but bootstrap is not needed. If it's invoked right after certs
    # are generated, certs_generated is true, control_proxy will restart.
    async def bootstrap_success_cb(certs_generated: bool):
        nonlocal first_time_bootstrap
        # Cloud-facing clients are only started after the first successful
        # bootstrap, since they need valid certs to connect
        if first_time_bootstrap:
            if stream_client:
                stream_client.start()
            if sync_rpc_client:
                sync_rpc_client.start()
            first_time_bootstrap = False
        if certs_generated:
            svcs_to_restart = []
            if 'control_proxy' in services:
                svcs_to_restart.append('control_proxy')
            # fluent-bit caches TLS client certs in memory, so we need to
            # restart it whenever the certs change
            fresh_mconfig = get_mconfig_manager().load_service_mconfig(
                'magmad',
                mconfigs_pb2.MagmaD(),
            )
            dynamic_svcs = fresh_mconfig.dynamic_services or []
            if 'td-agent-bit' in dynamic_svcs:
                svcs_to_restart.append('td-agent-bit')
            await service_manager.restart_services(services=svcs_to_restart)

    # Create bootstrap manager
    bootstrap_manager = BootstrapManager(service, bootstrap_success_cb)

    # Initialize kernel version poller if it is enabled
    kernel_version_poller = None
    if service.config.get('enable_kernel_version_checking', False):
        kernel_version_poller = KernelVersionsPoller(service)
        kernel_version_poller.start()

    # gateway status generator to bundle various information about this
    # gateway into an object.
    gateway_status_factory = GatewayStatusFactory(
        service=service,
        service_poller=service_poller,
        kernel_version_poller=kernel_version_poller,
    )

    # _grpc_client_manager to manage grpc client recycling
    grpc_client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Initialize StateReporter
    state_reporter = StateReporter(
        config=service.config,
        mconfig=service.mconfig,
        loop=service.loop,
        bootstrap_manager=bootstrap_manager,
        gw_status_factory=gateway_status_factory,
        grpc_client_manager=grpc_client_manager,
    )

    # Initialize ServiceHealthWatchdog
    service_health_watchdog = ServiceHealthWatchdog(
        config=service.config,
        loop=service.loop,
        service_poller=service_poller,
        service_manager=service_manager,
    )

    # Start _bootstrap_manager
    bootstrap_manager.start_bootstrap_manager()

    # Start all services when magmad comes up
    service.loop.create_task(service_manager.start_services())

    # Start state reporting loop
    state_reporter.start()

    # Start service timeout health check loop
    service_health_watchdog.start()

    # Start upgrade manager loop
    if service.config.get('enable_upgrade_manager', False):
        upgrader = _get_upgrader_impl(service)
        service.loop.create_task(start_upgrade_loop(service, upgrader))

    # Start network health metric collection
    if service.config.get('enable_network_monitor', False):
        service.loop.create_task(metrics_collection_loop(service.config))

    # Create generic command executor
    command_executor = None
    if service.config.get('generic_command_config', None):
        command_executor = get_command_executor_impl(service)

    # Start loop to monitor unattended upgrade status
    service.loop.create_task(monitor_unattended_upgrade_status())

    # Add all servicers to the server
    magmad_servicer = MagmadRpcServicer(
        service,
        services,
        service_manager,
        get_mconfig_manager(),
        command_executor,
        service.loop,
        service.config.get('print_grpc_payload', False),
    )
    magmad_servicer.add_to_server(service.rpc_server)

    if SDWatchdog.has_notify():
        # Create systemd watchdog
        sdwatchdog = SDWatchdog(
            tasks=[bootstrap_manager, state_reporter],
            update_status=True,
        )
        # Start watchdog loop
        service.loop.create_task(sdwatchdog.run())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """Main routine for subscriberdb service."""  # noqa: D401
    service = MagmaService('subscriberdb', mconfigs_pb2.SubscriberDB())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    # Initialize a store to keep all subscriber data.
    store = SqliteStore(
        service.config['db_path'],
        loop=service.loop,
        sid_digits=service.config['sid_last_n'],
    )

    # Initialize the processor that answers s6a authentication requests
    processor = Processor(
        store,
        get_default_sub_profile(service),
        service.mconfig.sub_profiles,
        service.mconfig.lte_auth_op,
        service.mconfig.lte_auth_amf,
    )

    # Add all servicers to the server
    subscriberdb_servicer = SubscriberDBRpcServicer(
        store,
        service.config.get('print_grpc_payload', False),
    )
    subscriberdb_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        grpc_client_manager = GRPCClientManager(
            service_name="subscriberdb",
            service_stub=SubscriberDBCloudStub,
            max_client_reuse=60,
        )
        # Interval is randomized so gateways don't sync in lockstep
        sync_interval = _randomize_sync_interval(
            service.config.get('subscriberdb_sync_interval'),
        )
        subscriber_page_size = service.config.get('subscriber_page_size')
        subscriberdb_cloud_client = SubscriberDBCloudClient(
            service.loop,
            store,
            subscriber_page_size,
            sync_interval,
            grpc_client_manager,
        )
        subscriberdb_cloud_client.start()
    else:
        logging.info(
            'enable_streaming set to False. Subscriber streaming '
            'disabled!',
        )

    # Wait until the datastore is populated by addition or resync before
    # listening for clients.
    async def serve():  # noqa: WPS430
        if not store.list_subscribers():
            # Waiting for subscribers to be added to store
            await store.on_ready()

        if service.config['s6a_over_grpc']:
            logging.info('Running s6a over grpc')
            s6a_proxy_servicer = S6aProxyRpcServicer(
                processor,
                service.config.get('print_grpc_payload', False),
            )
            s6a_proxy_servicer.add_to_server(service.rpc_server)
        else:
            logging.info('Running s6a over DIAMETER')
            base_manager = base.BaseApplication(
                service.config['mme_realm'],
                service.config['mme_host_name'],
                service.config['mme_host_address'],
            )
            s6a_manager = _get_s6a_manager(service, processor)
            base_manager.register(s6a_manager)

            # Setup the Diameter/s6a MME
            s6a_server = service.loop.create_server(
                lambda: S6aServer(
                    base_manager,
                    s6a_manager,
                    service.config['mme_realm'],
                    service.config['mme_host_name'],
                    loop=service.loop,
                ),
                service.config['host_address'],
                service.config['mme_port'],
            )
            asyncio.ensure_future(s6a_server, loop=service.loop)

    asyncio.ensure_future(serve(), loop=service.loop)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def setUp(self):
    """Build a StateReplicator over fakeredis and a dummy state service."""
    self.mock_redis = fakeredis.FakeStrictRedis()

    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(self.loop)

    mock_service = MagicMock()
    mock_service.config = {
        # Replicate arbitrary orc8r protos
        'state_protos': [
            {
                'proto_file': 'orc8r.protos.common_pb2',
                'proto_msg': 'NetworkID',
                'redis_key': NID_TYPE,
                'state_scope': 'network',
            },
            {
                'proto_file': 'orc8r.protos.common_pb2',
                'proto_msg': 'IDList',
                'redis_key': IDList_TYPE,
                'state_scope': 'gateway',
            },
            {
                'proto_file': 'orc8r.protos.service303_pb2',
                'proto_msg': 'LogVerbosity',
                'redis_key': LOG_TYPE,
                'state_scope': 'gateway',
            },
        ],
        'json_state': [{
            'redis_key': FOO_TYPE,
            'state_scope': 'network'
        }],
    }
    mock_service.loop = self.loop

    # Dummy state servicer on an ephemeral port plus a channel to it
    self._rpc_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
    )
    port = self._rpc_server.add_insecure_port('0.0.0.0:0')
    self._servicer = DummyStateServer()
    self._servicer.add_to_server(self._rpc_server)
    self._rpc_server.start()
    self.channel = grpc.insecure_channel('0.0.0.0:{}'.format(port))

    # One fakeredis-backed dict per replicated state type
    nid_serde = RedisSerde(
        NID_TYPE, get_proto_serializer(), get_proto_deserializer(NetworkID),
    )
    idlist_serde = RedisSerde(
        IDList_TYPE, get_proto_serializer(), get_proto_deserializer(IDList),
    )
    log_serde = RedisSerde(
        LOG_TYPE, get_proto_serializer(), get_proto_deserializer(LogVerbosity),
    )
    foo_serde = RedisSerde(
        FOO_TYPE, get_json_serializer(), get_json_deserializer(),
    )
    self.nid_client = RedisFlatDict(self.mock_redis, nid_serde)
    self.idlist_client = RedisFlatDict(self.mock_redis, idlist_serde)
    self.log_client = RedisFlatDict(self.mock_redis, log_serde)
    self.foo_client = RedisFlatDict(self.mock_redis, foo_serde)

    # Replicator under test, backed by a recycling gRPC client
    client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Patch get_default_client so the replicator's internal dicts use the
    # same fakeredis instance as the clients above
    redis_factory = mock.MagicMock(return_value=self.mock_redis)
    with mock.patch(
        'magma.state.redis_dicts.get_default_client',
        redis_factory,
    ):
        collector = GarbageCollector(mock_service, client_manager)
        self.state_replicator = StateReplicator(
            service=mock_service,
            garbage_collector=collector,
            grpc_client_manager=client_manager,
        )
        self.state_replicator.start()