class Service303Tests(TestCase):
    """
    Tests for the MagmaService and the Service303 interface
    """

    # time.time is patched to a fixed value so the service's recorded
    # start time is deterministic and can be asserted below (12345).
    @mock.patch('time.time', mock.MagicMock(return_value=12345))
    def setUp(self):
        # Register a placeholder entry for the test service (port 0);
        # the real port is re-registered once the service has started.
        ServiceRegistry.add_service('test', '0.0.0.0', 0)
        self._stub = None
        self._loop = asyncio.new_event_loop()
        # Use a new event loop to ensure isolated tests
        self._service = MagmaService(
            name='test',
            empty_mconfig=mconfigs_pb2.MagmaD(),
            loop=self._loop,
        )
        asyncio.set_event_loop(self._service.loop)

    @mock.patch(
        'magma.common.service_registry.ServiceRegistry.get_proxy_config',
    )
    def test_service_run(self, mock_get_proxy_config):
        """
        Test if the service starts and stops gracefully.
        """
        self.assertEqual(self._service.state, ServiceInfo.STARTING)

        # Fake proxy config so the service can set up cloud connections
        # without reading real configuration.
        mock_get_proxy_config.return_value = {
            'cloud_address': '127.0.0.1',
            'proxy_cloud_connections': True,
        }

        # Start the service and pause the loop
        self._service.loop.stop()
        self._service.run()
        asyncio.set_event_loop(self._service.loop)
        # Cancel the log counter's periodic task so it does not run
        # while the loop is driven manually by this test.
        self._service.log_counter._periodic_task.cancel()
        self.assertEqual(self._service.state, ServiceInfo.ALIVE)

        # Create a rpc stub and query the Service303 interface
        ServiceRegistry.add_service('test', '0.0.0.0', self._service.port)
        channel = ServiceRegistry.get_rpc_channel(
            'test', ServiceRegistry.LOCAL,
        )
        self._stub = Service303Stub(channel)

        # start_time_secs matches the mocked time.time() above.
        info = ServiceInfo(
            name='test',
            version='0.0.0',
            state=ServiceInfo.ALIVE,
            health=ServiceInfo.APP_HEALTHY,
            start_time_secs=12345,
        )
        self.assertEqual(self._stub.GetServiceInfo(Void()), info)

        # Stop the service
        self._stub.StopService(Void())
        self._service.loop.run_forever()
        self.assertEqual(self._service.state, ServiceInfo.STOPPED)
def main():
    """ Top-level function for enodebd """
    service = MagmaService('enodebd', mconfigs_pb2.EnodebD())
    logger.init()

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name, sentry_mconfig=service.shared_mconfig.sentry_config)

    # State machine manager for tracking multiple connected eNB devices.
    state_machine_manager = StateMachineManager(service)

    # Statistics manager
    stats_mgr = StatsManager(state_machine_manager)
    stats_mgr.run()

    # Start TR-069 thread (daemon=True so it cannot block process exit)
    server_thread = Thread(
        target=tr069_server,
        args=(state_machine_manager, ),
        daemon=True,
    )
    server_thread.start()

    # Shared flag controlling gRPC payload logging in both the RPC
    # servicer and the operational-states callback below.
    print_grpc_payload = service.config.get('print_grpc_payload', False)

    # Add all servicers to the server
    enodebd_servicer = EnodebdRpcServicer(
        state_machine_manager,
        print_grpc_payload,
    )
    enodebd_servicer.add_to_server(service.rpc_server)

    # Register function to get service status
    def get_enodebd_status():
        return get_service_status_old(state_machine_manager)
    service.register_get_status_callback(get_enodebd_status)

    # Register a callback function for GetOperationalStates service303 function
    def get_enodeb_operational_states() -> List[State]:
        return get_operational_states(
            state_machine_manager,
            service.mconfig,
            print_grpc_payload,
        )
    service.register_operational_states_callback(get_enodeb_operational_states)

    # Set eNodeBD iptables rules due to exposing public IP to eNodeB
    service.loop.create_task(set_enodebd_iptables_rule())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def setUp(self):
    """Register a fresh test service running on its own event loop."""
    ServiceRegistry.add_service('test', '0.0.0.0', 0)
    self._stub = None
    # Give the service a dedicated loop so each test is isolated.
    loop = asyncio.new_event_loop()
    self._service = MagmaService('test', loop=loop)
    # Clear the global event loop so tests rely only on the event loop
    # that was manually set above.
    asyncio.set_event_loop(None)
def setUp(self):
    """Set up a MagmaService on a dedicated event loop for each test."""
    # Register a placeholder entry for the test service (port 0).
    ServiceRegistry.add_service('test', '0.0.0.0', 0)
    self._stub = None
    self._loop = asyncio.new_event_loop()
    # Use a new event loop to ensure isolated tests
    self._service = MagmaService(
        name='test',
        empty_mconfig=mconfigs_pb2.MagmaD(),
        loop=self._loop,
    )
    asyncio.set_event_loop(self._service.loop)
def main():
    """
    main() for redirectd
    Initializes the scribe logger, starts the server threads
    """
    service = MagmaService('redirectd', mconfigs_pb2.RedirectD())

    # Scribe logging is opt-in via the service config.
    scribe_logger = None
    if service.config.get('scribe_logging_enabled', False):
        scribe_logger = RedirectScribeLogger(service.loop)

    # The HTTP redirect server binds to pipelined's bridge IP; without
    # it there is nothing to serve, so exit early.
    redirect_ip = get_service_config_value(
        'pipelined', 'bridge_ip_address', None,
    )
    if redirect_ip is None:
        logging.error("ERROR bridge_ip_address not found in pipelined config")
        service.close()
        return

    http_port = service.config['http_port']
    exit_callback = get_exit_server_thread_callback(service)
    run_server_thread(
        run_flask, redirect_ip, http_port, scribe_logger, exit_callback,
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """
    main() for redirectd. Starts the server threads.
    """
    service = MagmaService('redirectd', mconfigs_pb2.RedirectD())

    # Forward uncaught errors to Sentry when configured.
    sentry_init(service_name=service.name)

    # The redirect server binds to pipelined's bridge IP; without it we
    # cannot serve, so bail out early.
    bridge_ip = get_service_config_value(
        'pipelined', 'bridge_ip_address', None,
    )
    if bridge_ip is None:
        logging.error("ERROR bridge_ip_address not found in pipelined config")
        service.close()
        return

    port = service.config['http_port']
    on_exit = get_exit_server_thread_callback(service)
    run_server_thread(run_flask, bridge_ip, port, on_exit)

    # Block on the event loop until the service is stopped, then clean up.
    service.run()
    service.close()
def get_unexpected_restart_summary(self):
    """Return a dict of service name -> RestartFrequency built from the
    UNEXPECTED_SERVICE_RESTARTS metric, polled via the ServicePoller."""
    service = MagmaService('magmad', mconfigs_pb2.MagmaD())
    service_poller = ServicePoller(service.loop, service.config)
    service_poller.start()

    asyncio.set_event_loop(service.loop)

    # noinspection PyProtectedMember
    # pylint: disable=protected-access
    async def fetch_info():
        # Refresh the poller's view of running services, then read the
        # restart counter label for each known service.
        restart_frequencies = {}
        await service_poller._get_service_info()
        for service_name in service_poller.service_info.keys():
            restarts = int(
                UNEXPECTED_SERVICE_RESTARTS
                .labels(service_name=service_name)
                ._value.get(),
            )
            # time_interval is left empty here; presumably filled in by
            # the caller — TODO confirm.
            restart_frequencies[service_name] = RestartFrequency(
                count=restarts,
                time_interval='',
            )
        return restart_frequencies

    return service.loop.run_until_complete(fetch_info())
def main():
    """ Top-level function for enodebd """
    service = MagmaService('enodebd')

    # State machine manager for tracking multiple connected eNB devices.
    state_machine_manager = StateMachineManager(service)

    # Statistics manager
    stats_mgr = StatsManager(state_machine_manager)
    stats_mgr.run()

    # Start TR-069 thread (daemon=True so it cannot block process exit)
    server_thread = Thread(
        target=tr069_server,
        args=(state_machine_manager, ),
        daemon=True,
    )
    server_thread.start()

    # Add all servicers to the server
    enodebd_servicer = EnodebdRpcServicer(state_machine_manager)
    enodebd_servicer.add_to_server(service.rpc_server)

    # Register function to get service status
    def get_enodebd_status():
        return get_service_status_old(state_machine_manager)
    service.register_get_status_callback(get_enodebd_status)

    # Register a callback function for GetOperationalStates service303 function
    def get_enodeb_operational_states() -> List[State]:
        return get_operational_states(state_machine_manager)
    service.register_operational_states_callback(get_enodeb_operational_states)

    # Set eNodeBD iptables rules due to exposing public IP to eNodeB
    service.loop.create_task(set_enodebd_iptables_rule())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
class Service303Tests(TestCase):
    """
    Tests for the MagmaService and the Service303 interface
    """

    # time.time is patched to a fixed value so the service's recorded
    # start time is deterministic and can be asserted below (12345).
    @mock.patch('time.time', mock.MagicMock(return_value=12345))
    def setUp(self):
        # Register a placeholder entry for the test service (port 0);
        # the real port is re-registered once the service has started.
        ServiceRegistry.add_service('test', '0.0.0.0', 0)
        self._stub = None
        # Use a new event loop to ensure isolated tests
        self._service = MagmaService('test', loop=asyncio.new_event_loop())
        # Clear the global event loop so tests rely only on the event loop that
        # was manually set
        asyncio.set_event_loop(None)

    def test_service_run(self):
        """
        Test if the service starts and stops gracefully.
        """
        self.assertEqual(self._service.state, ServiceInfo.STARTING)

        # Start the service and pause the loop
        self._service.loop.stop()
        self._service.run()
        self.assertEqual(self._service.state, ServiceInfo.ALIVE)

        # Create a rpc stub and query the Service303 interface
        ServiceRegistry.add_service('test', '0.0.0.0', self._service.port)
        channel = ServiceRegistry.get_rpc_channel('test', ServiceRegistry.LOCAL)
        self._stub = Service303Stub(channel)

        # start_time_secs matches the mocked time.time() above.
        info = ServiceInfo(name='test',
                           version='0.0.0',
                           state=ServiceInfo.ALIVE,
                           health=ServiceInfo.APP_HEALTHY,
                           start_time_secs=12345)
        self.assertEqual(self._stub.GetServiceInfo(Void()), info)

        # Stop the service
        self._stub.StopService(Void())
        self._service.loop.run_forever()
        self.assertEqual(self._service.state, ServiceInfo.STOPPED)
def main():
    """Start monitord"""
    manual_ping_targets = {}
    service = MagmaService('monitord', mconfigs_pb2.MonitorD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name, sentry_mconfig=service.shared_mconfig.sentry_config)

    # Monitoring thread loop
    mtr_interface = load_service_config("monitord")["mtr_interface"]

    # Add manual IP targets from yml file; a missing "ping_targets"
    # section is not an error, just logged.
    try:
        targets = load_service_config("monitord")["ping_targets"]
        for target, data in targets.items():
            ip_string = data.get("ip")
            if ip_string:
                ip = IPAddress(
                    version=IPAddress.IPV4,
                    address=str.encode(ip_string),
                )
                logging.debug(
                    'Adding %s:%s:%s to ping target', target,
                    ip.version, ip.address,
                )
                manual_ping_targets[target] = ip
    except KeyError:
        logging.warning("No ping targets configured")
    cpe_monitor = CpeMonitoringModule()
    cpe_monitor.set_manually_configured_targets(manual_ping_targets)

    icmp_monitor = ICMPJob(
        cpe_monitor, service.mconfig.polling_interval,
        service.loop, mtr_interface,
    )
    icmp_monitor.start()

    # Register a callback function for GetOperationalStates.
    # Wrapped in a lambda so fresh state is serialized on every call.
    service.register_operational_states_callback(
        lambda: _get_serialized_subscriber_states(cpe_monitor),
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def test_service_cfg_parsing(self):
    """ Test the parsing of the service config file for enodebd.yml"""
    # Config dicts are large; show the full diff on failure, consistent
    # with the sibling config-parsing tests.
    self.maxDiff = None
    service = MagmaService('enodebd', mconfigs_pb2.EnodebD())
    service_cfg = service.config

    # Expected config, with the overrides this test environment applies.
    service_cfg_1 = self._get_service_config()
    service_cfg_1['web_ui_enable_list'] = []
    service_cfg_1[FreedomFiOneConfigurationInitializer.SAS_KEY][
        SASParameters.SAS_UID] = "INVALID_ID"
    service_cfg_1[FreedomFiOneConfigurationInitializer.SAS_KEY][
        SASParameters.SAS_CERT_SUBJECT] = "INVALID_CERT_SUBJECT"

    # assertDictEqual gives a readable per-key diff on mismatch.
    self.assertDictEqual(service_cfg, service_cfg_1)
def test_service_cfg_parsing(self):
    """ Test the parsing of the service config file for enodebd.yml"""
    self.maxDiff = None
    service = MagmaService('enodebd', mconfigs_pb2.EnodebD())
    parsed_cfg = service.config

    # Build the expected config and apply the per-test overrides.
    expected_cfg = get_service_config()
    expected_cfg['web_ui_enable_list'] = []
    expected_cfg["prim_src"] = "GNSS"
    expected_cfg[SAS_KEY][SASParameters.SAS_UID] = "INVALID_ID"
    expected_cfg[SAS_KEY][
        SASParameters.SAS_CERT_SUBJECT] = "INVALID_CERT_SUBJECT"

    self.assertDictEqual(parsed_cfg, expected_cfg)
def _collect_service_metrics():
    """Sample per-service CPU and memory usage into Prometheus gauges.

    Queries systemd for each magmad-managed service's main PID and
    memory accounting, then sets the CPU / memory metrics. Services
    whose process has exited, or whose memory accounting is not
    available, are skipped.
    """
    config = MagmaService('magmad', mconfigs_pb2.MagmaD()).config
    magma_services = [
        "magma@" + service for service in config['magma_services']
    ]
    non_magma_services = ["sctpd", "openvswitch-switch"]

    for service in magma_services + non_magma_services:
        cmd = [
            "systemctl",
            "show",
            service,
            "--property=MainPID,MemoryCurrent,MemoryAccounting,MemoryLimit",
        ]
        # TODO(@wallyrb): Move away from subprocess and use psystemd
        output = subprocess.check_output(cmd)

        # Each output line is "Key=Value". Parse into a dict keyed on the
        # property name rather than chaining str.replace() and relying on
        # positional order — that approach breaks if systemd emits the
        # properties in a different order or a value embeds a key name.
        properties = {}
        for line in str(output, "utf-8").strip().split("\n"):
            key, _, value = line.partition("=")
            properties[key] = value

        pid = int(properties.get("MainPID", "0"))
        memory = properties.get("MemoryCurrent", "")
        memory_accounting = properties.get("MemoryAccounting", "")
        memory_limit = properties.get("MemoryLimit", "")

        if pid != 0:
            try:
                p = psutil.Process(pid=pid)
                # NOTE: interval=1 blocks for one second per service
                # while sampling CPU usage.
                cpu_percentage = p.cpu_percent(interval=1)
            except psutil.NoSuchProcess:
                logging.warning(
                    "When collecting CPU usage for service %s: Process with PID %d no longer exists.",
                    service,
                    pid,
                )
                continue
            else:
                _counter_set(
                    SERVICE_CPU_PERCENTAGE.labels(service_name=service),
                    cpu_percentage,
                )

        # MemoryCurrent is "[not set]" when accounting is unavailable.
        if not memory.isnumeric():
            continue
        if memory_accounting == "yes":
            _counter_set(
                SERVICE_MEMORY_USAGE.labels(service_name=service),
                int(memory),
            )
        # MemoryLimit is "infinity" when unlimited; skip the ratio then.
        if memory_limit.isnumeric():
            _counter_set(
                SERVICE_MEMORY_PERCENTAGE.labels(service_name=service),
                int(memory) / int(memory_limit),
            )
def main():
    """ main() for policydb """
    service = MagmaService('policydb', mconfigs_pb2.PolicyDB())

    # Add all servicers to the server; session RPCs consult the local
    # subscriberdb service over gRPC.
    chan = ServiceRegistry.get_rpc_channel('subscriberdb', ServiceRegistry.LOCAL)
    subscriberdb_stub = SubscriberDBStub(chan)
    session_servicer = SessionRpcServicer(service.mconfig, subscriberdb_stub)
    session_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        stream = StreamerClient(
            {
                'policydb': PolicyDBStreamerCallback(),
                'rule_mappings': RuleMappingsStreamerCallback(),
            },
            service.loop,
        )
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """ main() for subscriberdb """
    service = MagmaService('subscriberdb')

    # Initialize a store to keep all subscriber data.
    store = SqliteStore(service.config['db_path'], loop=service.loop)

    # Initialize the processor
    processor = Processor(
        store,
        get_default_sub_profile(service),
        service.mconfig.sub_profiles,
        service.mconfig.lte_auth_op,
        service.mconfig.lte_auth_amf,
    )

    # Add all servicers to the server
    subscriberdb_servicer = SubscriberDBRpcServicer(store)
    subscriberdb_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        callback = SubscriberDBStreamerCallback(store, service.loop)
        stream = StreamerClient({"subscriberdb": callback}, service.loop)
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Wait until the datastore is populated by addition or resync before
    # listening for clients.
    # NOTE: this is a legacy generator-based coroutine (yield from),
    # scheduled via asyncio.ensure_future below.
    def serve():
        # Waiting for subscribers to be added to store
        yield from store.on_ready()
        if service.config['s6a_over_grpc']:
            # Serve s6a over gRPC instead of raw Diameter.
            s6a_proxy_servicer = S6aProxyRpcServicer(processor)
            s6a_proxy_servicer.add_to_server(service.rpc_server)
        else:
            base_manager = base.BaseApplication(
                service.config['mme_realm'],
                service.config['mme_host_name'],
                service.config['mme_host_address'],
            )
            s6a_manager = _get_s6a_manager(service, processor)
            base_manager.register(s6a_manager)

            # Setup the Diameter/s6a MME
            s6a_server = service.loop.create_server(
                lambda: S6aServer(
                    base_manager,
                    s6a_manager,
                    service.config['mme_realm'],
                    service.config['mme_host_name'],
                    loop=service.loop,
                ),
                service.config['host_address'],
                service.config['mme_port'],
            )
            asyncio.ensure_future(s6a_server, loop=service.loop)
    asyncio.ensure_future(serve(), loop=service.loop)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """ main() for gateway state replication service """
    service = MagmaService('state', mconfigs_pb2.State())

    # Report uncaught errors to Sentry when configured.
    sentry_init()

    # Manages gRPC client recycling for the state service.
    client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Garbage collector propagates state deletions back to Orchestrator.
    collector = GarbageCollector(service, client_manager)

    # Kick off the replication loop.
    replicator = StateReplicator(service, collector, client_manager)
    replicator.start()

    # Block on the event loop until the service is stopped, then clean up.
    service.run()
    service.close()
def main():
    """
    Main co-routine for metricsd

    :return: None
    """
    service = MagmaService('metricsd')
    cfg = service.config

    # Local collector: polls the listed services and syncs batches of
    # samples upstream on the configured intervals.
    collector = MetricsCollector(
        cfg['services'],
        cfg['collect_interval'],
        cfg['sync_interval'],
        cfg['grpc_timeout'],
        cfg['queue_length'],
        service.loop,
    )

    # Start poll and sync loops.
    collector.run()

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()
def main():
    """ Top-level function for health service """
    service = MagmaService('health', None)

    # Report uncaught errors to Sentry when configured.
    sentry_init()

    # Wrapper used to query and restart managed services.
    service_state = ServiceStateWrapper()

    # Pull the state_recovery section from the service YML config.
    recovery_cfg = service.config["state_recovery"]
    redis_dump_src = load_service_config("redis").get("dir", "/var/opt/magma")

    recovery_job = StateRecoveryJob(
        service_state=service_state,
        polling_interval=int(recovery_cfg["interval_check_mins"]) * 60,
        services_check=recovery_cfg["services_check"],
        restart_threshold=recovery_cfg["restart_threshold"],
        redis_dump_src=redis_dump_src,
        snapshots_dir=recovery_cfg["snapshots_dir"],
        service_loop=service.loop,
    )
    recovery_job.start()

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()
def main():
    """ main() for smsd """
    service = MagmaService('smsd', None)

    # Report uncaught errors to Sentry when configured.
    sentry_init(service_name=service.name)

    # Channels to the local directory/MME services and the cloud smsd.
    directoryd_chan = ServiceRegistry.get_rpc_channel(
        'directoryd', ServiceRegistry.LOCAL,
    )
    mme_chan = ServiceRegistry.get_rpc_channel(
        'sms_mme_service', ServiceRegistry.LOCAL,
    )
    smsd_chan = ServiceRegistry.get_rpc_channel('smsd', ServiceRegistry.CLOUD)

    # The relay bridges cloud SMS delivery to the local MME; register it
    # on this service's RPC server and start it.
    relay = SmsRelay(
        service.loop,
        GatewayDirectoryServiceStub(directoryd_chan),
        SMSOrc8rGatewayServiceStub(mme_chan),
        SmsDStub(smsd_chan),
    )
    relay.add_to_server(service.rpc_server)
    relay.start()

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()
def main():
    """main() for policydb."""
    service = MagmaService('policydb', mconfigs_pb2.PolicyDB())

    # In-memory/stored mappings shared between the servicers and the
    # streamer callbacks below.
    apn_rules_dict = ApnRuleAssignmentsDict()
    assignments_dict = RuleAssignmentsDict()
    basenames_dict = BaseNameDict()
    rating_groups_dict = RatingGroupsDict()

    # Local sessiond stubs share one channel.
    sessiond_chan = ServiceRegistry.get_rpc_channel(
        'sessiond', ServiceRegistry.LOCAL,
    )
    session_mgr_stub = LocalSessionManagerStub(sessiond_chan)
    sessiond_stub = SessionProxyResponderStub(sessiond_chan)
    reauth_handler = ReAuthHandler(assignments_dict, sessiond_stub)

    # Add all servicers to the server
    session_servicer = SessionRpcServicer(
        service.mconfig,
        rating_groups_dict,
        basenames_dict,
        apn_rules_dict,
    )
    session_servicer.add_to_server(service.rpc_server)
    orc8r_chan = ServiceRegistry.get_rpc_channel(
        'policydb', ServiceRegistry.CLOUD,
    )
    policy_stub = PolicyAssignmentControllerStub(orc8r_chan)
    policy_servicer = PolicyRpcServicer(
        reauth_handler,
        basenames_dict,
        policy_stub,
    )
    policy_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        stream = StreamerClient(
            {
                'policydb': PolicyDBStreamerCallback(),
                'apn_rule_mappings': ApnRuleMappingsStreamerCallback(
                    session_mgr_stub,
                    basenames_dict,
                    apn_rules_dict,
                ),
                'rule_mappings': RuleMappingsStreamerCallback(
                    reauth_handler,
                    basenames_dict,
                    assignments_dict,
                    apn_rules_dict,
                ),
                'rating_groups': RatingGroupsStreamerCallback(
                    rating_groups_dict),
            },
            service.loop,
        )
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def test_service_cfg_parsing(self):
    """ Test the parsing of the service config file for enodebd.yml"""
    self.maxDiff = None
    service = MagmaService('enodebd', mconfigs_pb2.EnodebD())
    parsed = service.config
    parsed["sas"]["sas_server_url"] = TEST_SAS_URL

    # Build the expected config and apply the per-test overrides.
    expected = _get_service_config()
    expected['web_ui_enable_list'] = []
    expected['prim_src'] = 'GNSS'
    expected[SAS_KEY][SASParameters.SAS_UID] = 'INVALID_ID'
    expected[SAS_KEY][SASParameters.SAS_CERT_SUBJECT] = 'INVALID_CERT_SUBJECT'
    expected['print_grpc_payload'] = False

    self.assertDictEqual(parsed, expected)
def main():
    """ main() for ctraced """
    service = MagmaService('ctraced', CtraceD())
    # Block on the event loop until the service is stopped, then
    # release its resources.
    service.run()
    service.close()
def main(): """ Loads the Ryu apps we want to run from the config file. This should exit on keyboard interrupt. """ # Run asyncio loop in a greenthread so we can evaluate other eventlets # TODO: Remove once Ryu migrates to asyncio asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy()) service = MagmaService('pipelined') service_config = service.config if environment.is_dev_mode(): of_rest_server.configure(service_config) # Set Ryu config params cfg.CONF.ofp_listen_host = "127.0.0.1" # Load the ryu apps service_manager = ServiceManager(service) service_manager.load() def callback(returncode): if returncode != 0: logging.error( "Failed to set MASQUERADE: %d", returncode ) if service.mconfig.nat_enabled: call_process('iptables -t nat -A POSTROUTING -o %s -j MASQUERADE' % service.config['nat_iface'], callback, service.loop ) service.loop.create_task(monitor_ifaces( service.config['monitored_ifaces'], service.loop), ) manager = AppManager.get_instance() # Add pipelined rpc servicer pipelined_srv = PipelinedRpcServicer( service.loop, manager.applications.get('MeterStatsController', None), manager.applications.get('EnforcementController', None), manager.applications.get('EnforcementStatsController', None), manager.applications.get('DPIController', None), service_manager) pipelined_srv.add_to_server(service.rpc_server) # Run the service loop service.run() # Cleanup the service service.close()
def main():
    """
    Main co-routine for linkstatsd

    :return: None
    """
    parser = create_parser()
    args = parser.parse_args()

    # set up logging (DEBUG when -v/--verbose was passed)
    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format="[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] %(message)s",
    )

    # Get service config
    service = MagmaService("linkstatsd", wifi_mconfigs_pb2.Linkstatsd())

    # Create stats collector
    collector = LinkstatsCollector(
        service.loop, service.config, service.mconfig,
    )

    # Create network state manager
    state_mgr = NetworkStateManager(
        service.loop, service.config, service.mconfig,
    )
    # Start state manager's state-updating loop
    state_mgr.start()

    # Register callback function to sync state with the cloud
    service.register_get_status_callback(state_mgr.get_state)

    # Start collector loop
    collector.start_collector()

    # Only arm the watchdog when running under systemd with sd_notify
    # support.
    if SDWatchdog.has_notify():
        # Create systemd watchdog
        sdwatchdog = SDWatchdog([collector, state_mgr], update_status=True)
        # Start watchdog loop
        service.loop.create_task(sdwatchdog.run())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """ main() for monitord service"""
    service = MagmaService('monitord', mconfigs_pb2.MonitorD())

    # Background monitor that pings at the configured interval.
    icmp_monitor = ICMPMonitoring(
        service.mconfig.polling_interval, service.loop,
    )
    icmp_monitor.start()

    # GetOperationalStates callback: serialize the monitor's current
    # subscriber state each time it is invoked.
    def _operational_states():
        return serialize_subscriber_states(
            icmp_monitor.get_subscriber_state(),
        )
    service.register_operational_states_callback(_operational_states)

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()
def main():
    """ main() for monitord service"""
    manual_ping_targets = {}
    service = MagmaService('monitord', mconfigs_pb2.MonitorD())

    # Optionally pipe errors to Sentry
    sentry_init()

    # Monitoring thread loop
    mtr_interface = load_service_config("monitord")["mtr_interface"]

    # Add manual IP targets from yml file; a missing "ping_targets"
    # section is not fatal, just logged.
    try:
        targets = load_service_config("monitord")["ping_targets"]
        for target, data in targets.items():
            if "ip" in data:
                ip = IPAddress(
                    version=IPAddress.IPV4,
                    address=str.encode(data["ip"]),
                )
                # Lazy %-style args avoid formatting when DEBUG is off.
                logging.debug(
                    'Adding %s:%s:%s to ping target',
                    target, ip.version, ip.address,
                )
                manual_ping_targets[target] = ip
    except KeyError:
        logging.warning("No ping targets configured")
    cpe_monitor = CpeMonitoringModule()
    cpe_monitor.set_manually_configured_targets(manual_ping_targets)

    icmp_monitor = ICMPMonitoring(
        cpe_monitor,
        service.mconfig.polling_interval,
        service.loop,
        mtr_interface,
    )
    icmp_monitor.start()

    # Register a callback function for GetOperationalStates.
    # Fix: wrap in a lambda so the helper is invoked on every query
    # instead of once at registration time (matches the sibling
    # monitord main(), which registers the same helper via a lambda).
    service.register_operational_states_callback(
        lambda: _get_serialized_subscriber_states(cpe_monitor),
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """ main() for captive_portal service """
    service = MagmaService('captive_portal', None)

    # Expose the session RPC servicer, configured from the service YML.
    servicer = SessionRpcServicer(service.config)
    servicer.add_to_server(service.rpc_server)

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()
def main():
    """ main() for hello service """
    service = MagmaService('hello')

    # Register the hello RPC servicer on the service's gRPC server.
    servicer = HelloRpcServicer()
    servicer.add_to_server(service.rpc_server)

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()
def main():
    """ main() for eventd """
    service = MagmaService('eventd', EventD())

    # Events are validated against the configured specs before serving.
    validator = EventValidator(service.config)
    servicer = EventDRpcServicer(service.config, validator)
    servicer.add_to_server(service.rpc_server)

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()
def main():
    """ main() for eventd """
    service = MagmaService('eventd', None)

    # Load event specs from the registry before exposing the servicer.
    servicer = EventDRpcServicer(service.config)
    servicer.load_specs_from_registry()
    servicer.add_to_server(service.rpc_server)

    # Block on the event loop until stopped, then clean up.
    service.run()
    service.close()