def main():
    """Entry point for metricsd.

    Builds the local metrics collector from the service config, starts
    its poll/sync loops, then runs the service event loop until shutdown.

    :return: None
    """
    # Get service config
    service = MagmaService('metricsd')
    cfg = service.config

    # Create local metrics collector straight from the config values
    collector = MetricsCollector(
        cfg['services'],
        cfg['collect_interval'],
        cfg['sync_interval'],
        cfg['grpc_timeout'],
        cfg['queue_length'],
        service.loop,
    )

    # Start poll and sync loops
    collector.run()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for redirectd.

    Initializes the scribe logger (when enabled) and starts the server
    threads, then runs the service loop.
    """
    service = MagmaService('redirectd', mconfigs_pb2.RedirectD())

    # Scribe logging is optional; the logger stays None when disabled.
    scribe_logger = None
    if service.config.get('scribe_logging_enabled', False):
        scribe_logger = RedirectScribeLogger(service.loop)

    redirect_ip = get_service_config_value(
        'pipelined', 'bridge_ip_address', None,
    )
    if redirect_ip is None:
        # Without the bridge address there is nothing to bind to.
        logging.error("ERROR bridge_ip_address not found in pipelined config")
        service.close()
        return

    run_server_thread(
        run_flask,
        redirect_ip,
        service.config['http_port'],
        scribe_logger,
        get_exit_server_thread_callback(service),
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
class Service303Tests(TestCase):
    """
    Tests for the MagmaService and the Service303 interface
    """

    # time.time is frozen so the reported start_time_secs is predictable.
    @mock.patch('time.time', mock.MagicMock(return_value=12345))
    def setUp(self):
        # Register the service on port 0 so the OS picks a free port.
        ServiceRegistry.add_service('test', '0.0.0.0', 0)
        self._stub = None
        self._loop = asyncio.new_event_loop()
        # Use a new event loop to ensure isolated tests
        self._service = MagmaService(
            name='test',
            empty_mconfig=mconfigs_pb2.MagmaD(),
            loop=self._loop,
        )
        asyncio.set_event_loop(self._service.loop)

    @mock.patch(
        'magma.common.service_registry.ServiceRegistry.get_proxy_config',
    )
    def test_service_run(self, mock_get_proxy_config):
        """
        Test if the service starts and stops gracefully.
        """
        self.assertEqual(self._service.state, ServiceInfo.STARTING)
        mock_get_proxy_config.return_value = {
            'cloud_address': '127.0.0.1',
            'proxy_cloud_connections': True,
        }

        # Start the service and pause the loop
        # NOTE(review): stop() is queued before run() so the loop yields
        # control back to the test after startup — presumably run() blocks
        # otherwise; confirm against MagmaService.run.
        self._service.loop.stop()
        self._service.run()
        asyncio.set_event_loop(self._service.loop)
        # Cancel the periodic log counter so no background task lingers.
        self._service.log_counter._periodic_task.cancel()
        self.assertEqual(self._service.state, ServiceInfo.ALIVE)

        # Create a rpc stub and query the Service303 interface
        # Re-register with the real port now that the service has bound one.
        ServiceRegistry.add_service('test', '0.0.0.0', self._service.port)
        channel = ServiceRegistry.get_rpc_channel(
            'test', ServiceRegistry.LOCAL,
        )
        self._stub = Service303Stub(channel)

        # Expected info mirrors the mocked time.time() value above.
        info = ServiceInfo(
            name='test',
            version='0.0.0',
            state=ServiceInfo.ALIVE,
            health=ServiceInfo.APP_HEALTHY,
            start_time_secs=12345,
        )
        self.assertEqual(self._stub.GetServiceInfo(Void()), info)

        # Stop the service
        self._stub.StopService(Void())
        # Run the loop until the stop request finishes shutting it down.
        self._service.loop.run_forever()
        self.assertEqual(self._service.state, ServiceInfo.STOPPED)
def main():
    """main() for redirectd. Starts the server threads."""
    service = MagmaService('redirectd', mconfigs_pb2.RedirectD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    redirect_ip = get_service_config_value(
        'pipelined', 'bridge_ip_address', None,
    )
    if redirect_ip is None:
        # Cannot serve redirects without knowing the bridge address.
        logging.error("ERROR bridge_ip_address not found in pipelined config")
        service.close()
        return

    run_server_thread(
        run_flask,
        redirect_ip,
        service.config['http_port'],
        get_exit_server_thread_callback(service),
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for policydb.

    Wires the session RPC servicer to the local subscriberdb service and,
    when streaming is enabled, starts a client that pulls policy updates
    from the cloud.
    """
    # NOTE: the docstring previously said "subscriberdb"; this entry point
    # actually starts the policydb service.
    service = MagmaService('policydb', mconfigs_pb2.PolicyDB())

    # Add all servicers to the server
    chan = ServiceRegistry.get_rpc_channel('subscriberdb',
                                           ServiceRegistry.LOCAL)
    subscriberdb_stub = SubscriberDBStub(chan)
    session_servicer = SessionRpcServicer(service.mconfig, subscriberdb_stub)
    session_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        stream = StreamerClient(
            {
                'policydb': PolicyDBStreamerCallback(),
                'rule_mappings': RuleMappingsStreamerCallback(),
            },
            service.loop,
        )
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """
    main() for subscriberdb
    """
    service = MagmaService('subscriberdb')

    # Initialize a store to keep all subscriber data.
    store = SqliteStore(service.config['db_path'], loop=service.loop)

    # Initialize the processor
    processor = Processor(store,
                          get_default_sub_profile(service),
                          service.mconfig.sub_profiles,
                          service.mconfig.lte_auth_op,
                          service.mconfig.lte_auth_amf)

    # Add all servicers to the server
    subscriberdb_servicer = SubscriberDBRpcServicer(store)
    subscriberdb_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        callback = SubscriberDBStreamerCallback(store, service.loop)
        stream = StreamerClient({"subscriberdb": callback}, service.loop)
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Wait until the datastore is populated by addition or resync before
    # listening for clients.
    # NOTE: generator-based coroutine (`yield from`) — kept for
    # compatibility with the surrounding code's asyncio style.
    def serve():
        # Waiting for subscribers to be added to store
        yield from store.on_ready()
        if service.config['s6a_over_grpc']:
            # Serve s6a over gRPC instead of raw Diameter.
            s6a_proxy_servicer = S6aProxyRpcServicer(processor)
            s6a_proxy_servicer.add_to_server(service.rpc_server)
        else:
            base_manager = base.BaseApplication(
                service.config['mme_realm'],
                service.config['mme_host_name'],
                service.config['mme_host_address'],
            )
            s6a_manager = _get_s6a_manager(service, processor)
            base_manager.register(s6a_manager)

            # Setup the Diameter/s6a MME
            s6a_server = service.loop.create_server(
                lambda: S6aServer(base_manager,
                                  s6a_manager,
                                  service.config['mme_realm'],
                                  service.config['mme_host_name'],
                                  loop=service.loop),
                service.config['host_address'],
                service.config['mme_port'])
            # Schedule the server-creation coroutine on the service loop.
            asyncio.ensure_future(s6a_server, loop=service.loop)
    asyncio.ensure_future(serve(), loop=service.loop)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for smsd.

    Relays SMS between the local MME, directoryd, and the cloud smsd.
    """
    service = MagmaService('smsd', None)

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    # Build the relay with stubs for each peer service and register it.
    relay = SmsRelay(
        service.loop,
        GatewayDirectoryServiceStub(
            ServiceRegistry.get_rpc_channel('directoryd',
                                            ServiceRegistry.LOCAL),
        ),
        SMSOrc8rGatewayServiceStub(
            ServiceRegistry.get_rpc_channel('sms_mme_service',
                                            ServiceRegistry.LOCAL),
        ),
        SmsDStub(
            ServiceRegistry.get_rpc_channel('smsd', ServiceRegistry.CLOUD),
        ),
    )
    relay.add_to_server(service.rpc_server)
    relay.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """Top-level function for health service."""
    service = MagmaService('health', None)

    # Optionally pipe errors to Sentry
    sentry_init()

    # Service state wrapper obj
    service_state = ServiceStateWrapper()

    # Load service YML config
    recovery_cfg = service.config["state_recovery"]

    state_recovery_job = StateRecoveryJob(
        service_state=service_state,
        # Interval is configured in minutes; the job wants seconds.
        polling_interval=int(recovery_cfg["interval_check_mins"]) * 60,
        services_check=recovery_cfg["services_check"],
        restart_threshold=recovery_cfg["restart_threshold"],
        redis_dump_src=load_service_config("redis").get(
            "dir", "/var/opt/magma",
        ),
        snapshots_dir=recovery_cfg["snapshots_dir"],
        service_loop=service.loop,
    )
    state_recovery_job.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for the gateway state replication service."""
    service = MagmaService('state', mconfigs_pb2.State())

    # Optionally pipe errors to Sentry
    sentry_init()

    # Manage gRPC client recycling so channels don't go stale.
    grpc_client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Garbage collector propagates state deletions back to Orchestrator
    garbage_collector = GarbageCollector(service, grpc_client_manager)

    # Start state replication loop
    replicator = StateReplicator(
        service, garbage_collector, grpc_client_manager,
    )
    replicator.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """Top-level function for enodebd."""
    service = MagmaService('enodebd', mconfigs_pb2.EnodebD())
    logger.init()

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name,
                sentry_mconfig=service.shared_mconfig.sentry_config)

    # State machine manager for tracking multiple connected eNB devices.
    state_machine_manager = StateMachineManager(service)

    # Statistics manager
    stats_mgr = StatsManager(state_machine_manager)
    stats_mgr.run()

    # Start TR-069 thread (daemon so it never blocks shutdown)
    Thread(
        target=tr069_server,
        args=(state_machine_manager,),
        daemon=True,
    ).start()

    print_grpc_payload = service.config.get('print_grpc_payload', False)

    # Add all servicers to the server
    EnodebdRpcServicer(
        state_machine_manager,
        print_grpc_payload,
    ).add_to_server(service.rpc_server)

    # Register function to get service status
    service.register_get_status_callback(
        lambda: get_service_status_old(state_machine_manager),
    )

    # Register a callback function for GetOperationalStates service303 function
    def get_enodeb_operational_states() -> List[State]:
        return get_operational_states(
            state_machine_manager,
            service.mconfig,
            print_grpc_payload,
        )

    service.register_operational_states_callback(get_enodeb_operational_states)

    # Set eNodeBD iptables rules due to exposing public IP to eNodeB
    service.loop.create_task(set_enodebd_iptables_rule())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for policydb.

    Registers the session and policy RPC servicers backed by local
    rule/base-name stores and, when enabled, streams policy, APN-rule,
    and rating-group updates from the cloud.
    """
    service = MagmaService('policydb', mconfigs_pb2.PolicyDB())

    # Stores shared between servicers and streamer callbacks.
    apn_rules_dict = ApnRuleAssignmentsDict()
    assignments_dict = RuleAssignmentsDict()
    basenames_dict = BaseNameDict()
    rating_groups_dict = RatingGroupsDict()

    sessiond_chan = ServiceRegistry.get_rpc_channel('sessiond',
                                                    ServiceRegistry.LOCAL)
    session_mgr_stub = LocalSessionManagerStub(sessiond_chan)
    sessiond_stub = SessionProxyResponderStub(sessiond_chan)
    reauth_handler = ReAuthHandler(assignments_dict, sessiond_stub)

    # Add all servicers to the server
    session_servicer = SessionRpcServicer(service.mconfig,
                                          rating_groups_dict,
                                          basenames_dict,
                                          apn_rules_dict)
    session_servicer.add_to_server(service.rpc_server)

    orc8r_chan = ServiceRegistry.get_rpc_channel('policydb',
                                                 ServiceRegistry.CLOUD)
    policy_servicer = PolicyRpcServicer(
        reauth_handler,
        basenames_dict,
        PolicyAssignmentControllerStub(orc8r_chan),
    )
    policy_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        callbacks = {
            'policydb': PolicyDBStreamerCallback(),
            'apn_rule_mappings': ApnRuleMappingsStreamerCallback(
                session_mgr_stub,
                basenames_dict,
                apn_rules_dict,
            ),
            'rule_mappings': RuleMappingsStreamerCallback(
                reauth_handler,
                basenames_dict,
                assignments_dict,
                apn_rules_dict,
            ),
            'rating_groups': RatingGroupsStreamerCallback(rating_groups_dict),
        }
        StreamerClient(callbacks, service.loop).start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for ctraced.

    Minimal entry point: start the service loop, then clean up.
    """
    service = MagmaService('ctraced', CtraceD())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main(): """ Loads the Ryu apps we want to run from the config file. This should exit on keyboard interrupt. """ # Run asyncio loop in a greenthread so we can evaluate other eventlets # TODO: Remove once Ryu migrates to asyncio asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy()) service = MagmaService('pipelined') service_config = service.config if environment.is_dev_mode(): of_rest_server.configure(service_config) # Set Ryu config params cfg.CONF.ofp_listen_host = "127.0.0.1" # Load the ryu apps service_manager = ServiceManager(service) service_manager.load() def callback(returncode): if returncode != 0: logging.error( "Failed to set MASQUERADE: %d", returncode ) if service.mconfig.nat_enabled: call_process('iptables -t nat -A POSTROUTING -o %s -j MASQUERADE' % service.config['nat_iface'], callback, service.loop ) service.loop.create_task(monitor_ifaces( service.config['monitored_ifaces'], service.loop), ) manager = AppManager.get_instance() # Add pipelined rpc servicer pipelined_srv = PipelinedRpcServicer( service.loop, manager.applications.get('MeterStatsController', None), manager.applications.get('EnforcementController', None), manager.applications.get('EnforcementStatsController', None), manager.applications.get('DPIController', None), service_manager) pipelined_srv.add_to_server(service.rpc_server) # Run the service loop service.run() # Cleanup the service service.close()
def main():
    """Start monitord.

    Reads the monitoring interface and any manually configured ping
    targets from the YML config, starts the ICMP monitoring job, and
    runs the service loop.
    """
    manual_ping_targets = {}
    service = MagmaService('monitord', mconfigs_pb2.MonitorD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name,
                sentry_mconfig=service.shared_mconfig.sentry_config)

    # Load the YML config once instead of re-parsing the file per lookup.
    monitord_config = load_service_config("monitord")

    # Monitoring thread loop
    mtr_interface = monitord_config["mtr_interface"]

    # Add manual IP targets from yml file
    try:
        targets = monitord_config["ping_targets"]
        for target, data in targets.items():
            ip_string = data.get("ip")
            if ip_string:
                ip = IPAddress(
                    version=IPAddress.IPV4,
                    address=str.encode(ip_string),
                )
                logging.debug(
                    'Adding %s:%s:%s to ping target', target,
                    ip.version, ip.address,
                )
                manual_ping_targets[target] = ip
    except KeyError:
        # "ping_targets" absent from the config — nothing manual to add.
        logging.warning("No ping targets configured")

    cpe_monitor = CpeMonitoringModule()
    cpe_monitor.set_manually_configured_targets(manual_ping_targets)

    icmp_monitor = ICMPJob(
        cpe_monitor, service.mconfig.polling_interval,
        service.loop, mtr_interface,
    )
    icmp_monitor.start()

    # Register a callback function for GetOperationalStates
    service.register_operational_states_callback(
        lambda: _get_serialized_subscriber_states(cpe_monitor),
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for eventd.

    Registers the event RPC servicer with a validator built from the
    service config.
    """
    service = MagmaService('eventd', EventD())

    # Events are validated against configured specs before being served.
    validator = EventValidator(service.config)
    EventDRpcServicer(service.config, validator).add_to_server(
        service.rpc_server,
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for Directoryd."""
    service = MagmaService('directoryd', mconfigs_pb2.DirectoryD())

    # Add servicer to the server
    GatewayDirectoryServiceRpcServicer().add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for eventd.

    Loads event specs from the registry before serving.
    """
    service = MagmaService('eventd', None)

    servicer = EventDRpcServicer(service.config)
    # Specs must be loaded before the servicer is registered.
    servicer.load_specs_from_registry()
    servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for captive_portal service."""
    service = MagmaService('captive_portal', None)

    # Add all servicers to the server
    SessionRpcServicer(service.config).add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for policydb.

    Starts a background client that streams policy updates from the
    cloud, then runs the service loop.
    """
    # NOTE: the docstring previously said "subscriberdb"; this entry point
    # actually starts the policydb service.
    service = MagmaService('policydb')

    # Start a background thread to stream updates from the cloud
    callback = PolicyDBStreamerCallback(service.loop)
    stream = StreamerClient({"policydb": callback}, service.loop)
    stream.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for hello service."""
    service = MagmaService('hello')

    # Add all servicers to the server
    HelloRpcServicer().add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for ctraced.

    Serves call-trace requests through a TraceManager built from the
    service config.
    """
    service = MagmaService('ctraced', CtraceD())

    manager = TraceManager(service.config)
    CtraceDRpcServicer(manager).add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for MobilityD."""
    service = MagmaService('mobilityd', mconfigs_pb2.MobilityD())

    # Add all servicers to the server
    servicer = MobilityServiceRpcServicer(service.mconfig, service.config)
    servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for Directoryd."""
    service = MagmaService('directoryd', mconfigs_pb2.DirectoryD())

    # Add all servicers to the server
    servicer = DirectoryServiceRpcServicer(service.mconfig, service.config)
    servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for Directoryd."""
    service = MagmaService('directoryd', mconfigs_pb2.DirectoryD())

    # Add servicer to the server; payload logging is opt-in via config.
    servicer = GatewayDirectoryServiceRpcServicer(
        service.config.get('print_grpc_payload', False),
    )
    servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for eventd.

    Serves events, forwarding to fluent-bit on the configured port.
    """
    service = MagmaService('eventd', None)

    servicer = EventDRpcServicer(
        service.config['fluent_bit_port'],
        service.config['tcp_timeout'],
    )
    servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for eventd.

    Registers the event servicer with validation; errors optionally go
    to Sentry.
    """
    service = MagmaService('eventd', EventD())

    # Optionally pipe errors to Sentry
    sentry_init()

    validator = EventValidator(service.config)
    EventDRpcServicer(service.config, validator).add_to_server(
        service.rpc_server,
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for policydb.

    Optionally starts a background client that streams policy updates
    from the cloud, then runs the service loop.
    """
    # NOTE: the docstring previously said "subscriberdb"; this entry point
    # actually starts the policydb service.
    service = MagmaService('policydb')

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        callback = PolicyDBStreamerCallback(service.loop)
        stream = StreamerClient({"policydb": callback}, service.loop)
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """
    main() for MobilityD
    """
    service = MagmaService('mobilityd', mconfigs_pb2.MobilityD())

    # Load service configs and mconfig
    config = service.config
    mconfig = service.mconfig

    # YML config values override the mconfig defaults where present.
    multi_apn = config.get('multi_apn', mconfig.multi_apn_ip_alloc)
    static_ip_enabled = config.get('static_ip', mconfig.static_ip_enabled)
    allocator_type = mconfig.ip_allocator_type

    dhcp_iface = config.get('dhcp_iface', 'dhcp0')
    dhcp_retry_limit = config.get('retry_limit', 300)

    # TODO: consider adding gateway mconfig to decide whether to
    # persist to Redis
    client = get_default_client()
    store = MobilityStore(client,
                          config.get('persist_to_redis', False),
                          config.get('redis_port', 6380))

    chan = ServiceRegistry.get_rpc_channel('subscriberdb',
                                           ServiceRegistry.LOCAL)
    ipv4_allocator = _get_ipv4_allocator(store, allocator_type,
                                         static_ip_enabled, multi_apn,
                                         dhcp_iface, dhcp_retry_limit,
                                         SubscriberDBStub(chan))

    # Init IPv6 allocator, for now only IP_POOL mode is supported for IPv6
    ipv6_allocation_type = config['ipv6_ip_allocator_type']
    ipv6_allocator = IPv6AllocatorPool(
        store=store,
        session_prefix_alloc_mode=ipv6_allocation_type)

    # Load IPAddressManager
    ip_address_man = IPAddressManager(ipv4_allocator, ipv6_allocator, store)

    # Add all servicers to the server
    mobility_service_servicer = MobilityServiceRpcServicer(
        ip_address_man, mconfig.ip_block, config.get('ipv6_prefix_block'))
    mobility_service_servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """Main co-routine for linkstatsd.

    :return: None
    """
    args = create_parser().parse_args()

    # set up logging
    logging.basicConfig(
        level=logging.DEBUG if args.verbose else logging.INFO,
        format="[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] %(message)s",
    )

    # Get service config
    service = MagmaService("linkstatsd", wifi_mconfigs_pb2.Linkstatsd())

    # Create stats collector
    collector = LinkstatsCollector(service.loop, service.config,
                                   service.mconfig)

    # Create the network state manager and start its state-updating loop
    state_mgr = NetworkStateManager(service.loop, service.config,
                                    service.mconfig)
    state_mgr.start()

    # Register callback function to sync state with the cloud
    service.register_get_status_callback(state_mgr.get_state)

    # Start collector loop
    collector.start_collector()

    if SDWatchdog.has_notify():
        # Create systemd watchdog and schedule its loop on the service loop
        watchdog = SDWatchdog([collector, state_mgr], update_status=True)
        service.loop.create_task(watchdog.run())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """main() for MobilityD."""
    service = MagmaService('mobilityd', mconfigs_pb2.MobilityD())

    subscriberdb_chan = ServiceRegistry.get_rpc_channel(
        'subscriberdb', ServiceRegistry.LOCAL,
    )

    # Add all servicers to the server
    MobilityServiceRpcServicer(
        service.mconfig,
        service.config,
        SubscriberDBStub(subscriberdb_chan),
    ).add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()