Example #1
def main():
    """
    main() for redirectd. Starts the server threads.
    """
    service = MagmaService('redirectd', mconfigs_pb2.RedirectD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    redirect_ip = get_service_config_value(
        'pipelined',
        'bridge_ip_address', None,
    )
    if redirect_ip is None:
        logging.error("ERROR bridge_ip_address not found in pipelined config")
        service.close()
        return

    http_port = service.config['http_port']
    exit_callback = get_exit_server_thread_callback(service)
    run_server_thread(run_flask, redirect_ip, http_port, exit_callback)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
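
All of the examples on this page share the same lifecycle: construct a MagmaService (passing an mconfig protobuf, or None when the service has no mconfig), optionally pipe errors to Sentry, register gRPC servicers and start any background jobs, then block in service.run() and finish with service.close(). The minimal sketch below distills that shared skeleton; the import paths and the 'exampled' service name are assumptions inferred from the calls shown in these examples, not code taken from any single service.

# Minimal sketch of the lifecycle shared by the examples on this page.
# Assumption: import paths follow the usual Magma gateway layout.
from magma.common.sentry import sentry_init
from magma.common.service import MagmaService


def main():
    """Skeleton main() following the pattern of the examples on this page."""
    # Create the service; pass None when the service has no mconfig
    # (as smsd, health and kernsnoopd do). 'exampled' is a hypothetical name.
    service = MagmaService('exampled', None)

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    # Register servicers and start background jobs here, e.g.:
    #   my_servicer.add_to_server(service.rpc_server)
    #   my_job.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
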
Example #2
File: main.py Project: jaredmullane/docu_2
def main():
    """
    main() for gateway state replication service
    """
    service = MagmaService('state', mconfigs_pb2.State())

    # Optionally pipe errors to Sentry
    sentry_init()

    # _grpc_client_manager to manage grpc client recycling
    grpc_client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Garbage collector propagates state deletions back to Orchestrator
    garbage_collector = GarbageCollector(service, grpc_client_manager)

    # Start state replication loop
    state_manager = StateReplicator(service, garbage_collector,
                                    grpc_client_manager)
    state_manager.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #3
File: main.py Project: sdechi/magma
def main():
    """ main() for smsd """
    service = MagmaService('smsd', None)

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    directoryd_chan = ServiceRegistry.get_rpc_channel(
        'directoryd',
        ServiceRegistry.LOCAL,
    )
    mme_chan = ServiceRegistry.get_rpc_channel(
        'sms_mme_service',
        ServiceRegistry.LOCAL,
    )
    smsd_chan = ServiceRegistry.get_rpc_channel('smsd', ServiceRegistry.CLOUD)

    # Add all servicers to the server
    smsd_relay = SmsRelay(
        service.loop,
        GatewayDirectoryServiceStub(directoryd_chan),
        SMSOrc8rGatewayServiceStub(mme_chan),
        SmsDStub(smsd_chan),
    )
    smsd_relay.add_to_server(service.rpc_server)
    smsd_relay.start()

    # Run the service loop
    service.run()
    # Cleanup the service
    service.close()
Example #4
File: main.py Project: jaredmullane/docu_2
def main():
    """
    Top-level function for health service
    """
    service = MagmaService('health', None)

    # Optionally pipe errors to Sentry
    sentry_init()

    # Service state wrapper obj
    service_state = ServiceStateWrapper()

    # Load service YML config
    state_recovery_config = service.config["state_recovery"]
    services_check = state_recovery_config["services_check"]
    polling_interval = int(state_recovery_config["interval_check_mins"]) * 60
    restart_threshold = state_recovery_config["restart_threshold"]
    snapshots_dir = state_recovery_config["snapshots_dir"]

    redis_dump_src = load_service_config("redis").get("dir", "/var/opt/magma")

    state_recovery_job = StateRecoveryJob(service_state=service_state,
                                          polling_interval=polling_interval,
                                          services_check=services_check,
                                          restart_threshold=restart_threshold,
                                          redis_dump_src=redis_dump_src,
                                          snapshots_dir=snapshots_dir,
                                          service_loop=service.loop)
    state_recovery_job.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #5
def main():
    """
    Top-level function for enodebd
    """
    service = MagmaService('enodebd', mconfigs_pb2.EnodebD())
    logger.init()

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name,
                sentry_mconfig=service.shared_mconfig.sentry_config)

    # State machine manager for tracking multiple connected eNB devices.
    state_machine_manager = StateMachineManager(service)

    # Statistics manager
    stats_mgr = StatsManager(state_machine_manager)
    stats_mgr.run()

    # Start TR-069 thread
    server_thread = Thread(
        target=tr069_server,
        args=(state_machine_manager, ),
        daemon=True,
    )
    server_thread.start()

    print_grpc_payload = service.config.get('print_grpc_payload', False)

    # Add all servicers to the server
    enodebd_servicer = EnodebdRpcServicer(
        state_machine_manager,
        print_grpc_payload,
    )
    enodebd_servicer.add_to_server(service.rpc_server)

    # Register function to get service status
    def get_enodebd_status():
        return get_service_status_old(state_machine_manager)

    service.register_get_status_callback(get_enodebd_status)

    # Register a callback function for GetOperationalStates service303 function
    def get_enodeb_operational_states() -> List[State]:
        return get_operational_states(
            state_machine_manager,
            service.mconfig,
            print_grpc_payload,
        )

    service.register_operational_states_callback(get_enodeb_operational_states)

    # Set eNodeBD iptables rules due to exposing public IP to eNodeB
    service.loop.create_task(set_enodebd_iptables_rule())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #6
File: main.py Project: willoughbyrm/magma
def main():
    service = MagmaService('policydb', mconfigs_pb2.PolicyDB())

    # Optionally pipe errors to Sentry
    sentry_init()

    apn_rules_dict = ApnRuleAssignmentsDict()
    assignments_dict = RuleAssignmentsDict()
    basenames_dict = BaseNameDict()
    rating_groups_dict = RatingGroupsDict()
    sessiond_chan = ServiceRegistry.get_rpc_channel('sessiond',
                                                    ServiceRegistry.LOCAL)
    session_mgr_stub = LocalSessionManagerStub(sessiond_chan)
    sessiond_stub = SessionProxyResponderStub(sessiond_chan)
    reauth_handler = ReAuthHandler(assignments_dict, sessiond_stub)

    # Add all servicers to the server
    session_servicer = SessionRpcServicer(service.mconfig, rating_groups_dict,
                                          basenames_dict, apn_rules_dict)
    session_servicer.add_to_server(service.rpc_server)

    orc8r_chan = ServiceRegistry.get_rpc_channel('policydb',
                                                 ServiceRegistry.CLOUD)
    policy_stub = PolicyAssignmentControllerStub(orc8r_chan)
    policy_servicer = PolicyRpcServicer(reauth_handler, basenames_dict,
                                        policy_stub)
    policy_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        stream = StreamerClient(
            {
                'policydb':
                PolicyDBStreamerCallback(),
                'apn_rule_mappings':
                ApnRuleMappingsStreamerCallback(
                    session_mgr_stub,
                    basenames_dict,
                    apn_rules_dict,
                ),
                'rating_groups':
                RatingGroupsStreamerCallback(rating_groups_dict),
            },
            service.loop,
        )
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #7
File: main.py Project: talkhasib/magma
def main():
    """Start monitord"""
    manual_ping_targets = {}
    service = MagmaService('monitord', mconfigs_pb2.MonitorD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name,
                sentry_mconfig=service.shared_mconfig.sentry_config)

    # Monitoring thread loop
    mtr_interface = load_service_config("monitord")["mtr_interface"]

    # Add manual IP targets from yml file
    try:
        targets = load_service_config("monitord")["ping_targets"]
        for target, data in targets.items():
            ip_string = data.get("ip")
            if ip_string:
                ip = IPAddress(
                    version=IPAddress.IPV4,
                    address=str.encode(ip_string),
                )
                logging.debug(
                    'Adding %s:%s:%s to ping target',
                    target,
                    ip.version,
                    ip.address,
                )
                manual_ping_targets[target] = ip
    except KeyError:
        logging.warning("No ping targets configured")

    cpe_monitor = CpeMonitoringModule()
    cpe_monitor.set_manually_configured_targets(manual_ping_targets)

    icmp_monitor = ICMPJob(
        cpe_monitor,
        service.mconfig.polling_interval,
        service.loop,
        mtr_interface,
    )
    icmp_monitor.start()

    # Register a callback function for GetOperationalStates
    service.register_operational_states_callback(
        lambda: _get_serialized_subscriber_states(cpe_monitor), )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #8
def main():
    """ main() for eventd """
    service = MagmaService('eventd', EventD())

    # Optionally pipe errors to Sentry
    sentry_init()

    event_validator = EventValidator(service.config)
    eventd_servicer = EventDRpcServicer(service.config, event_validator)
    eventd_servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #9
def main():
    """ main() for Directoryd """
    service = MagmaService('directoryd', mconfigs_pb2.DirectoryD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    service_config = service.config

    # Add servicer to the server
    gateway_directory_servicer = GatewayDirectoryServiceRpcServicer(
        service_config.get('print_grpc_payload', False), )
    gateway_directory_servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #10
File: main.py Project: traderose/magma
def main():
    """ main() for monitord service"""
    manual_ping_targets = {}
    service = MagmaService('monitord', mconfigs_pb2.MonitorD())

    # Optionally pipe errors to Sentry
    sentry_init()

    # Monitoring thread loop
    mtr_interface = load_service_config("monitord")["mtr_interface"]

    # Add manual IP targets from yml file
    try:
        targets = load_service_config("monitord")["ping_targets"]
        for target, data in targets.items():
            if "ip" in data:
                ip = IPAddress(version=IPAddress.IPV4,
                               address=str.encode(data["ip"]))
                logging.debug(
                    'Adding %s:%s:%s to ping target',
                    target, ip.version, ip.address)
                manual_ping_targets[target] = ip
    except KeyError:
        logging.warning("No ping targets configured")

    cpe_monitor = CpeMonitoringModule()
    cpe_monitor.set_manually_configured_targets(manual_ping_targets)

    icmp_monitor = ICMPMonitoring(cpe_monitor,
                                  service.mconfig.polling_interval,
                                  service.loop, mtr_interface)
    icmp_monitor.start()

    # Register a callback function for GetOperationalStates
    service.register_operational_states_callback(
        lambda: _get_serialized_subscriber_states(cpe_monitor))

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #11
def main():
    """ main() for ctraced """
    service = MagmaService('ctraced', CtraceD())

    # Optionally pipe errors to Sentry
    sentry_init()

    orc8r_chan = ServiceRegistry.get_rpc_channel('ctraced',
                                                 ServiceRegistry.CLOUD)
    ctraced_stub = CallTraceControllerStub(orc8r_chan)

    trace_manager = TraceManager(service.config, ctraced_stub)

    ctraced_servicer = CtraceDRpcServicer(trace_manager)
    ctraced_servicer.add_to_server(service.rpc_server)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #12
def main():
    """
    Read configuration and run the Snooper job
    """
    # There is no mconfig for kernsnoopd
    service = MagmaService('kernsnoopd', None)

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    # Initialize and run the Snooper job
    Snooper(
        service.config['ebpf_programs'],
        max(MIN_COLLECT_INTERVAL, service.config['collect_interval']),
        ServiceRegistry,
        service.loop,
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #13
def main():
    """
    Loads the Ryu apps we want to run from the config file.
    This should exit on keyboard interrupt.
    """

    # Run asyncio loop in a greenthread so we can evaluate other eventlets
    # TODO: Remove once Ryu migrates to asyncio
    asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy())

    service = MagmaService('pipelined', mconfigs_pb2.PipelineD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    service_config = service.config

    if environment.is_dev_mode():
        of_rest_server.configure(service_config)

    # Set Ryu config params
    cfg.CONF.ofp_listen_host = "127.0.0.1"

    # override mconfig using local config.
    # TODO: move config compilation to separate module.
    enable_nat = service.config.get('enable_nat', service.mconfig.nat_enabled)
    service.config['enable_nat'] = enable_nat
    logging.info("Nat: %s", enable_nat)
    vlan_tag = service.config.get(
        'sgi_management_iface_vlan',
        service.mconfig.sgi_management_iface_vlan,
    )
    service.config['sgi_management_iface_vlan'] = vlan_tag

    sgi_ip = service.config.get(
        'sgi_management_iface_ip_addr',
        service.mconfig.sgi_management_iface_ip_addr,
    )
    service.config['sgi_management_iface_ip_addr'] = sgi_ip

    sgi_gateway_ip = service.config.get(
        'sgi_management_iface_gw',
        service.mconfig.sgi_management_iface_gw,
    )
    service.config['sgi_management_iface_gw'] = sgi_gateway_ip

    # Keep router mode off for smooth upgrade path
    service.config['dp_router_enabled'] = service.config.get(
        'dp_router_enabled',
        False,
    )
    if 'virtual_mac' not in service.config:
        if service.config['dp_router_enabled']:
            up_bridge_name = service.config.get(
                'uplink_bridge', UPLINK_OVS_BRIDGE_NAME,
            )
            mac_addr = get_if_hwaddr(up_bridge_name)
        else:
            mac_addr = get_if_hwaddr(service.config.get('bridge_name'))

        service.config['virtual_mac'] = mac_addr

    # this is not read from yml file.
    service.config['uplink_port'] = OFPP_LOCAL
    uplink_port_name = service.config.get('ovs_uplink_port_name', None)
    if enable_nat is False and uplink_port_name is not None:
        service.config['uplink_port'] = BridgeTools.get_ofport(
            uplink_port_name,
        )

    # header enrichment related configuration.
    service.config['proxy_port_name'] = PROXY_PORT_NAME
    he_enabled_flag = False
    if service.mconfig.he_config:
        he_enabled_flag = service.mconfig.he_config.enable_header_enrichment
    he_enabled = service.config.get('he_enabled', he_enabled_flag)
    service.config['he_enabled'] = he_enabled

    # monitoring related configuration
    mtr_interface = service.config.get('mtr_interface', None)
    if mtr_interface:
        mtr_ip = get_ip_from_if(mtr_interface)
        service.config['mtr_ip'] = mtr_ip

    # Load the ryu apps
    service_manager = ServiceManager(service)
    service_manager.load()

    def callback(returncode):
        if returncode != 0:
            logging.error(
                "Failed to set MASQUERADE: %d", returncode,
            )

    # TODO fix this hack for XWF
    if enable_nat is True or service.config.get('setup_type') == 'XWF':
        call_process(
            'iptables -t nat -A POSTROUTING -o %s -j MASQUERADE'
            % service.config['nat_iface'],
            callback,
            service.loop,
        )

    service.loop.create_task(
        monitor_ifaces(
            service.config['monitored_ifaces'],
        ),
    )

    manager = AppManager.get_instance()
    # Add pipelined rpc servicer
    pipelined_srv = PipelinedRpcServicer(
        service.loop,
        manager.applications.get('GYController', None),
        manager.applications.get('EnforcementController', None),
        manager.applications.get('EnforcementStatsController', None),
        manager.applications.get('DPIController', None),
        manager.applications.get('UEMacAddressController', None),
        manager.applications.get('CheckQuotaController', None),
        manager.applications.get('IPFIXController', None),
        manager.applications.get('VlanLearnController', None),
        manager.applications.get('TunnelLearnController', None),
        manager.applications.get('Classifier', None),
        manager.applications.get('InOutController', None),
        manager.applications.get('NGServiceController', None),
        service.config,
        service_manager,
    )
    pipelined_srv.add_to_server(service.rpc_server)

    if service.config['setup_type'] == 'CWF':
        bridge_ip = service.config['bridge_ip_address']
        has_quota_port = service.config['has_quota_port']
        no_quota_port = service.config['no_quota_port']

        def on_exit_server_thread():
            service.StopService(None, None)

        # For CWF start quota check servers
        start_check_quota_server(
            run_flask, bridge_ip, has_quota_port, True,
            on_exit_server_thread,
        )
        start_check_quota_server(
            run_flask, bridge_ip, no_quota_port, False,
            on_exit_server_thread,
        )

    if service.config['setup_type'] == 'LTE':
        polling_interval = service.config.get(
            'ovs_gtp_stats_polling_interval',
            MIN_OVSDB_DUMP_POLLING_INTERVAL,
        )
        collector = GTPStatsCollector(
            polling_interval,
            service.loop,
        )
        collector.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #14
def main():
    """ main() for subscriberdb """
    service = MagmaService('subscriberdb', mconfigs_pb2.SubscriberDB())

    # Optionally pipe errors to Sentry
    sentry_init()

    # Initialize a store to keep all subscriber data.
    store = SqliteStore(service.config['db_path'], loop=service.loop,
                        sid_digits=service.config['sid_last_n'])

    # Initialize the processor
    processor = Processor(store,
                          get_default_sub_profile(service),
                          service.mconfig.sub_profiles,
                          service.mconfig.lte_auth_op,
                          service.mconfig.lte_auth_amf)

    # Add all servicers to the server
    subscriberdb_servicer = SubscriberDBRpcServicer(
        store,
        service.config.get('print_grpc_payload', False))
    subscriberdb_servicer.add_to_server(service.rpc_server)


    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        callback = SubscriberDBStreamerCallback(store, service.loop)
        stream = StreamerClient({"subscriberdb": callback}, service.loop)
        stream.start()
    else:
        logging.info('enable_streaming set to False. Streamer disabled!')

    # Wait until the datastore is populated by addition or resync before
    # listening for clients.
    async def serve():
        if not store.list_subscribers():
            # Waiting for subscribers to be added to store
            await store.on_ready()

        if service.config['s6a_over_grpc']:
            logging.info('Running s6a over grpc')
            s6a_proxy_servicer = S6aProxyRpcServicer(processor)
            s6a_proxy_servicer.add_to_server(service.rpc_server)
        else:
            logging.info('Running s6a over DIAMETER')
            base_manager = base.BaseApplication(
                service.config['mme_realm'],
                service.config['mme_host_name'],
                service.config['mme_host_address'],
            )
            s6a_manager = _get_s6a_manager(service, processor)
            base_manager.register(s6a_manager)

            # Setup the Diameter/s6a MME
            s6a_server = service.loop.create_server(
                lambda: S6aServer(
                    base_manager,
                    s6a_manager,
                    service.config['mme_realm'],
                    service.config['mme_host_name'],
                    loop=service.loop,
                ),
                service.config['host_address'], service.config['mme_port'])
            asyncio.ensure_future(s6a_server, loop=service.loop)
    asyncio.ensure_future(serve(), loop=service.loop)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #15
def main():
    """Main routine for subscriberdb service."""  # noqa: D401
    service = MagmaService('subscriberdb', mconfigs_pb2.SubscriberDB())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    # Initialize a store to keep all subscriber data.
    store = SqliteStore(
        service.config['db_path'],
        loop=service.loop,
        sid_digits=service.config['sid_last_n'],
    )

    # Initialize the processor
    processor = Processor(
        store,
        get_default_sub_profile(service),
        service.mconfig.sub_profiles,
        service.mconfig.lte_auth_op,
        service.mconfig.lte_auth_amf,
    )

    # Add all servicers to the server
    subscriberdb_servicer = SubscriberDBRpcServicer(
        store,
        service.config.get('print_grpc_payload', False),
    )
    subscriberdb_servicer.add_to_server(service.rpc_server)

    # Start a background thread to stream updates from the cloud
    if service.config['enable_streaming']:
        grpc_client_manager = GRPCClientManager(
            service_name="subscriberdb",
            service_stub=SubscriberDBCloudStub,
            max_client_reuse=60,
        )
        sync_interval = _randomize_sync_interval(
            service.config.get('subscriberdb_sync_interval'), )
        subscriber_page_size = service.config.get('subscriber_page_size')
        subscriberdb_cloud_client = SubscriberDBCloudClient(
            service.loop,
            store,
            subscriber_page_size,
            sync_interval,
            grpc_client_manager,
        )

        subscriberdb_cloud_client.start()
    else:
        logging.info(
            'enable_streaming set to False. Subscriber streaming '
            'disabled!', )

    # Wait until the datastore is populated by addition or resync before
    # listening for clients.
    async def serve():  # noqa: WPS430
        if not store.list_subscribers():
            # Waiting for subscribers to be added to store
            await store.on_ready()

        if service.config['s6a_over_grpc']:
            logging.info('Running s6a over grpc')
            s6a_proxy_servicer = S6aProxyRpcServicer(
                processor,
                service.config.get('print_grpc_payload', False),
            )
            s6a_proxy_servicer.add_to_server(service.rpc_server)
        else:
            logging.info('Running s6a over DIAMETER')
            base_manager = base.BaseApplication(
                service.config['mme_realm'],
                service.config['mme_host_name'],
                service.config['mme_host_address'],
            )
            s6a_manager = _get_s6a_manager(service, processor)
            base_manager.register(s6a_manager)

            # Setup the Diameter/s6a MME
            s6a_server = service.loop.create_server(
                lambda: S6aServer(
                    base_manager,
                    s6a_manager,
                    service.config['mme_realm'],
                    service.config['mme_host_name'],
                    loop=service.loop,
                ),
                service.config['host_address'],
                service.config['mme_port'],
            )
            asyncio.ensure_future(s6a_server, loop=service.loop)

    asyncio.ensure_future(serve(), loop=service.loop)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #16
def main():
    """
    Loads the Ryu apps we want to run from the config file.
    This should exit on keyboard interrupt.
    """

    # Run asyncio loop in a greenthread so we can evaluate other eventlets
    # TODO: Remove once Ryu migrates to asyncio
    asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy())

    service = MagmaService('pipelined', mconfigs_pb2.PipelineD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name, sentry_mconfig=service.shared_mconfig.sentry_config)

    service_config = service.config

    if environment.is_dev_mode():
        of_rest_server.configure(service_config)

    # Set Ryu config params
    cfg.CONF.ofp_listen_host = "127.0.0.1"

    # override mconfig using local config.
    # TODO: move config compilation to separate module.
    enable_nat = service.config.get('enable_nat', service.mconfig.nat_enabled)
    service.config['enable_nat'] = enable_nat
    logging.info("Nat: %s", enable_nat)
    enable5g_features = service.config.get(
        'enable5g_features',
        service.mconfig.enable5g_features,
    )
    service.config['enable5g_features'] = enable5g_features
    logging.info("enable5g_features: %s", enable5g_features)
    vlan_tag = service.config.get(
        'sgi_management_iface_vlan',
        service.mconfig.sgi_management_iface_vlan,
    )
    service.config['sgi_management_iface_vlan'] = vlan_tag

    sgi_ip = service.config.get(
        'sgi_management_iface_ip_addr',
        service.mconfig.sgi_management_iface_ip_addr,
    )
    service.config['sgi_management_iface_ip_addr'] = sgi_ip

    sgi_gateway_ip = service.config.get(
        'sgi_management_iface_gw',
        service.mconfig.sgi_management_iface_gw,
    )
    service.config['sgi_management_iface_gw'] = sgi_gateway_ip

    # SGi IPv6 address conf
    sgi_ipv6 = service.config.get(
        'sgi_management_iface_ipv6_addr',
        service.mconfig.sgi_management_iface_ipv6_addr,
    )
    service.config['sgi_management_iface_ipv6_addr'] = sgi_ipv6

    sgi_gateway_ipv6 = service.config.get(
        'sgi_management_iface_ipv6_gw',
        service.mconfig.sgi_management_iface_ipv6_gw,
    )
    service.config['sgi_management_iface_ipv6_gw'] = sgi_gateway_ipv6

    # Keep router mode off for smooth upgrade path
    service.config['dp_router_enabled'] = service.config.get(
        'dp_router_enabled',
        False,
    )
    if 'virtual_mac' not in service.config:
        if service.config['dp_router_enabled']:
            up_iface_name = service.config.get('nat_iface', None)
            mac_addr = get_if_hwaddr(up_iface_name)
        else:
            mac_addr = get_if_hwaddr(service.config.get('bridge_name'))

        service.config['virtual_mac'] = mac_addr

    # this is not read from yml file.
    service.config['uplink_port'] = OFPP_LOCAL
    uplink_port_name = service.config.get('ovs_uplink_port_name', None)
    if enable_nat is False and uplink_port_name is not None:
        service.config['uplink_port'] = BridgeTools.get_ofport(
            uplink_port_name,
        )

    # header enrichment related configuration.
    service.config['proxy_port_name'] = PROXY_PORT_NAME
    he_enabled_flag = False
    if service.mconfig.he_config:
        he_enabled_flag = service.mconfig.he_config.enable_header_enrichment
        if he_enabled_flag:
            bridge = service.config.get('bridge_name')
            BridgeTools.add_ovs_port(bridge, PROXY_PORT_NAME, PROXY_OF_PORT)

    he_enabled = service.config.get('he_enabled', he_enabled_flag)
    service.config['he_enabled'] = he_enabled

    # tune datapath according to config
    configure_tso(service.config)
    setup_sgi_tunnel(service.config, service.loop)
    tune_datapath(service.config)
    setup_masquerade_rule(service.config, service.loop)

    # monitoring related configuration
    mtr_interface = service.config.get('mtr_interface', None)
    if mtr_interface:
        try:
            service.config['mtr_ip'] = get_ip_from_if(mtr_interface)
        except ValueError:
            logging.warning("Unable to set up mtr interface", exc_info=True)

    # Load the ryu apps
    service_manager = ServiceManager(service)
    service_manager.load()

    service.loop.create_task(
        monitor_ifaces(
            service.config['monitored_ifaces'],
        ),
    )

    manager = AppManager.get_instance()
    # Add pipelined rpc servicer
    pipelined_srv = PipelinedRpcServicer(
        service.loop,
        manager.applications.get('GYController', None),
        manager.applications.get('EnforcementController', None),
        manager.applications.get('EnforcementStatsController', None),
        manager.applications.get('DPIController', None),
        manager.applications.get('UEMacAddressController', None),
        manager.applications.get('CheckQuotaController', None),
        manager.applications.get('IPFIXController', None),
        manager.applications.get('VlanLearnController', None),
        manager.applications.get('TunnelLearnController', None),
        manager.applications.get('Classifier', None),
        manager.applications.get('InOutController', None),
        manager.applications.get('NGServiceController', None),
        service.config,
        service_manager,
    )
    pipelined_srv.add_to_server(service.rpc_server)

    if service.config['setup_type'] == 'CWF':
        bridge_ip = service.config['bridge_ip_address']
        has_quota_port = service.config['has_quota_port']
        no_quota_port = service.config['no_quota_port']

        def on_exit_server_thread():
            service.StopService(None, None)

        # For CWF start quota check servers
        start_check_quota_server(
            run_flask, bridge_ip, has_quota_port, True,
            on_exit_server_thread,
        )
        start_check_quota_server(
            run_flask, bridge_ip, no_quota_port, False,
            on_exit_server_thread,
        )

    if service.config['setup_type'] == 'LTE':
        polling_interval = service.config.get(
            'ovs_gtp_stats_polling_interval',
            MIN_OVSDB_DUMP_POLLING_INTERVAL,
        )
        collector = GTPStatsCollector(
            polling_interval,
            service.loop,
        )
        collector.start()

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #17
File: main.py Project: jaredmullane/docu_2
def main():
    """ main() for MobilityD """
    service = MagmaService('mobilityd', mconfigs_pb2.MobilityD())

    # Optionally pipe errors to Sentry
    sentry_init()

    # Load service configs and mconfig
    config = service.config
    mconfig = service.mconfig

    multi_apn = config.get('multi_apn', mconfig.multi_apn_ip_alloc)
    static_ip_enabled = config.get('static_ip', mconfig.static_ip_enabled)
    allocator_type = mconfig.ip_allocator_type

    dhcp_iface = config.get('dhcp_iface', 'dhcp0')
    dhcp_retry_limit = config.get('retry_limit', 300)

    # TODO: consider adding gateway mconfig to decide whether to
    # persist to Redis
    client = get_default_client()
    store = MobilityStore(client, config.get('persist_to_redis', False),
                          config.get('redis_port', 6380))

    chan = ServiceRegistry.get_rpc_channel('subscriberdb',
                                           ServiceRegistry.LOCAL)
    ipv4_allocator = _get_ipv4_allocator(store, allocator_type,
                                         static_ip_enabled, multi_apn,
                                         dhcp_iface, dhcp_retry_limit,
                                         SubscriberDBStub(chan))

    # Init IPv6 allocator, for now only IP_POOL mode is supported for IPv6
    ipv6_prefix_allocation_type = mconfig.ipv6_prefix_allocation_type or \
                                  DEFAULT_IPV6_PREFIX_ALLOC_MODE
    ipv6_allocator = IPv6AllocatorPool(
        store=store, session_prefix_alloc_mode=ipv6_prefix_allocation_type)

    # Load IPAddressManager
    ip_address_man = IPAddressManager(ipv4_allocator, ipv6_allocator, store)

    # Add all servicers to the server
    mobility_service_servicer = MobilityServiceRpcServicer(
        ip_address_man, config.get('print_grpc_payload', False))
    mobility_service_servicer.add_to_server(service.rpc_server)

    # Load IPv4 and IPv6 blocks from the configurable mconfig file
    # No dynamic reloading support for now, assume restart after updates
    logging.info('Adding IPv4 block')
    ipv4_block = _get_ip_block(mconfig.ip_block)
    if ipv4_block is not None:
        try:
            mobility_service_servicer.add_ip_block(ipv4_block)
        except OverlappedIPBlocksError:
            logging.warning("Overlapped IPv4 block: %s", ipv4_block)

    logging.info('Adding IPv6 block')
    ipv6_block = _get_ip_block(mconfig.ipv6_block)
    if ipv6_block is not None:
        try:
            mobility_service_servicer.add_ip_block(ipv6_block)
        except OverlappedIPBlocksError:
            logging.warning("Overlapped IPv6 block: %s", ipv6_block)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #18
def main():
    """
    Main magmad function
    """
    service = MagmaService('magmad', mconfigs_pb2.MagmaD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    logging.info('Starting magmad for UUID: %s', snowflake.make_snowflake())

    # Create service manager
    services = service.config.get('magma_services')
    init_system = service.config.get('init_system', 'systemd')
    registered_dynamic_services = service.config.get(
        'registered_dynamic_services',
        [],
    )
    enabled_dynamic_services = []
    if service.mconfig is not None:
        enabled_dynamic_services = service.mconfig.dynamic_services

    # Poll the services' Service303 interface
    service_poller = ServicePoller(
        service.loop,
        service.config,
        enabled_dynamic_services,
    )
    service_poller.start()

    service_manager = ServiceManager(
        services,
        init_system,
        service_poller,
        registered_dynamic_services,
        enabled_dynamic_services,
    )

    # Get metrics service config
    metrics_config = service.config.get('metricsd')
    metrics_services = metrics_config['services']
    collect_interval = metrics_config['collect_interval']
    sync_interval = metrics_config['sync_interval']
    grpc_timeout = metrics_config['grpc_timeout']
    grpc_msg_size = metrics_config.get('max_grpc_msg_size_mb', 4)
    metrics_post_processor_fn = metrics_config.get('post_processing_fn')

    metric_scrape_targets = [
        ScrapeTarget(t['url'], t['name'], t['interval'])
        for t in metrics_config.get('metric_scrape_targets', [])
    ]

    # Create local metrics collector
    metrics_collector = MetricsCollector(
        services=metrics_services,
        collect_interval=collect_interval,
        sync_interval=sync_interval,
        grpc_timeout=grpc_timeout,
        grpc_max_msg_size_mb=grpc_msg_size,
        loop=service.loop,
        post_processing_fn=get_metrics_postprocessor_fn(
            metrics_post_processor_fn, ),
        scrape_targets=metric_scrape_targets,
    )

    # Poll and sync the metrics collector loops
    metrics_collector.run()

    # Start a background thread to stream updates from the cloud
    stream_client = None
    if service.config.get('enable_config_streamer', False):
        stream_client = StreamerClient(
            {
                CONFIG_STREAM_NAME:
                ConfigManager(
                    services,
                    service_manager,
                    service,
                    MconfigManagerImpl(),
                ),
            },
            service.loop,
        )

    # Create sync rpc client with a heartbeat of 30 seconds (timeout = 60s)
    sync_rpc_client = None
    if service.config.get('enable_sync_rpc', False):
        sync_rpc_client = SyncRPCClient(
            service.loop,
            30,
            service.config.get('print_grpc_payload', False),
        )

    first_time_bootstrap = True

    # This is called when bootstrap succeeds and when _bootstrap_check is
    # invoked but bootstrap is not needed. If it's invoked right after certs
    # are generated, certs_generated is true, control_proxy will restart.
    async def bootstrap_success_cb(certs_generated: bool):
        nonlocal first_time_bootstrap
        if first_time_bootstrap:
            if stream_client:
                stream_client.start()
            if sync_rpc_client:
                sync_rpc_client.start()
            first_time_bootstrap = False
        if certs_generated:
            svcs_to_restart = []
            if 'control_proxy' in services:
                svcs_to_restart.append('control_proxy')

            # fluent-bit caches TLS client certs in memory, so we need to
            # restart it whenever the certs change
            fresh_mconfig = get_mconfig_manager().load_service_mconfig(
                'magmad',
                mconfigs_pb2.MagmaD(),
            )
            dynamic_svcs = fresh_mconfig.dynamic_services or []
            if 'td-agent-bit' in dynamic_svcs:
                svcs_to_restart.append('td-agent-bit')

            await service_manager.restart_services(services=svcs_to_restart)

    # Create bootstrap manager
    bootstrap_manager = BootstrapManager(service, bootstrap_success_cb)

    # Initialize kernel version poller if it is enabled
    kernel_version_poller = None
    if service.config.get('enable_kernel_version_checking', False):
        kernel_version_poller = KernelVersionsPoller(service)
        kernel_version_poller.start()

    # gateway status generator to bundle various information about this
    # gateway into an object.
    gateway_status_factory = GatewayStatusFactory(
        service=service,
        service_poller=service_poller,
        kernel_version_poller=kernel_version_poller,
    )

    # _grpc_client_manager to manage grpc client recycling
    grpc_client_manager = GRPCClientManager(
        service_name="state",
        service_stub=StateServiceStub,
        max_client_reuse=60,
    )

    # Initialize StateReporter
    state_reporter = StateReporter(
        config=service.config,
        mconfig=service.mconfig,
        loop=service.loop,
        bootstrap_manager=bootstrap_manager,
        gw_status_factory=gateway_status_factory,
        grpc_client_manager=grpc_client_manager,
    )

    # Initialize ServiceHealthWatchdog
    service_health_watchdog = ServiceHealthWatchdog(
        config=service.config,
        loop=service.loop,
        service_poller=service_poller,
        service_manager=service_manager,
    )

    # Start _bootstrap_manager
    bootstrap_manager.start_bootstrap_manager()

    # Start all services when magmad comes up
    service.loop.create_task(service_manager.start_services())

    # Start state reporting loop
    state_reporter.start()

    # Start service timeout health check loop
    service_health_watchdog.start()

    # Start upgrade manager loop
    if service.config.get('enable_upgrade_manager', False):
        upgrader = _get_upgrader_impl(service)
        service.loop.create_task(start_upgrade_loop(service, upgrader))

    # Start network health metric collection
    if service.config.get('enable_network_monitor', False):
        service.loop.create_task(metrics_collection_loop(service.config))

    # Create generic command executor
    command_executor = None
    if service.config.get('generic_command_config', None):
        command_executor = get_command_executor_impl(service)

    # Start loop to monitor unattended upgrade status
    service.loop.create_task(monitor_unattended_upgrade_status())

    # Add all servicers to the server
    magmad_servicer = MagmadRpcServicer(
        service,
        services,
        service_manager,
        get_mconfig_manager(),
        command_executor,
        service.loop,
        service.config.get('print_grpc_payload', False),
    )
    magmad_servicer.add_to_server(service.rpc_server)

    if SDWatchdog.has_notify():
        # Create systemd watchdog
        sdwatchdog = SDWatchdog(
            tasks=[bootstrap_manager, state_reporter],
            update_status=True,
        )
        # Start watchdog loop
        service.loop.create_task(sdwatchdog.run())

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
Example #19
def main():
    """Start mobilityd"""
    service = MagmaService('mobilityd', mconfigs_pb2.MobilityD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    # Load service configs and mconfig
    config = service.config
    mconfig = service.mconfig

    multi_apn = config.get('multi_apn', mconfig.multi_apn_ip_alloc)
    static_ip_enabled = config.get('static_ip', mconfig.static_ip_enabled)
    allocator_type = mconfig.ip_allocator_type

    dhcp_iface = config.get('dhcp_iface', 'dhcp0')
    dhcp_retry_limit = config.get('retry_limit', RETRY_LIMIT)

    # TODO: consider adding gateway mconfig to decide whether to
    # persist to Redis
    client = get_default_client()
    store = MobilityStore(
        client, config.get('persist_to_redis', False),
        config.get('redis_port', DEFAULT_REDIS_PORT),
    )

    chan = ServiceRegistry.get_rpc_channel(
        'subscriberdb',
        ServiceRegistry.LOCAL,
    )
    ipv4_allocator = _get_ipv4_allocator(
        store, allocator_type,
        static_ip_enabled, multi_apn,
        dhcp_iface, dhcp_retry_limit,
        SubscriberDBStub(chan),
    )

    # Init IPv6 allocator, for now only IP_POOL mode is supported for IPv6
    ipv6_allocator = IPv6AllocatorPool(
        store=store,
        session_prefix_alloc_mode=_get_value_or_default(
            mconfig.ipv6_prefix_allocation_type,
            DEFAULT_IPV6_PREFIX_ALLOC_MODE,
        ),
    )

    # Load IPAddressManager
    ip_address_man = IPAddressManager(ipv4_allocator, ipv6_allocator, store)

    # Load IPv4 and IPv6 blocks from the configurable mconfig file
    # No dynamic reloading support for now, assume restart after updates
    ipv4_block = _get_ip_block(mconfig.ip_block, "IPv4")
    if ipv4_block is not None:
        logging.info('Adding IPv4 block')
        try:
            allocated_ip_blocks = ip_address_man.list_added_ip_blocks()
            if ipv4_block not in allocated_ip_blocks:
                # Cleanup previously allocated IP blocks
                ip_address_man.remove_ip_blocks(*allocated_ip_blocks, force=True)
                ip_address_man.add_ip_block(ipv4_block)
        except OverlappedIPBlocksError:
            logging.warning("Overlapped IPv4 block: %s", ipv4_block)

    ipv6_block = _get_ip_block(mconfig.ipv6_block, "IPv6")
    if ipv6_block is not None:
        logging.info('Adding IPv6 block')
        try:
            allocated_ipv6_block = ip_address_man.get_assigned_ipv6_block()
            if ipv6_block != allocated_ipv6_block:
                ip_address_man.add_ip_block(ipv6_block)
        except OverlappedIPBlocksError:
            logging.warning("Overlapped IPv6 block: %s", ipv6_block)

    # Add all servicers to the server
    mobility_service_servicer = MobilityServiceRpcServicer(
        ip_address_man, config.get('print_grpc_payload', False),
    )
    mobility_service_servicer.add_to_server(service.rpc_server)
    service.run()

    # Cleanup the service
    service.close()
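
Note that these excerpts show only the main() definitions. Each service's main.py is executed as a script, so each file presumably ends with the standard Python entry-point guard, omitted from the excerpts above:

if __name__ == "__main__":
    main()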