def get_context():
    """
    Create the context to be used for nghttpx, other than the one
    provided by the configs.
    """
    backends = []
    for svc in ServiceRegistry.list_services():
        (ip_address, port) = ServiceRegistry.get_service_address(svc)
        backends.append({'service': svc, 'ip': ip_address, 'port': port})
    # We get the gateway cert after bootstrapping, but we do want nghttpx
    # to run before that for communication locally. Update the flag for
    # jinja to act upon.
    gateway_cert = get_service_config_value(
        'control_proxy', 'gateway_cert', None,
    )
    return {
        'backends': backends,
        'use_gateway_cert': bool(gateway_cert and os.path.exists(gateway_cert)),
        'dev_mode': is_dev_mode(),
        'allow_http_proxy': get_service_config_value(
            'control_proxy', 'allow_http_proxy', False,
        ),
        'http_proxy': os.getenv('http_proxy', ''),
    }
def _get_context():
    """
    Create the context which has the interface IP and the OAI log level to use.
    """
    context = {
        "s11_ip": _get_iface_ip("spgw", "s11_iface_name"),
        "s1ap_ip": _get_iface_ip("mme", "s1ap_iface_name"),
        "s1u_ip": _get_iface_ip("spgw", "s1u_iface_name"),
        "oai_log_level": _get_oai_log_level(),
        "ipv4_dns": _get_dns_ip("dns_iface_name"),
        "identity": _get_identity(),
        "relay_enabled": _get_relay_enabled(),
        "non_eps_service_control": _get_non_eps_service_control(),
        "csfb_mcc": _get_csfb_mcc(),
        "csfb_mnc": _get_csfb_mnc(),
        "lac": _get_lac(),
        "use_stateless": get_service_config_value("mme", "use_stateless", ""),
        "attached_enodeb_tacs": _get_attached_enodeb_tacs(),
    }
    # set ovs params
    ovs_keys = (
        "ovs_bridge_name",
        "ovs_gtp_port_number",
        "ovs_uplink_port_number",
        "ovs_uplink_mac",
    )
    for key in ovs_keys:
        context[key] = get_service_config_value("spgw", key, "")
    return context
def get_sentry_dsn_and_sample_rate(
    sentry_mconfig: mconfigs_pb2.SharedSentryConfig,
) -> Tuple[str, float]:
    """Get Sentry configs with the following priority

    1) control_proxy.yml (if sentry_python_url is present)
    2) shared mconfig (i.e. first streamed mconfig from orc8r,
    if not present default mconfig in /etc/magma)

    Args:
        sentry_mconfig (SharedSentryConfig): proto message of shared mconfig

    Returns:
        (str, float): sentry url, sentry sample rate
    """
    dsn_python = get_service_config_value(
        CONTROL_PROXY,
        SENTRY_URL,
        default='',
    )
    if dsn_python:
        # A local override exists, so the sample rate comes from the
        # service config as well.
        sample_rate = get_service_config_value(
            CONTROL_PROXY,
            SENTRY_SAMPLE_RATE,
            default=DEFAULT_SAMPLE_RATE,
        )
        return dsn_python, sample_rate
    # Fall back entirely to the shared mconfig streamed from orc8r.
    return sentry_mconfig.dsn_python, sentry_mconfig.sample_rate
def get_default_client():
    """
    Return a default redis client using the configured port in redis.yml
    """
    host = get_service_config_value('redis', 'bind', 'localhost')
    port = get_service_config_value('redis', 'port', 6379)
    return redis.Redis(host=host, port=port)
def __init__(self, stream_callbacks, loop):
    """
    Args:
        stream_callbacks ({string: Callback}): Mapping of stream names to
        callbacks to subscribe to.
        loop: asyncio event loop to schedule the callback
    """
    threading.Thread.__init__(self)
    self._stream_callbacks = stream_callbacks
    self._loop = loop
    # Set this thread as daemon thread. We can kill this background
    # thread abruptly since we handle all updates (and database
    # transactions) in the asyncio event loop.
    self.daemon = True
    # Don't allow stream update rate faster than every 5 seconds
    configured_pause = get_service_config_value(
        'streamer', 'reconnect_sec', 60,
    )
    self._reconnect_pause = max(5, configured_pause)
    logging.info("Streamer reconnect pause: %d", self._reconnect_pause)
    self._stream_timeout = get_service_config_value(
        'streamer', 'stream_timeout', 150,
    )
    logging.info("Streamer timeout: %d", self._stream_timeout)
def _get_dns_ip(iface_config):
    """
    Get dnsd interface IP without netmask.
    If caching is enabled, use the ip of interface that dnsd listens over.
    Otherwise, just use dns server in yml.
    """
    caching_enabled = load_service_mconfig('mme').enable_dns_caching
    if not caching_enabled:
        return get_service_config_value('spgw', 'ipv4_dns', '')
    iface_name = get_service_config_value('dnsd', iface_config, '')
    return get_ip_from_if(iface_name)
def _get_dns_ip(iface_config):
    """
    Get dnsd interface IP without netmask.
    If caching is enabled, use the ip of interface that dnsd listens over.
    Otherwise, just use dns server in yml.
    """
    mme_config = load_service_mconfig("mme", MME())
    if not mme_config.enable_dns_caching:
        return get_service_config_value("spgw", "ipv4_dns", "")
    iface_name = get_service_config_value("dnsd", iface_config, "")
    return get_ip_from_if(iface_name)
def _get_context():
    """
    Create the context which has the interface IP and the OAI log level to use.
    """
    mme_service_config = load_service_mconfig("mme", MME())
    context = {}
    context["mme_s11_ip"] = _get_iface_ip("mme", "s11_iface_name")
    context["sgw_s11_ip"] = _get_iface_ip("spgw", "s11_iface_name")
    context["sgw_s5s8_up_ip"] = _get_iface_ip("spgw", "sgw_s5s8_up_iface_name")
    context["remote_sgw_ip"] = get_service_config_value(
        "mme", "remote_sgw_ip", "",
    )
    context["s1ap_ip"] = _get_iface_ip("mme", "s1ap_iface_name")
    context["oai_log_level"] = _get_oai_log_level()
    context["ipv4_dns"] = _get_primary_dns_ip(
        mme_service_config, "dns_iface_name",
    )
    context["ipv4_sec_dns"] = _get_secondary_dns_ip(mme_service_config)
    context["ipv4_p_cscf_address"] = _get_ipv4_pcscf_ip(mme_service_config)
    context["ipv6_dns"] = _get_ipv6_dns_ip(mme_service_config)
    context["ipv6_p_cscf_address"] = _get_ipv6_pcscf_ip(mme_service_config)
    context["identity"] = _get_identity()
    context["relay_enabled"] = _get_relay_enabled(mme_service_config)
    context["non_eps_service_control"] = _get_non_eps_service_control(
        mme_service_config,
    )
    context["csfb_mcc"] = _get_csfb_mcc(mme_service_config)
    context["csfb_mnc"] = _get_csfb_mnc(mme_service_config)
    context["lac"] = _get_lac(mme_service_config)
    context["use_stateless"] = get_service_config_value(
        "mme", "use_stateless", "",
    )
    context["attached_enodeb_tacs"] = _get_attached_enodeb_tacs(
        mme_service_config,
    )
    context["enable_nat"] = _get_enable_nat(mme_service_config)
    context["federated_mode_map"] = _get_federated_mode_map(mme_service_config)
    context["restricted_plmns"] = _get_restricted_plmns(mme_service_config)
    context["restricted_imeis"] = _get_restricted_imeis(mme_service_config)
    # Prefer the explicitly configured S1-U address; fall back to the
    # address of the configured interface.
    context["s1u_ip"] = mme_service_config.ipv4_sgw_s1u_addr or _get_iface_ip(
        "spgw", "s1u_iface_name",
    )
    # set ovs params
    for key in (
        "ovs_bridge_name",
        "ovs_gtp_port_number",
        "ovs_mtr_port_number",
        "ovs_internal_sampling_port_number",
        "ovs_internal_sampling_fwd_tbl",
        "ovs_uplink_port_number",
        "ovs_uplink_mac",
    ):
        context[key] = get_service_config_value("spgw", key, "")
    context["enable_apn_correction"] = get_service_config_value(
        "mme", "enable_apn_correction", "",
    )
    context["apn_correction_map_list"] = _get_apn_correction_map_list(
        mme_service_config,
    )
    return context
def sentry_init():
    """
    Initialize connection and start piping errors to sentry.io
    """
    sentry_url = get_service_config_value(
        'control_proxy', 'sentry_url', default="",
    )
    if not sentry_url:
        # No DSN configured: leave Sentry disabled.
        return
    sentry_sample_rate = get_service_config_value(
        'control_proxy', 'sentry_sample_rate', default=1.0,
    )
    sentry_sdk.init(dsn=sentry_url, traces_sample_rate=sentry_sample_rate)
def main():
    """
    main() for redirectd. Starts the server threads.
    """
    service = MagmaService('redirectd', mconfigs_pb2.RedirectD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    redirect_ip = get_service_config_value(
        'pipelined', 'bridge_ip_address', None,
    )
    if redirect_ip is None:
        # Cannot serve redirects without knowing which address to bind to.
        logging.error("ERROR bridge_ip_address not found in pipelined config")
        service.close()
        return

    http_port = service.config['http_port']
    exit_callback = get_exit_server_thread_callback(service)
    run_server_thread(run_flask, redirect_ip, http_port, exit_callback)

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def main():
    """
    main() for redirectd
    Initializes the scribe logger, starts the server threads
    """
    service = MagmaService('redirectd', mconfigs_pb2.RedirectD())

    # Only build a scribe logger when the feature is turned on in config.
    scribe_logger = None
    if service.config.get('scribe_logging_enabled', False):
        scribe_logger = RedirectScribeLogger(service.loop)

    redirect_ip = get_service_config_value(
        'pipelined', 'bridge_ip_address', None,
    )
    if redirect_ip is None:
        logging.error("ERROR bridge_ip_address not found in pipelined config")
        service.close()
        return

    http_port = service.config['http_port']
    exit_callback = get_exit_server_thread_callback(service)
    run_server_thread(
        run_flask, redirect_ip, http_port, scribe_logger, exit_callback,
    )

    # Run the service loop
    service.run()

    # Cleanup the service
    service.close()
def __init__(
    self,
    bridge_ip,
    logger,
    main_tbl_num,
    stats_table,
    next_table,
    scratch_table_num,
    session_rule_version_mapper,
):
    # Datapath / table wiring supplied by the caller.
    self._bridge_ip = bridge_ip
    self.logger = logger
    self.main_tbl_num = main_tbl_num
    self.stats_table = stats_table
    self.next_table = next_table
    self._scratch_tbl_num = scratch_table_num
    self._session_rule_version_mapper = session_rule_version_mapper
    # Redirect bookkeeping.
    self._redirect_dict = RedirectDict()
    self._dns_cache = Memoizer({})
    self._redirect_port = get_service_config_value(
        'redirectd', 'http_port', 8080,
    )
    # CWF-specific state; populated lazily after construction.
    self._cwf_args_set = False
    self._mac_rewrite_scratch = None
    self._internal_ip_allocator = None
    self._arpd_controller_fut = None
    self._arp_contoller = None
    self._egress_table = None
    self._bridge_mac = None
def filter_configs_by_key(configs_by_key: Dict[str, TAny]) -> Dict[str, TAny]:
    """
    Given a JSON-deserialized map of mconfig protobuf Any's keyed by service
    name, filter out any entires without a corresponding service or which have
    values that aren't registered in the protobuf symbol database yet.

    Args:
        configs_by_key: JSON-deserialized service mconfigs keyed by
        service name

    Returns:
        The input map without any services which currently don't exist or
        have types which are not in the protobuf type registry.
    """
    configured_services = service_configs.get_service_config_value(
        'magmad', 'magma_services', [],
    )
    configured_services.append('magmad')
    known_services = set(configured_services)

    result = {}
    for srv, cfg in configs_by_key.items():
        if srv not in known_services:
            continue
        try:
            # The '@type' URL ends with the fully-qualified message name.
            type_name = cfg['@type'].split('/')[-1]
            symbol_database.Default().GetSymbol(type_name)
        except KeyError:
            # Missing '@type' key or unregistered protobuf symbol.
            continue
        result[srv] = cfg
    return result
def sentry_init(service_name: str, sentry_mconfig: mconfigs_pb2.SharedSentryConfig) -> None:
    """Initialize connection and start piping errors to sentry.io."""
    sentry_config = _get_shared_sentry_config(sentry_mconfig)

    if not sentry_config.dsn:
        logging.info(
            'Sentry disabled because of missing dsn_python. '
            'See documentation (Configure > AGW) on how to configure '
            'Sentry dsn.',
        )
        return

    sentry_sdk.init(
        dsn=sentry_config.dsn,
        release=os.getenv(COMMIT_HASH),
        traces_sample_rate=sentry_config.sample_rate,
        before_send=_get_before_send_hook(sentry_config.exclusion_patterns),
    )

    # Tag every event with gateway identity and target cloud.
    cloud_address = get_service_config_value(
        CONTROL_PROXY,
        CLOUD_ADDRESS,
        default=None,
    )
    sentry_sdk.set_tag(ORC8R_CLOUD_ADDRESS, cloud_address)
    sentry_sdk.set_tag(HWID, snowflake.snowflake())
    sentry_sdk.set_tag(SERVICE_NAME, service_name)
def filter_configs_by_key(configs_by_key: Dict[str, TAny]) -> Dict[str, TAny]:
    """
    Given a JSON-deserialized map of mconfig protobuf Any's keyed by service
    name, filter out any entires without a corresponding service or which have
    values that aren't registered in the protobuf symbol database yet.

    Args:
        configs_by_key: JSON-deserialized service mconfigs keyed by
        service name

    Returns:
        The input map without any services which currently don't exist.
    """
    configured = service_configs.get_service_config_value(
        'magmad', 'magma_services', [],
    )
    configured.append('magmad')
    known_services = set(configured)
    # Keep only entries whose key is a known service.
    return {
        srv: cfg
        for srv, cfg in configs_by_key.items()
        if srv in known_services
    }
def sentry_init(service_name: str, sentry_mconfig: mconfigs_pb2.SharedSentryConfig) -> None:
    """Initialize connection and start piping errors to sentry.io."""
    sentry_status = get_sentry_status(service_name)
    if sentry_status == SentryStatus.DISABLED:
        return

    dsn_python, sample_rate = get_sentry_dsn_and_sample_rate(sentry_mconfig)
    if not dsn_python:
        logging.info(
            'Sentry disabled because of missing dsn_python. '
            'See documentation (Configure > AGW) on how to configure '
            'Sentry dsn.',
        )
        return

    # Only filter events when the service opted into selective sending.
    if sentry_status == SentryStatus.SEND_SELECTED_ERRORS:
        before_send_hook = _ignore_if_not_marked
    else:
        before_send_hook = None

    sentry_sdk.init(
        dsn=dsn_python,
        release=os.getenv(COMMIT_HASH),
        traces_sample_rate=sample_rate,
        before_send=before_send_hook,
    )

    # Tag every event with gateway identity and target cloud.
    cloud_address = get_service_config_value(
        CONTROL_PROXY,
        CLOUD_ADDRESS,
        default=None,
    )
    sentry_sdk.set_tag(ORC8R_CLOUD_ADDRESS, cloud_address)
    sentry_sdk.set_tag(HWID, snowflake.snowflake())
    sentry_sdk.set_tag(SERVICE_NAME, service_name)
def sentry_init():
    """
    Initialize connection and start piping errors to sentry.io

    Reads the Sentry DSN and sample rate from the control_proxy service
    config; does nothing when no DSN is configured.
    """
    sentry_url = get_service_config_value(
        'control_proxy', 'sentry_url', default="",
    )
    if not sentry_url:
        return
    sentry_sample_rate = get_service_config_value(
        'control_proxy', 'sentry_sample_rate', default=1.0,
    )
    sentry_sdk.init(
        dsn=sentry_url,
        # Use .get() so a missing COMMIT_HASH env var doesn't raise
        # KeyError and crash service startup; sentry_sdk accepts
        # release=None.
        release=os.environ.get('COMMIT_HASH'),
        traces_sample_rate=sentry_sample_rate,
    )
    sentry_sdk.set_tag("hwid", snowflake.snowflake())
def __init__(self, bridge_ip, logger, tbl_num, next_table):
    # Datapath wiring supplied by the caller.
    self._bridge_ip = bridge_ip
    self.logger = logger
    self.tbl_num = tbl_num
    self.next_table = next_table
    # Redirect bookkeeping.
    self._redirect_dict = RedirectDict()
    self._dns_cache = Memoizer({})
    self._redirect_port = get_service_config_value(
        'redirectd', 'http_port', 8080,
    )
def main():
    """Generate OAI config files from templates and the MME certificates."""
    logging.basicConfig(
        level=logging.INFO,
        format="[%(asctime)s %(levelname)s %(name)s] %(message)s",
    )
    context = _get_context()
    # Each template gets its own copy so renders can't affect each other.
    for service, template in (
        ("spgw", "spgw"),
        ("mme", "mme"),
        ("mme", "mme_fd"),
    ):
        generate_template_config(
            service, template, CONFIG_OVERRIDE_DIR, context.copy(),
        )
    cert_dir = get_service_config_value("mme", "cert_dir", "")
    generate_mme_certs(os.path.join(cert_dir, "freeDiameter"))
def _get_enable_nat():
    """Return the NAT flag: service config first, then mconfig, else True."""
    nat_enabled = get_service_config_value("mme", "enable_nat", None)
    if nat_enabled is None:
        nat_enabled = load_service_mconfig("mme", MME()).nat_enabled
    # Default to NAT on when neither source provides a value.
    return True if nat_enabled is None else nat_enabled
def _get_primary_dns_ip(service_mconfig, iface_config):
    """
    Get dnsd interface IP without netmask.
    If caching is enabled, use the ip of interface that dnsd listens over.
    Otherwise, use dns server from service mconfig.
    """
    if not service_mconfig.enable_dns_caching:
        return service_mconfig.dns_primary or DEFAULT_DNS_IP_PRIMARY_ADDR
    iface_name = get_service_config_value("dnsd", iface_config, "")
    return get_ip_from_if(iface_name)
def _get_enable_nat(service_mconfig):
    """
    Retrieves enable_nat config value, prioritizes service config file,
    if not found, it uses service mconfig value.
    """
    from_yml = get_service_config_value('mme', 'enable_nat', None)
    if from_yml is not None:
        return from_yml
    return service_mconfig.nat_enabled
def get_sentry_status(service_name: str) -> SentryStatus:
    """Get Sentry status from service config value"""
    raw_value = get_service_config_value(
        service_name,
        SENTRY_CONFIG,
        default=SentryStatus.DISABLED.value,
    )
    try:
        return SentryStatus(raw_value)
    except ValueError:
        # Unrecognized config value: treat Sentry as disabled.
        return SentryStatus.DISABLED
def sentry_init():
    """Initialize connection and start piping errors to sentry.io."""
    dsn = get_service_config_value(
        CONTROL_PROXY,
        SENTRY_URL,
        default='',
    )
    if not dsn:
        # No DSN configured: leave Sentry disabled.
        return
    sample_rate = get_service_config_value(
        CONTROL_PROXY,
        SENTRY_SAMPLE_RATE,
        default=1.0,
    )
    sentry_sdk.init(
        dsn=dsn,
        release=os.getenv(COMMIT_HASH),
        traces_sample_rate=sample_rate,
    )
    sentry_sdk.set_tag(HWID, snowflake.snowflake())
def main():
    """Generate OAI config files from templates and the MME certificates."""
    logging.basicConfig(
        level=logging.INFO,
        format='[%(asctime)s %(levelname)s %(name)s] %(message)s',
    )
    context = _get_context()
    # Each template gets its own copy so renders can't affect each other.
    for service, template in (
        ('spgw', 'spgw'),
        ('mme', 'mme'),
        ('mme', 'mme_fd'),
    ):
        generate_template_config(
            service, template, CONFIG_OVERRIDE_DIR, context.copy(),
        )
    cert_dir = get_service_config_value('mme', 'cert_dir', "")
    generate_mme_certs(os.path.join(cert_dir, 'freeDiameter'))
def __init__(
    self,
    bridge_ip,
    logger,
    main_tbl_num,
    next_table,
    scratch_table_num,
    session_rule_version_mapper,
):
    # Datapath / table wiring supplied by the caller.
    self._bridge_ip = bridge_ip
    self.logger = logger
    self.main_tbl_num = main_tbl_num
    self.next_table = next_table
    self._scratch_tbl_num = scratch_table_num
    self._session_rule_version_mapper = session_rule_version_mapper
    # Redirect bookkeeping.
    self._redirect_dict = RedirectDict()
    self._dns_cache = Memoizer({})
    self._redirect_port = get_service_config_value(
        'redirectd', 'http_port', 8080,
    )
def _get_oai_log_level():
    """
    Convert the logLevel in config into the level which OAI code uses.
    We use OAI's 'TRACE' as the debugging log level and 'CRITICAL' as the
    fatal log level.
    """
    level = get_service_config_value("mme", "log_level", "INFO")
    # Translate common log levels to OAI levels
    translation = {"DEBUG": "TRACE", "FATAL": "CRITICAL"}
    return translation.get(level, level)
def _get_oai_log_level():
    """
    Convert the logLevel in config into the level which OAI code uses.
    We use OAI's 'TRACE' as the debugging log level and 'CRITICAL' as the
    fatal log level.
    """
    level = get_service_config_value('mme', 'log_level', 'INFO')
    # Translate common log levels to OAI levels
    translation = {'DEBUG': 'TRACE', 'FATAL': 'CRITICAL'}
    return translation.get(level, level)
def _get_context():
    """
    Create the context which has the interface IP and the OAI log level to use.
    """
    context = {
        "mme_s11_ip": _get_iface_ip("mme", "s11_iface_name"),
        "sgw_s11_ip": _get_iface_ip("spgw", "s11_iface_name"),
        "remote_sgw_ip": get_service_config_value("mme", "remote_sgw_ip", ""),
        "s1ap_ip": _get_iface_ip("mme", "s1ap_iface_name"),
        "s1u_ip": _get_iface_ip("spgw", "s1u_iface_name"),
        "oai_log_level": _get_oai_log_level(),
        "ipv4_dns": _get_primary_dns_ip('dns_iface_name'),
        "ipv4_sec_dns": _get_secondary_dns_ip(),
        "identity": _get_identity(),
        "relay_enabled": _get_relay_enabled(),
        "non_eps_service_control": _get_non_eps_service_control(),
        "csfb_mcc": _get_csfb_mcc(),
        "csfb_mnc": _get_csfb_mnc(),
        "lac": _get_lac(),
        "use_stateless": get_service_config_value("mme", "use_stateless", ""),
        "attached_enodeb_tacs": _get_attached_enodeb_tacs(),
        "enable_nat": _get_enable_nat(),
        "enable_apn_correction": get_service_config_value(
            "mme", "enable_apn_correction", "",
        ),
        "apn_correction_map_list": _get_apn_correction_map_list(),
    }
    # set ovs params
    for key in (
        "ovs_bridge_name",
        "ovs_gtp_port_number",
        "ovs_mtr_port_number",
        "ovs_internal_sampling_port_number",
        "ovs_internal_sampling_fwd_tbl",
        "ovs_uplink_port_number",
        "ovs_uplink_mac",
    ):
        context[key] = get_service_config_value("spgw", key, "")
    return context
def _get_context():
    """
    Create the context which has the interface IP and the OAI log level to use.
    """
    realm = get_service_config_value('mme', 'realm', "")
    context = {
        's11_ip': _get_iface_ip('spgw', 's11_iface_name'),
        's1ap_ip': _get_iface_ip('mme', 's1ap_iface_name'),
        's1u_ip': _get_iface_ip('spgw', 's1u_iface_name'),
        'oai_log_level': _get_oai_log_level(),
        'ipv4_dns': _get_dns_ip('dns_iface_name'),
        # freeDiameter identity: <hostname>.<realm>
        'identity': socket.gethostname() + '.' + realm,
        'relay_enabled': _get_relay_enabled(),
        'non_eps_service_control': _get_non_eps_service_control(),
        'csfb_mcc': _get_csfb_mcc(),
        'csfb_mnc': _get_csfb_mnc(),
        'lac': _get_lac(),
    }
    # set ovs params
    for key in (
        'ovs_bridge_name',
        'ovs_gtp_port_number',
        'ovs_uplink_port_number',
        'ovs_uplink_mac',
    ):
        context[key] = get_service_config_value('spgw', key, '')
    return context