def __init__(self, magma_service: MagmaService):
    """
    Wire up the service manager: read the 5G feature flag, seed the
    mandatory ingress/middle/egress apps, collect the remaining static and
    dynamic apps, and assign a flow table to each one.
    """
    self._magma_service = magma_service
    # An absent 'enable5g_features' key means 5G features are disabled.
    self._5G_flag_enable = magma_service.config.get('enable5g_features', False)

    # ingress, middle and egress are mandatory apps and occupy:
    #   table 1  (for ingress)
    #   table 10 (for middle)
    #   table 20 (for egress)
    self._apps = [
        App(
            name=controller.APP_NAME,
            module=controller.__module__,
            type=None,
            order_priority=0,
        )
        for controller in (IngressController, MiddleController, EgressController)
    ]

    self._table_manager = _TableManager()
    self.rule_id_mapper = RuleIDToNumMapper()
    self.session_rule_version_mapper = SessionRuleToVersionMapper()
    self.interface_to_prefix_mapper = InterfaceIDToPrefixMapper()
    self.restart_info_store = RestartInfoStore()
    self.ebpf = get_ebpf_manager(magma_service.config)

    # Static apps first, then dynamic apps, ordered by priority.
    apps = self._get_static_apps() + self._get_dynamic_apps()
    apps.sort(key=lambda app: app.order_priority)
    self._apps.extend(apps)

    # Hand out flow tables, skipping reserved apps and table-less apps.
    table0_names = {self.UE_MAC_ADDRESS_SERVICE_NAME, self.XWF_PASSTHRU_NAME}
    for app in apps:
        if app.name in self.STATIC_APP_WITH_NO_TABLE:
            continue
        # UE MAC service (and, when 5G is enabled, the classifier) must be
        # registered with Table 0.
        if app.name in table0_names or \
                (self._5G_flag_enable and app.name == self.CLASSIFIER_NAME):
            self._table_manager.register_apps_for_table0_service([app])
            continue
        self._table_manager.register_apps_for_service([app])
def __init__(self, magma_service: MagmaService):
    """
    Seed the mandatory inout app, gather static and dynamic apps, and
    register each one with the table manager.
    """
    self._magma_service = magma_service
    # inout is a mandatory app and it occupies:
    #   table 1  (for ingress)
    #   table 10 (for middle)
    #   table 20 (for egress)
    self._apps = [
        App(
            name=InOutController.APP_NAME,
            module=InOutController.__module__,
            type=None,
            order_priority=0,
        ),
    ]
    self._table_manager = _TableManager()
    self.rule_id_mapper = RuleIDToNumMapper()
    self.session_rule_version_mapper = SessionRuleToVersionMapper()

    # Static apps first, then dynamic apps, ordered by priority.
    apps = self._get_static_apps() + self._get_dynamic_apps()
    apps.sort(key=lambda app: app.order_priority)
    self._apps.extend(apps)

    # Filter out reserved apps and apps that don't need a table.
    for app in apps:
        if app.name in self.STATIC_APP_WITH_NO_TABLE:
            continue
        # UE MAC service must be registered with Table 0.
        if app.name in (self.UE_MAC_ADDRESS_SERVICE_NAME, self.XWF_PASSTHRU_NAME):
            self._table_manager.register_apps_for_table0_service([app])
            continue
        self._table_manager.register_apps_for_service([app])
def __init__(self, magma_service: MagmaService):
    """
    Seed the mandatory inout app, read the 5G feature flag, gather static
    and dynamic apps, and register each one with the table manager.
    """
    self._magma_service = magma_service
    # '5G_feature_set' is an optional config section; once present, its
    # 'enable' key decides whether the 5G code paths are active.
    if '5G_feature_set' in magma_service.config:
        self._5G_flag_enable = magma_service.config.get('5G_feature_set')['enable']
    else:
        self._5G_flag_enable = False

    # inout is a mandatory app and it occupies:
    #   table 1  (for ingress)
    #   table 10 (for middle)
    #   table 20 (for egress)
    self._apps = [
        App(
            name=InOutController.APP_NAME,
            module=InOutController.__module__,
            type=None,
            order_priority=0,
        ),
    ]
    self._table_manager = _TableManager()
    self.rule_id_mapper = RuleIDToNumMapper()
    self.session_rule_version_mapper = SessionRuleToVersionMapper()
    self.interface_to_prefix_mapper = InterfaceIDToPrefixMapper()
    self.tunnel_id_mapper = TunnelToTunnelMapper()

    # Static apps first, then dynamic apps, ordered by priority.
    apps = self._get_static_apps() + self._get_dynamic_apps()
    apps.sort(key=lambda app: app.order_priority)
    self._apps.extend(apps)

    # Filter out reserved apps and apps that don't need a table.
    for app in apps:
        if app.name in self.STATIC_APP_WITH_NO_TABLE:
            continue
        # UE MAC service (and, when 5G is enabled, the classifier) must be
        # registered with Table 0.
        if app.name in (self.UE_MAC_ADDRESS_SERVICE_NAME, self.XWF_PASSTHRU_NAME):
            self._table_manager.register_apps_for_table0_service([app])
            continue
        if self._5G_flag_enable and app.name == self.CLASSIFIER_NAME:
            self._table_manager.register_apps_for_table0_service([app])
            continue
        self._table_manager.register_apps_for_service([app])
def start_ryu_apps(self, launch_successful_future):
    """
    Starts up ryu applications, all the configuration is parsed from the
    test_setup config provided in the unit test.

    If apps throw an exception on launch, error is passed in the
    launch_successful_future and will prevent infinitely waiting.
    """
    self.reset_static_vars()
    hub.spawn(self._process_queue)

    app_names = [a.value.name for a in self._test_setup.apps]
    futures_by_app = {
        ref.value.app_future: fut
        for ref, fut in self._test_setup.references.items()
    }

    manager = AppManager.get_instance()
    manager.load_apps(app_names)

    contexts = manager.create_contexts()
    contexts['sids_by_ip'] = {}  # shared by both metering apps
    contexts['rule_id_mapper'] = RuleIDToNumMapper()
    contexts['internal_ip_allocator'] = \
        InternalIPAllocator(self._test_setup.config)
    contexts['session_rule_version_mapper'] = \
        self._test_setup.service_manager.session_rule_version_mapper
    contexts['app_futures'] = futures_by_app
    contexts['config'] = self._test_setup.config
    contexts['mconfig'] = self._test_setup.mconfig
    contexts['loop'] = self._test_setup.loop
    contexts['rpc_stubs'] = self._test_setup.rpc_stubs
    contexts['service_manager'] = self._test_setup.service_manager

    logging.basicConfig(
        level=logging.INFO,
        format='[%(asctime)s %(levelname)s %(name)s] %(message)s',
    )

    services = []
    try:
        services.extend(manager.instantiate_apps(**contexts))
    except Exception as e:
        # Report the launch failure to the waiting test instead of hanging.
        launch_successful_future.set_result(
            "Ryu apps launch exception: {}".format(e))
        raise

    launch_successful_future.set_result("Setup successful")
    self.run(manager)
def load(self):
    """
    Instantiates and schedules the Ryu app eventlets in the service
    eventloop.
    """
    manager = AppManager.get_instance()
    manager.load_apps([app.module for app in self._apps])

    contexts = manager.create_contexts()
    contexts['rule_id_mapper'] = RuleIDToNumMapper()
    contexts['session_rule_version_mapper'] = self.session_rule_version_mapper
    contexts['app_futures'] = {app.name: Future() for app in self._apps}
    contexts['config'] = self._magma_service.config
    contexts['mconfig'] = self._magma_service.mconfig
    contexts['loop'] = self._magma_service.loop
    contexts['service_manager'] = self

    # gRPC channels to the services the controllers depend on.
    records_chan = ServiceRegistry.get_rpc_channel(
        'meteringd_records', ServiceRegistry.CLOUD)
    sessiond_chan = ServiceRegistry.get_rpc_channel(
        'sessiond', ServiceRegistry.LOCAL)
    mobilityd_chan = ServiceRegistry.get_rpc_channel(
        'mobilityd', ServiceRegistry.LOCAL)
    contexts['rpc_stubs'] = {
        'metering_cloud': MeteringdRecordsControllerStub(records_chan),
        'mobilityd': MobilityServiceStub(mobilityd_chan),
        'sessiond': LocalSessionManagerStub(sessiond_chan),
    }

    # Instantiate and schedule apps. Each eventlet is wrapped in asyncio so
    # it stops when the service loop is stopped.
    for app in manager.instantiate_apps(**contexts):
        task = aioeventlet.wrap_greenthread(app, self._magma_service.loop)
        asyncio.ensure_future(task)

    # In development mode, also run the OpenFlow REST server for debugging.
    if environment.is_dev_mode():
        server_thread = of_rest_server.start(manager)
        task = aioeventlet.wrap_greenthread(
            server_thread, self._magma_service.loop)
        asyncio.ensure_future(task)
class ServiceManager:
    """
    ServiceManager manages the service lifecycle and chaining of services for
    the Ryu apps. Ryu apps are loaded based on the services specified in the
    YAML config for static apps and mconfig for dynamic apps.
    ServiceManager also maintains a mapping between apps to the flow tables
    they use.

    Currently, its use cases include:
        - Starting all Ryu apps
        - Flow table number lookup for Ryu apps
        - Main & scratch tables management
    """

    # Names of static services as they appear in the pipelined YAML config.
    UE_MAC_ADDRESS_SERVICE_NAME = 'ue_mac'
    ARP_SERVICE_NAME = 'arpd'
    ACCESS_CONTROL_SERVICE_NAME = 'access_control'
    ipv6_solicitation_SERVICE_NAME = 'ipv6_solicitation'
    TUNNEL_LEARN_SERVICE_NAME = 'tunnel_learn'
    VLAN_LEARN_SERVICE_NAME = 'vlan_learn'
    IPFIX_SERVICE_NAME = 'ipfix'
    CONNTRACK_SERVICE_NAME = 'conntrack'
    RYU_REST_SERVICE_NAME = 'ryu_rest_service'
    RYU_REST_APP_NAME = 'ryu_rest_app'
    STARTUP_FLOWS_RECIEVER_CONTROLLER = 'startup_flows'
    CHECK_QUOTA_SERVICE_NAME = 'check_quota'
    LI_MIRROR_SERVICE_NAME = 'li_mirror'
    XWF_PASSTHRU_NAME = 'xwf_passthru'
    UPLINK_BRIDGE_NAME = 'uplink_bridge'
    CLASSIFIER_NAME = 'classifier'
    HE_CONTROLLER_NAME = 'proxy'
    NG_SERVICE_CONTROLLER_NAME = 'ng_services'

    # Fixed table numbers reserved for internal (non-service) use.
    INTERNAL_APP_SET_TABLE_NUM = 201
    INTERNAL_IMSI_SET_TABLE_NUM = 202
    INTERNAL_IPFIX_SAMPLE_TABLE_NUM = 203
    INTERNAL_MAC_IP_REWRITE_TBL_NUM = 204

    # Mapping between services defined in mconfig and the names and modules of
    # the corresponding Ryu apps in PipelineD. The module is used for the Ryu
    # app manager to instantiate the app.
    # Note that a service may require multiple apps.
    # Dynamic services come from the mconfig; each maps to one or more apps,
    # ordered among all apps by order_priority.
    DYNAMIC_SERVICE_TO_APPS = {
        PipelineD.ENFORCEMENT: [
            App(
                name=GYController.APP_NAME,
                module=GYController.__module__,
                type=GYController.APP_TYPE,
                order_priority=499,
            ),
            App(
                name=EnforcementController.APP_NAME,
                module=EnforcementController.__module__,
                type=EnforcementController.APP_TYPE,
                order_priority=500,
            ),
            App(
                name=EnforcementStatsController.APP_NAME,
                module=EnforcementStatsController.__module__,
                type=EnforcementStatsController.APP_TYPE,
                order_priority=501,
            ),
        ],
        PipelineD.DPI: [
            App(
                name=DPIController.APP_NAME,
                module=DPIController.__module__,
                type=DPIController.APP_TYPE,
                order_priority=400,
            ),
        ],
    }

    # Mapping between the app names defined in pipelined.yml and the names and
    # modules of their corresponding Ryu apps in PipelineD.
    STATIC_SERVICE_TO_APPS = {
        UE_MAC_ADDRESS_SERVICE_NAME: [
            App(
                name=UEMacAddressController.APP_NAME,
                module=UEMacAddressController.__module__,
                type=None,
                order_priority=0,
            ),
        ],
        ARP_SERVICE_NAME: [
            App(
                name=ArpController.APP_NAME,
                module=ArpController.__module__,
                type=ArpController.APP_TYPE,
                order_priority=200,
            ),
        ],
        ACCESS_CONTROL_SERVICE_NAME: [
            App(
                name=AccessControlController.APP_NAME,
                module=AccessControlController.__module__,
                type=AccessControlController.APP_TYPE,
                order_priority=400,
            ),
        ],
        HE_CONTROLLER_NAME: [
            App(
                name=HeaderEnrichmentController.APP_NAME,
                module=HeaderEnrichmentController.__module__,
                type=HeaderEnrichmentController.APP_TYPE,
                order_priority=401,
            ),
        ],
        ipv6_solicitation_SERVICE_NAME: [
            App(
                name=IPV6SolicitationController.APP_NAME,
                module=IPV6SolicitationController.__module__,
                type=IPV6SolicitationController.APP_TYPE,
                order_priority=210,
            ),
        ],
        TUNNEL_LEARN_SERVICE_NAME: [
            App(
                name=TunnelLearnController.APP_NAME,
                module=TunnelLearnController.__module__,
                type=TunnelLearnController.APP_TYPE,
                order_priority=300,
            ),
        ],
        VLAN_LEARN_SERVICE_NAME: [
            App(
                name=VlanLearnController.APP_NAME,
                module=VlanLearnController.__module__,
                type=VlanLearnController.APP_TYPE,
                order_priority=500,
            ),
        ],
        RYU_REST_SERVICE_NAME: [
            # The Ryu REST app ships with Ryu itself, hence the string module.
            App(
                name=RYU_REST_APP_NAME,
                module='ryu.app.ofctl_rest',
                type=None,
                order_priority=0,
            ),
        ],
        STARTUP_FLOWS_RECIEVER_CONTROLLER: [
            App(
                name=StartupFlows.APP_NAME,
                module=StartupFlows.__module__,
                type=StartupFlows.APP_TYPE,
                order_priority=0,
            ),
        ],
        CHECK_QUOTA_SERVICE_NAME: [
            App(
                name=CheckQuotaController.APP_NAME,
                module=CheckQuotaController.__module__,
                type=CheckQuotaController.APP_TYPE,
                order_priority=300,
            ),
        ],
        CONNTRACK_SERVICE_NAME: [
            App(
                name=ConntrackController.APP_NAME,
                module=ConntrackController.__module__,
                type=ConntrackController.APP_TYPE,
                order_priority=700,
            ),
        ],
        IPFIX_SERVICE_NAME: [
            App(
                name=IPFIXController.APP_NAME,
                module=IPFIXController.__module__,
                type=IPFIXController.APP_TYPE,
                order_priority=800,
            ),
        ],
        LI_MIRROR_SERVICE_NAME: [
            App(
                name=LIMirrorController.APP_NAME,
                module=LIMirrorController.__module__,
                type=LIMirrorController.APP_TYPE,
                order_priority=900,
            ),
        ],
        XWF_PASSTHRU_NAME: [
            App(
                name=XWFPassthruController.APP_NAME,
                module=XWFPassthruController.__module__,
                type=XWFPassthruController.APP_TYPE,
                order_priority=0,
            ),
        ],
        UPLINK_BRIDGE_NAME: [
            App(
                name=UplinkBridgeController.APP_NAME,
                module=UplinkBridgeController.__module__,
                type=UplinkBridgeController.APP_TYPE,
                order_priority=0,
            ),
        ],
        CLASSIFIER_NAME: [
            App(
                name=Classifier.APP_NAME,
                module=Classifier.__module__,
                type=Classifier.APP_TYPE,
                order_priority=0,
            ),
        ],
        # 5G Related services
        NG_SERVICE_CONTROLLER_NAME: [
            App(
                name=NGServiceController.APP_NAME,
                module=NGServiceController.__module__,
                type=None,
                order_priority=0,
            ),
        ],
    }

    # Some apps do not use a table, so they need to be excluded from table
    # allocation.
STATIC_APP_WITH_NO_TABLE = [
    RYU_REST_APP_NAME,
    StartupFlows.APP_NAME,
    UplinkBridgeController.APP_NAME,
    NGServiceController.APP_NAME,
]

def __init__(self, magma_service: MagmaService):
    """
    Build the full app list and assign a flow table to each app.

    Args:
        magma_service: the enclosing service; supplies the YAML config,
            the mconfig and the event loop.
    """
    self._magma_service = magma_service
    # An absent 'enable5g_features' key means 5G features are disabled.
    self._5G_flag_enable = magma_service.config.get('enable5g_features', False)

    # inout is a mandatory app and it occupies:
    #   table 1  (for ingress)
    #   table 10 (for middle)
    #   table 20 (for egress)
    self._apps = [
        App(
            name=InOutController.APP_NAME,
            module=InOutController.__module__,
            type=None,
            order_priority=0,
        ),
    ]
    self._table_manager = _TableManager()
    self.rule_id_mapper = RuleIDToNumMapper()
    self.session_rule_version_mapper = SessionRuleToVersionMapper()
    self.interface_to_prefix_mapper = InterfaceIDToPrefixMapper()
    self.restart_info_store = RestartInfoStore()

    apps = self._get_static_apps()
    apps.extend(self._get_dynamic_apps())
    apps.sort(key=lambda x: x.order_priority)
    self._apps.extend(apps)

    # Filter out reserved apps and apps that don't need a table.
    for app in apps:
        if app.name in self.STATIC_APP_WITH_NO_TABLE:
            continue
        # UE MAC service must be registered with Table 0.
        if app.name in (
            self.UE_MAC_ADDRESS_SERVICE_NAME,
            self.XWF_PASSTHRU_NAME,
        ):
            self._table_manager.register_apps_for_table0_service([app])
            continue
        # When 5G is enabled, the classifier also owns Table 0.
        if self._5G_flag_enable and app.name == self.CLASSIFIER_NAME:
            self._table_manager.register_apps_for_table0_service([app])
            continue
        self._table_manager.register_apps_for_service([app])

def _get_static_apps(self):
    """
    Return the App entries for every static service named in the YAML
    config, plus the setup-dependent extras (uplink bridge on LTE, the
    classifier and NG service controller when 5G is enabled).
    """
    # Copy before appending: the list object belongs to the shared config,
    # and mutating it in place would duplicate the extra services if this
    # method ever ran twice on the same config.
    static_services = list(self._magma_service.config['static_services'])
    setup_type = self._magma_service.config.get('setup_type', None)
    if setup_type == 'LTE':
        static_services.append(self.UPLINK_BRIDGE_NAME)
        logging.info("added uplink bridge controller")

    if self._5G_flag_enable:
        static_services.append(self.CLASSIFIER_NAME)
        static_services.append(self.NG_SERVICE_CONTROLLER_NAME)
        logging.info("added classifier and ng service controller")

    return [
        app
        for service in static_services
        for app in self.STATIC_SERVICE_TO_APPS[service]
    ]

def _get_dynamic_apps(self):
    """
    Return the App entries for every supported dynamic service listed in
    the mconfig. Unknown services are logged and skipped.
    """
    dynamic_services = []
    for service in self._magma_service.mconfig.services:
        if service not in self.DYNAMIC_SERVICE_TO_APPS:
            # Most likely cause: the config contains a deprecated
            # pipelined service.
            # Fix: update the relevant network's network_services settings.
            logging.warning(
                'Mconfig contains unsupported network_services service: %s',
                service,
            )
            continue
        dynamic_services.append(service)
    return [
        app
        for service in dynamic_services
        for app in self.DYNAMIC_SERVICE_TO_APPS[service]
    ]

def load(self):
    """
    Instantiates and schedules the Ryu app eventlets in the service
    eventloop.
    """
    # Some setups might not use REDIS
    if self._magma_service.config['redis_enabled']:
        # Wait for redis as multiple controllers rely on it
        while not redisAvailable(self.rule_id_mapper.redis_cli):
            logging.warning("Pipelined waiting for redis...")
            time.sleep(1)
        self.rule_id_mapper.setup_redis()
        self.interface_to_prefix_mapper.setup_redis()

    manager = AppManager.get_instance()
    manager.load_apps([app.module for app in self._apps])

    # Shared state handed to every controller at instantiation time.
    contexts = manager.create_contexts()
    contexts['rule_id_mapper'] = self.rule_id_mapper
    contexts['session_rule_version_mapper'] = self.session_rule_version_mapper
    contexts['interface_to_prefix_mapper'] = self.interface_to_prefix_mapper
    contexts['restart_info_store'] = self.restart_info_store
    contexts['app_futures'] = {app.name: Future() for app in self._apps}
    contexts['internal_ip_allocator'] = \
        InternalIPAllocator(self._magma_service.config)
    contexts['config'] = self._magma_service.config
    contexts['mconfig'] = self._magma_service.mconfig
    contexts['loop'] = self._magma_service.loop
    contexts['service_manager'] = self

    sessiond_chan = ServiceRegistry.get_rpc_channel(
        'sessiond', ServiceRegistry.LOCAL,
    )
    mobilityd_chan = ServiceRegistry.get_rpc_channel(
        'mobilityd', ServiceRegistry.LOCAL,
    )
    contexts['rpc_stubs'] = {
        'mobilityd': MobilityServiceStub(mobilityd_chan),
        'sessiond': LocalSessionManagerStub(sessiond_chan),
    }
    if self._5G_flag_enable:
        # 5G user-plane setup talks back to sessiond via a second stub.
        contexts['rpc_stubs']['sessiond_setinterface'] = \
            SetInterfaceForUserPlaneStub(sessiond_chan)

    # Instantiate and schedule apps
    for app in manager.instantiate_apps(**contexts):
        # Wrap the eventlet in asyncio so it will stop when the loop is
        # stopped
        future = aioeventlet.wrap_greenthread(
            app, self._magma_service.loop,
        )
        # Schedule the eventlet for evaluation in service loop
        asyncio.ensure_future(future)

    # In development mode, also run the OpenFlow REST server for debugging.
    if environment.is_dev_mode():
        server_thread = of_rest_server.start(manager)
        future = aioeventlet.wrap_greenthread(
            server_thread, self._magma_service.loop,
        )
        asyncio.ensure_future(future)

def get_table_num(self, app_name: str) -> int:
    """
    Args:
        app_name: Name of the app
    Returns:
        The app's main table number
    """
    return self._table_manager.get_table_num(app_name)

def get_next_table_num(self, app_name: str) -> int:
    """
    Args:
        app_name: Name of the app
    Returns:
        The main table number of the next service. If there are no more
        services after the current table, return the EGRESS table
    """
    return self._table_manager.get_next_table_num(app_name)

def is_app_enabled(self, app_name: str) -> bool:
    """
    Args:
        app_name: Name of the app
    Returns:
        Whether or not the app is enabled
    """
    return self._table_manager.is_app_enabled(app_name)

def is_ng_app_enabled(self, app_name: str) -> bool:
    """
    Args:
        app_name: Name of the app
    Returns:
        Whether or not the 5G (NG) app is enabled. Always False when the
        5G feature flag is off.
    """
    if not self._5G_flag_enable:
        return False
    return self._table_manager.is_ng_app_enabled(app_name)

def allocate_scratch_tables(self, app_name: str, count: int) -> List[int]:
    """
    Args:
        app_name: Each scratch table is associated with an app. This is
            used to help enforce scratch table isolation between apps.
        count: Number of scratch tables to be claimed
    Returns:
        List of scratch table numbers
    Raises:
        TableNumException if there are no more available tables
    """
    return self._table_manager.allocate_scratch_tables(app_name, count)

def get_scratch_table_nums(self, app_name: str) -> List[int]:
    """
    Returns the scratch tables claimed by the given app.
    """
    return self._table_manager.get_scratch_table_nums(app_name)

def get_all_table_assignments(self):
    """
    Returns:
        OrderedDict of app name to tables mapping, ordered by main table
        number, and app name.
    """
    return self._table_manager.get_all_table_assignments()
def test_enforcement_restart(self):
    """
    Adds rules using the setup feature

    1) Empty SetupFlowsRequest - assert default flows
    2) Add 2 imsis, add 2 policies(sub1_rule_temp, sub2_rule_keep),
       - assert everything is properly added
    3) Same imsis 1 new policy, 1 old (sub2_new_rule, sub2_rule_keep)
       - assert everything is properly added
    4) Empty SetupFlowsRequest - assert default flows
    """
    # Fresh rule mappers so numbering starts clean for this test.
    self.enforcement_controller._rule_mapper = RuleIDToNumMapper()
    self.enforcement_stats_controller._rule_mapper = RuleIDToNumMapper()

    # Step 1: empty setup -> only default flows should be installed.
    # NOTE(review): 'startup_flows_contoller' (sic) is the attribute name
    # defined by the test fixture; kept as-is.
    fake_controller_setup(
        enf_controller=self.enforcement_controller,
        enf_stats_controller=self.enforcement_stats_controller,
        startup_flow_controller=self.startup_flows_contoller,
    )
    snapshot_verifier = SnapshotVerifier(
        self, self.BRIDGE, self.service_manager,
        'default_flows',
    )
    with snapshot_verifier:
        pass

    # Step 2: two subscribers, one policy each.
    imsi1 = 'IMSI010000000088888'
    imsi2 = 'IMSI010000000012345'
    sub2_ip = '192.168.128.74'
    flow_list1 = [
        FlowDescription(
            match=FlowMatch(
                ip_dst=convert_ipv4_str_to_ip_proto('45.10.0.0/24'),
                direction=FlowMatch.UPLINK,
            ),
            action=FlowDescription.PERMIT,
        ),
        FlowDescription(
            match=FlowMatch(
                ip_dst=convert_ipv4_str_to_ip_proto('45.11.0.0/24'),
                direction=FlowMatch.UPLINK,
            ),
            action=FlowDescription.PERMIT,
        ),
    ]
    flow_list2 = [
        FlowDescription(
            match=FlowMatch(
                ip_dst=convert_ipv4_str_to_ip_proto('10.10.1.0/24'),
                direction=FlowMatch.UPLINK,
            ),
            action=FlowDescription.PERMIT,
        ),
    ]
    policies1 = [
        VersionedPolicy(
            rule=PolicyRule(id='sub1_rule_temp', priority=2, flow_list=flow_list1),
            version=1,
        ),
    ]
    policies2 = [
        VersionedPolicy(
            rule=PolicyRule(id='sub2_rule_keep', priority=3, flow_list=flow_list2),
            version=1,
        ),
    ]
    # NOTE(review): enf_stat_name is assigned here (and again below) but
    # never read — candidate for removal.
    enf_stat_name = [
        imsi1 + '|sub1_rule_temp' + '|' + sub2_ip,
        imsi2 + '|sub2_rule_keep' + '|' + sub2_ip,
    ]
    self.service_manager.session_rule_version_mapper.save_version(
        imsi1, convert_ipv4_str_to_ip_proto(sub2_ip),
        'sub1_rule_temp', 1,
    )
    self.service_manager.session_rule_version_mapper.save_version(
        imsi2, convert_ipv4_str_to_ip_proto(sub2_ip),
        'sub2_rule_keep', 1,
    )
    setup_flows_request = SetupFlowsRequest(
        requests=[
            ActivateFlowsRequest(
                sid=SIDUtils.to_pb(imsi1),
                ip_addr=sub2_ip,
                policies=policies1,
            ),
            ActivateFlowsRequest(
                sid=SIDUtils.to_pb(imsi2),
                ip_addr=sub2_ip,
                policies=policies2,
            ),
        ],
        epoch=global_epoch,
    )

    # Simulate clearing the dict
    self.service_manager.session_rule_version_mapper\
        ._version_by_imsi_and_rule = {}

    fake_controller_setup(
        enf_controller=self.enforcement_controller,
        enf_stats_controller=self.enforcement_stats_controller,
        startup_flow_controller=self.startup_flows_contoller,
        setup_flows_request=setup_flows_request,
    )

    # Inject a packet matching sub2's policy and snapshot the pre-restart
    # flow state.
    sub_context = RyuDirectSubscriberContext(
        imsi2, sub2_ip, self.enforcement_controller,
        self._enforcement_tbl_num,
    )
    isolator = RyuDirectTableIsolator(
        RyuForwardFlowArgsBuilder.from_subscriber(
            sub_context.cfg).build_requests(),
        self.testing_controller,
    )
    pkt_sender = ScapyPacketInjector(self.IFACE)
    packets = IPPacketBuilder()\
        .set_ip_layer('10.10.1.8/20', sub2_ip)\
        .set_ether_layer(self.MAC_DEST, "00:00:00:00:00:00")\
        .build()

    snapshot_verifier = SnapshotVerifier(
        self, self.BRIDGE, self.service_manager,
        'before_restart',
    )
    with isolator, snapshot_verifier:
        pkt_sender.send(packets)

    # Step 3: restart with one new policy and one carried-over policy.
    flow_list1 = [
        FlowDescription(
            match=FlowMatch(
                ip_dst=convert_ipv4_str_to_ip_proto('24.10.0.0/24'),
                direction=FlowMatch.UPLINK,
            ),
            action=FlowDescription.PERMIT,
        ),
    ]
    policies = [
        VersionedPolicy(
            rule=PolicyRule(id='sub2_new_rule', priority=2, flow_list=flow_list1),
            version=1,
        ),
        VersionedPolicy(
            rule=PolicyRule(id='sub2_rule_keep', priority=3, flow_list=flow_list2),
            version=1,
        ),
    ]
    enf_stat_name = [
        imsi2 + '|sub2_new_rule' + '|' + sub2_ip + "|" + "1",
        imsi2 + '|sub2_rule_keep' + '|' + sub2_ip + "|" + "1",
    ]
    setup_flows_request = SetupFlowsRequest(
        requests=[
            ActivateFlowsRequest(
                sid=SIDUtils.to_pb(imsi2),
                ip_addr=sub2_ip,
                policies=policies,
            ),
        ],
        epoch=global_epoch,
    )
    fake_controller_setup(
        enf_controller=self.enforcement_controller,
        enf_stats_controller=self.enforcement_stats_controller,
        startup_flow_controller=self.startup_flows_contoller,
        setup_flows_request=setup_flows_request,
    )
    snapshot_verifier = SnapshotVerifier(
        self, self.BRIDGE, self.service_manager,
        'after_restart',
    )
    with snapshot_verifier:
        pass

    # Step 4: empty setup again -> back to default flows.
    fake_controller_setup(
        enf_controller=self.enforcement_controller,
        enf_stats_controller=self.enforcement_stats_controller,
        startup_flow_controller=self.startup_flows_contoller,
    )
    snapshot_verifier = SnapshotVerifier(
        self, self.BRIDGE, self.service_manager,
        'default_flows_w_packets',
    )
    with snapshot_verifier:
        pass