def __init__(self, *args, **kwargs):
    """Set up DNAT bridge config, caches, rate limiters and handlers."""
    super(DNATApp, self).__init__(*args, **kwargs)
    dnat_conf = cfg.CONF.df_dnat_app
    self.conf = dnat_conf
    self.external_network_bridge = dnat_conf.external_network_bridge
    self.external_bridge_mac = ""
    self.integration_bridge = cfg.CONF.df.integration_bridge
    self.int_peer_patch_port = dnat_conf.int_peer_patch_port
    self.ex_peer_patch_port = dnat_conf.ex_peer_patch_port
    self.external_networks = collections.defaultdict(int)
    self.local_floatingips = collections.defaultdict(str)
    # Map between fixed ip mac to floating ip
    self.floatingip_rarp_cache = {}

    # One limiter per direction, for TTL-invalid punts and ICMP errors.
    def _limiter(max_rate):
        return df_utils.RateLimiter(max_rate=max_rate, time_unit=1)

    self.egress_ttl_invalid_handler_rate_limit = _limiter(
        dnat_conf.dnat_ttl_invalid_max_rate)
    self.ingress_ttl_invalid_handler_rate_limit = _limiter(
        dnat_conf.dnat_ttl_invalid_max_rate)
    self.egress_icmp_error_rate_limit = _limiter(
        dnat_conf.dnat_icmp_error_max_rate)
    self.ingress_icmp_error_rate_limit = _limiter(
        dnat_conf.dnat_icmp_error_max_rate)

    for table_id, handler in (
            (const.INGRESS_NAT_TABLE, self.ingress_packet_in_handler),
            (const.EGRESS_NAT_TABLE, self.egress_packet_in_handler)):
        self.api.register_table_handler(table_id, handler)
def __init__(self, *args, **kwargs):
    """Initialize L3 routing state and register the L3 lookup handler."""
    super(L3AppMixin, self).__init__()
    self.route_cache = {}
    l3_conf = cfg.CONF.df_l3_app
    self.conf = l3_conf
    # Throttle controller punts for TTL-expired packets and ICMP
    # port-unreachable responses.
    self.ttl_invalid_handler_rate_limit = df_utils.RateLimiter(
        max_rate=l3_conf.router_ttl_invalid_max_rate, time_unit=1)
    self.port_icmp_unreach_respond_rate_limit = df_utils.RateLimiter(
        max_rate=l3_conf.router_port_unreach_max_rate, time_unit=1)
    self.api.register_table_handler(const.L3_LOOKUP_TABLE,
                                    self.packet_in_handler)
def _install_dhcp_flow_for_vm_port(self, lport):
    """Install the DHCP_TABLE flow for a VM port.

    Records the port's per-port rate limiter and installs a flow that
    sends the port's DHCP packets to the controller.
    """
    key = lport.unique_key
    limiter = df_utils.RateLimiter(
        max_rate=self.conf.df_dhcp_max_rate_per_sec, time_unit=1)
    self.unique_key_to_dhcp_app_port_data[key] = (limiter, lport)
    LOG.info("Register VM as DHCP client::port <%s>", lport.id)

    parser = self.parser
    ofproto = self.ofproto
    to_controller = [
        parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                               ofproto.OFPCML_NO_BUFFER),
    ]
    instructions = [
        parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                     to_controller),
    ]
    self.mod_flow(inst=instructions,
                  table_id=const.DHCP_TABLE,
                  priority=const.PRIORITY_MEDIUM,
                  match=parser.OFPMatch(reg6=key))
def __init__(self, chassis_name, nb_api):
    """Initialize the service: NB API, Ryu apps and a sync loop.

    :param chassis_name: name of this chassis (see NOTE below)
    :param nb_api: northbound database API instance
    """
    # NOTE(review): the chassis_name parameter is immediately overwritten
    # by cfg.CONF.host, so the caller-supplied value is ignored — confirm
    # this is intentional.
    chassis_name = cfg.CONF.host
    # Local cache of NB objects.
    self.db_store = db_store.get_instance()
    self.chassis_name = chassis_name
    self.nb_api = nb_api
    self.ip = cfg.CONF.df.local_ip
    app_mgr = app_manager.AppManager.get_instance()
    # NOTE(review): vswitch_api/neutron_server_notifier are None here, so
    # this adapter runs without an OVS API or Neutron notifier attached.
    self.open_flow_app = app_mgr.instantiate(
        ryu_base_app.RyuDFAdapter,
        nb_api=self.nb_api,
        vswitch_api=None,
        neutron_server_notifier=None,
    )
    # The OfctlService is needed to support the 'get_flows' method
    self.open_flow_service = app_mgr.instantiate(of_service.OfctlService)
    self.ryu_switches = app_mgr.instantiate(switches.Switches)
    self.simples_switch = app_mgr.instantiate(concept_app.SimpleSwitch13)
    self.enable_selective_topo_dist = \
        cfg.CONF.df.enable_selective_topology_distribution
    self._sync = sync.Sync(
        nb_api=self.nb_api,
        update_cb=self.update,
        delete_cb=self.delete,
        selective=self.enable_selective_topo_dist,
    )
    # Periodically submits a sync event to the processing loop.
    self._sync_pulse = loopingcall.FixedIntervalLoopingCall(
        self._submit_sync_event)
    # At most one full DB sync per DB_SYNC_MINIMUM_INTERVAL.
    self.sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)
def __init__(self, *args, **kwargs):
    """Set up DNAT rate limiters, NAT packet-in handlers and state."""
    super(DNATApp, self).__init__(*args, **kwargs)
    dnat_conf = cfg.CONF.df_dnat_app
    self.conf = dnat_conf

    # One limiter per direction, for TTL-invalid punts and ICMP errors.
    def _limiter(max_rate):
        return df_utils.RateLimiter(max_rate=max_rate, time_unit=1)

    self.egress_ttl_invalid_handler_rate_limit = _limiter(
        dnat_conf.dnat_ttl_invalid_max_rate)
    self.ingress_ttl_invalid_handler_rate_limit = _limiter(
        dnat_conf.dnat_ttl_invalid_max_rate)
    self.egress_icmp_error_rate_limit = _limiter(
        dnat_conf.dnat_icmp_error_max_rate)
    self.ingress_icmp_error_rate_limit = _limiter(
        dnat_conf.dnat_icmp_error_max_rate)

    for table_id, handler in (
            (const.INGRESS_DNAT_TABLE, self.ingress_packet_in_handler),
            (const.EGRESS_DNAT_TABLE, self.egress_packet_in_handler)):
        self.api.register_table_handler(table_id, handler)

    # Ports whose NAT flows are currently being processed.
    self.lports_in_process = set()
def __init__(self):
    """Set up the publisher service: queue, pub/sub endpoints, NB driver."""
    self._queue = Queue()
    self.publisher = self._get_publisher()
    self.multiproc_subscriber = self._get_multiproc_subscriber()
    driver_cls = importutils.import_class(cfg.CONF.df.nb_db_class)
    self.db = driver_cls()
    self.uuid = pub_sub_api.generate_publisher_uuid()
    # Hard-coded limit: at most 2 events per 30-second window.
    self._rate_limit = df_utils.RateLimiter(2, 30)
def test_rate_limiter_continuus(self):
    """Polling a 3-per-5s limiter once per second for 11s admits 7 calls."""
    limiter = utils.RateLimiter(3, 5)
    allowed = 0
    for _ in range(11):
        if not limiter():
            allowed += 1
        time.sleep(1)
    self.assertEqual(allowed, 7)
def __init__(self, nb_api):
    """Initialize the publisher service bound to the given NB API."""
    self._queue = queue.Queue()
    self.publisher = _get_publisher()
    self.subscriber = self._get_subscriber()
    self.nb_api = nb_api
    self.db = nb_api.driver
    self.uuid = pub_sub_api.generate_publisher_uuid()
    # Throttle published events per operator-configured limits.
    df_conf = cfg.CONF.df
    self._rate_limit = df_utils.RateLimiter(
        df_conf.publisher_rate_limit_count,
        df_conf.publisher_rate_limit_timeout,
    )
def test_rate_limiter_oneshot(self):
    """A 3-per-5s limiter admits 3 calls per burst and refills after 5s."""
    limiter = utils.RateLimiter(3, 5)
    passed = 0
    for _ in range(5):
        if not limiter():
            passed += 1
    self.assertEqual(passed, 3)
    # After the window expires the limiter should admit 3 more calls.
    time.sleep(5)
    for _ in range(5):
        if not limiter():
            passed += 1
    self.assertEqual(passed, 6)
def __init__(self):
    """Set up the publisher service: queue, pub/sub endpoints, NB driver."""
    self._queue = queue.Queue()
    self.publisher = self._get_publisher()
    self.multiproc_subscriber = self._get_multiproc_subscriber()
    # NB driver is resolved through the driver-loading helper.
    self.db = df_utils.load_driver(
        cfg.CONF.df.nb_db_class, df_utils.DF_NB_DB_DRIVER_NAMESPACE)
    self.uuid = pub_sub_api.generate_publisher_uuid()
    # Throttle published events per operator-configured limits.
    df_conf = cfg.CONF.df
    self._rate_limit = df_utils.RateLimiter(
        df_conf.publisher_rate_limit_count,
        df_conf.publisher_rate_limit_timeout,
    )
def __init__(self, chassis_name, nb_api):
    """Initialize the DF controller: NB API wiring, switch backend, sync.

    :param chassis_name: name of this chassis
    :param nb_api: northbound database API instance
    """
    # Local cache for NB objects.
    self.db_store = db_store.get_instance()
    self._queue = queue.PriorityQueue()
    # pending_id -> (model, pender_id)
    # 'pending_id' is the ID of the object for which we are waiting.
    # 'model' and 'pender_id' are the model and the ID of the object
    # which is waiting for the object described by 'pending_id'
    self._pending_objects = collections.defaultdict(set)
    self.chassis_name = chassis_name
    # Northbound API handle.
    self.nb_api = nb_api
    # Register the handler for database change events.
    self.nb_api.set_db_change_callback(self.db_change_callback)
    # Local IP address of this host.
    self.ip = cfg.CONF.df.local_ip
    # Virtual tunnel port support multiple tunnel types together
    self.tunnel_types = cfg.CONF.df.tunnel_types
    self.neutron_notifier = None
    if cfg.CONF.df.enable_neutron_notifier:
        self.neutron_notifier = df_utils.load_driver(
            cfg.CONF.df.neutron_notifier,
            df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    # Load the switch backend driver.
    self.switch_backend = df_utils.load_driver(
        cfg.CONF.df.switch_backend,
        df_utils.DF_SWITCH_BACKEND_DRIVER_NAMESPACE,
        nb_api,
        cfg.CONF.df.management_ip)
    # Initialize the switch backend.
    self.switch_backend.initialize(self.db_change_callback,
                                   self.neutron_notifier)
    self.topology = None
    self.enable_selective_topo_dist = \
        cfg.CONF.df.enable_selective_topology_distribution
    self._sync = sync.Sync(
        nb_api=self.nb_api,
        # Update/delete callbacks used during synchronization.
        update_cb=self.update,
        delete_cb=self.delete,
        selective=self.enable_selective_topo_dist,
    )
    # Periodically emits a controller_sync event.
    self._sync_pulse = loopingcall.FixedIntervalLoopingCall(
        self._submit_sync_event)
    # At most one full DB sync per DB_SYNC_MINIMUM_INTERVAL.
    self.sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)
def __init__(self, chassis_name, nb_api):
    """Boot the local controller: NB API, OVS API, Ryu apps and sync."""
    df_conf = cfg.CONF.df
    self.db_store = db_store.get_instance()
    self._queue = queue.PriorityQueue()
    # pending_id -> (model, pender_id)
    # 'pending_id' is the ID of the object for which we are waiting.
    # 'model' and 'pender_id' are the model and the ID of the object
    # which is waiting for the object described by 'pending_id'
    self._pending_objects = collections.defaultdict(set)
    self.chassis_name = chassis_name
    self.nb_api = nb_api
    self.nb_api.set_db_change_callback(self.db_change_callback)
    self.ip = df_conf.local_ip
    # Virtual tunnel port support multiple tunnel types together
    self.tunnel_types = df_conf.tunnel_types
    self.sync_finished = False
    self.vswitch_api = vswitch_impl.OvsApi(df_conf.management_ip)
    if df_conf.enable_neutron_notifier:
        self.neutron_notifier = df_utils.load_driver(
            df_conf.neutron_notifier,
            df_utils.DF_NEUTRON_NOTIFIER_DRIVER_NAMESPACE)
    else:
        self.neutron_notifier = None
    app_mgr = app_manager.AppManager.get_instance()
    self.open_flow_app = app_mgr.instantiate(
        ryu_base_app.RyuDFAdapter,
        nb_api=self.nb_api,
        vswitch_api=self.vswitch_api,
        neutron_server_notifier=self.neutron_notifier,
        db_change_callback=self.db_change_callback
    )
    # The OfctlService is needed to support the 'get_flows' method
    self.open_flow_service = app_mgr.instantiate(of_service.OfctlService)
    self.topology = None
    self.enable_selective_topo_dist = \
        df_conf.enable_selective_topology_distribution
    self._sync = sync.Sync(
        nb_api=self.nb_api,
        update_cb=self.update,
        delete_cb=self.delete,
        selective=self.enable_selective_topo_dist,
    )
    # Periodically submits a sync event to the processing loop.
    self._sync_pulse = loopingcall.FixedIntervalLoopingCall(
        self._submit_sync_event)
    # At most one full DB sync per DB_SYNC_MINIMUM_INTERVAL.
    self.sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)
def add_local_port(self, lport):
    """Register a newly-added local port with the DHCP application.

    Records the port's rate limiter in local_tunnel_to_pid_map; then, if
    DHCP is enabled on the port's network and the port belongs to a VM,
    installs a flow punting its DHCP traffic to the controller.

    :param lport: logical port being added to this chassis
    """
    network_id = lport.get_external_value('local_network_id')
    if self.get_datapath() is None:
        return
    if not netaddr.valid_ipv4(lport.get_ip()):
        LOG.warning(_LW("No support for non IPv4 protocol"))
        return

    lport_id = lport.get_id()
    tunnel_key = lport.get_tunnel_key()
    # FIX: 'ofport' was fetched twice; a single lookup suffices.
    ofport = lport.get_external_value('ofport')
    port_rate_limiter = df_utils.RateLimiter(
        max_rate=self.conf.df_dhcp_max_rate_per_sec, time_unit=1)
    self.local_tunnel_to_pid_map[tunnel_key] = (port_rate_limiter,
                                                ofport,
                                                lport_id)

    if not self._is_dhcp_enabled_on_network(lport, network_id):
        return
    if not self._is_port_a_vm(lport):
        return

    # FIX: pass the argument lazily instead of eager %-formatting, so the
    # message is only rendered when the log level is enabled.
    LOG.info(_LI("Register VM as DHCP client::port <%s>"), lport_id)

    parser = self.get_datapath().ofproto_parser
    ofproto = self.get_datapath().ofproto
    match = parser.OFPMatch()
    match.set_in_port(ofport)
    # Tag the packet with the port's tunnel key, then punt to controller.
    actions = [
        parser.OFPActionSetField(metadata=tunnel_key),
        parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                               ofproto.OFPCML_NO_BUFFER),
    ]
    inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                         actions)]
    self.mod_flow(
        self.get_datapath(),
        inst=inst,
        table_id=const.DHCP_TABLE,
        priority=const.PRIORITY_MEDIUM,
        match=match)
def __init__(self, chassis_name, nb_api):
    """Minimal controller initialization: NB wiring plus a sync limiter.

    FIX: removed a large block of commented-out bootstrap code (OVS API,
    Ryu app instantiation, sync loop) that was dead weight; version
    control preserves it if it is ever needed again.

    :param chassis_name: name of this chassis
    :param nb_api: northbound database API instance
    """
    self.db_store = db_store.get_instance()
    self._queue = queue.PriorityQueue()
    self.chassis_name = chassis_name
    self.nb_api = nb_api
    # Register the handler for database change events.
    self.nb_api.set_db_change_callback(self.db_change_callback)
    self.ip = cfg.CONF.df.local_ip
    # Virtual tunnel port support multiple tunnel types together
    self.tunnel_types = cfg.CONF.df.tunnel_types
    # At most one full DB sync per DB_SYNC_MINIMUM_INTERVAL.
    self.sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)
def _read_db_changes_from_queue(self):
    """Consume DB change events from the queue and apply them forever.

    Events missing a value (except delete/log/dbrestart) have it fetched
    from the driver first. On failure the handler falls back to a full DB
    sync, rate-limited to one per DB_SYNC_MINIMUM_INTERVAL.
    """
    sync_rate_limiter = df_utils.RateLimiter(
        max_rate=1, time_unit=db_common.DB_SYNC_MINIMUM_INTERVAL)
    while True:
        self.next_update = self._queue.get(block=True)
        LOG.debug("Event update: %s", self.next_update)
        try:
            value = self.next_update.value
            if (not value and self.next_update.action not in
                    {'delete', 'log', 'dbrestart'}):
                if self.next_update.table and self.next_update.key:
                    value = self.driver.get_key(self.next_update.table,
                                                self.next_update.key)
            self.apply_db_change(self.next_update.table,
                                 self.next_update.key,
                                 self.next_update.action,
                                 value)
        except Exception as e:
            # FIX: Exception.message was removed in Python 3; use str(e)
            # to inspect the message portably (the old code raised
            # AttributeError inside this handler on py3).
            if "ofport is 0" not in str(e):
                LOG.exception(e)
            # Recover by scheduling a rate-limited full sync.
            if not sync_rate_limiter():
                self.apply_db_change(None, None, 'sync', None)
        self._queue.task_done()