def register(callback, agent_type):
    """Subscribe callback to init event for the specified agent.

    :param agent_type: an agent type as defined in neutron_lib.constants.
    :param callback: a callback that can process the agent init event.
    """
    registry.subscribe(callback, agent_type, events.AFTER_INIT)

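# A minimal usage sketch for register() above. The callback name and body
# are hypothetical; AGENT_TYPE_L3 is a real constant from
# neutron_lib.constants, which is where the docstring says agent types are
# defined.
from neutron_lib import constants as n_const


def _log_agent_init(resource, event, trigger, payload=None):
    # Invoked once the agent publishes its AFTER_INIT event; `resource`
    # is the agent type string the callback was subscribed with.
    print('%s received for agent type %s' % (event, resource))


register(_log_agent_init, n_const.AGENT_TYPE_L3)
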
def test__flood_cache_for_query_pulls_once(self):
    resources = [OVOLikeThing(66), OVOLikeThing(67)]
    received_kw = []
    receiver = lambda *a, **k: received_kw.append(k)
    registry.subscribe(receiver, 'goose', events.AFTER_UPDATE)
    self._pullmock.bulk_pull.side_effect = [
        resources,
        [resources[0]],
        [resources[1]],
        [resources[1]]
    ]
    self.rcache._flood_cache_for_query('goose', id=(66, 67),
                                       name=('a', 'b'))
    self._pullmock.bulk_pull.assert_called_once_with(
        mock.ANY, 'goose',
        filter_kwargs={'id': (66, 67), 'name': ('a', 'b')})
    self._pullmock.bulk_pull.reset_mock()

    # the first flood already satisfies these narrower queries, so no
    # new pulls are expected
    self.rcache._flood_cache_for_query('goose', id=(66, ), name=('a', ))
    self.assertFalse(self._pullmock.bulk_pull.called)
    self.rcache._flood_cache_for_query('goose', id=(67, ), name=('b', ))
    self.assertFalse(self._pullmock.bulk_pull.called)

    # querying by just ID should trigger a new call since ID+name is a
    # more specific query
    self.rcache._flood_cache_for_query('goose', id=(67, ))
    self._pullmock.bulk_pull.assert_called_once_with(
        mock.ANY, 'goose', filter_kwargs={'id': (67, )})

    self.assertItemsEqual(
        resources, [rec['updated'] for rec in received_kw])

def test_treat_devices_removed_notify(self):
    handler = mock.Mock()
    registry.subscribe(handler, resources.PORT_DEVICE,
                       events.AFTER_DELETE)
    devices = [DEVICE_1]
    self.agent.treat_devices_removed(devices)
    handler.assert_called_once_with(mock.ANY, mock.ANY, self.agent,
                                    payload=mock.ANY)

def __init__(self, rcache):
    self.rcache = rcache
    registry.subscribe(self._clear_child_sg_rules, 'SecurityGroup',
                       events.AFTER_DELETE)
    registry.subscribe(self._add_child_sg_rules, 'SecurityGroup',
                       events.AFTER_UPDATE)
    # set this attr so agent can adjust the timeout of the client
    self.client = resources_rpc.ResourcesPullRpcApi().client

def setUp(self):
    super(TestStatusBarriers, self).setUp()
    self.setup_coreplugin(CORE_PLUGIN)
    self.ctx = n_ctx.get_admin_context()
    self.provisioned = mock.Mock()
    self.port = self._make_port()
    registry.subscribe(self.provisioned, resources.PORT,
                       pb.PROVISIONING_COMPLETE)

def _unsubscribe_callback_events(self):
    # unsubscribe the callback that should be called on all plugins
    # other than NSX-T.
    registry.unsubscribe_all(
        l3_db.L3_NAT_dbonly_mixin._prevent_l3_port_delete_callback)
    # Instead we will subscribe our internal callback.
    registry.subscribe(self._prevent_l3_port_delete_callback,
                       resources.PORT, events.BEFORE_DELETE)

def register():
    """Register the driver."""
    global DRIVER
    DRIVER = OVSDriver.create()
    # To set the bridge_name in a parent port's vif_details.
    registry.subscribe(vif_details_bridge_name_handler,
                       agent_consts.OVS_BRIDGE_NAME,
                       events.BEFORE_READ)
    LOG.debug('Open vSwitch trunk driver registered')

def initialize(self):
    super(AristaHAScaleSimulationDriver, self).initialize()
    self.context = context.get_admin_context_without_session()
    # Subscribe to port updates to force ports to active after binding:
    # a fake virt driver is being used, so OVS will never see the
    # libvirt interfaces come up to trigger the OVS provisioning.
    self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
    registry.subscribe(self._port_update_callback,
                       resources.PORT, events.AFTER_UPDATE)

def __init__(self, resource, object_class, resource_push_api):
    self._resource = resource
    self._obj_class = object_class
    self._resource_push_api = resource_push_api
    self._resources_to_push = {}
    self._worker_pool = eventlet.GreenPool()
    for event in (events.AFTER_CREATE, events.AFTER_UPDATE,
                  events.AFTER_DELETE):
        registry.subscribe(self.handle_event, resource, event)

def test_security_group_precommit_create_event_fail(self):
    registry.subscribe(fake_callback, resources.SECURITY_GROUP,
                       events.PRECOMMIT_CREATE)
    with mock.patch.object(sqlalchemy.orm.session.SessionTransaction,
                           'rollback') as mock_rollback:
        self.assertRaises(securitygroup.SecurityGroupConflict,
                          self.mixin.create_security_group,
                          self.ctx, FAKE_SECGROUP)
        self.assertTrue(mock_rollback.called)

def register_legacy_notification_callbacks(self, legacy_interface):
    """Emulates the server-side notifications from ml2 AgentNotifierApi.

    legacy_interface is an object with 'delete'/'update' methods for
    core resources.
    """
    self._legacy_interface = legacy_interface
    for e in (callback_events.AFTER_UPDATE, callback_events.AFTER_DELETE):
        for r in (resources.PORT, resources.NETWORK):
            registry.subscribe(self._legacy_notifier, r, e)

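# A minimal sketch of the legacy_interface contract described in the
# docstring above. The class and method bodies are hypothetical; they only
# illustrate the shape of an object carrying 'update'/'delete' handlers for
# the core PORT and NETWORK resources.
class FakeLegacyInterface(object):
    def port_update(self, context, port=None):
        pass

    def port_delete(self, context, port_id=None):
        pass

    def network_update(self, context, network=None):
        pass

    def network_delete(self, context, network_id=None):
        pass
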
def __new__(cls, *args, **kwargs):
    # NOTE(kevinbenton): we subscribe on object construction because
    # the tests blow away the callback manager for each run
    new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
                                                         **kwargs)
    registry.subscribe(_ensure_external_network_default_value_callback,
                       resources.NETWORK, events.PRECOMMIT_UPDATE)
    registry.subscribe(_ensure_external_network_default_value_callback,
                       resources.NETWORK, events.PRECOMMIT_CREATE)
    return new

def test_record_resource_delete_ignores_dups(self):
    received_kw = []
    receiver = lambda *a, **k: received_kw.append(k)
    registry.subscribe(receiver, 'goose', events.AFTER_DELETE)
    self.rcache.record_resource_delete(self.ctx, 'goose', 3)
    self.assertEqual(1, len(received_kw))
    self.rcache.record_resource_delete(self.ctx, 'goose', 4)
    self.assertEqual(2, len(received_kw))
    self.rcache.record_resource_delete(self.ctx, 'goose', 3)
    self.assertEqual(2, len(received_kw))

def _register_subnet_events(self):
    # REVISIT(leyal): check if we need to handle the events inside the
    # neutron-db transaction
    function_by_action = {
        events.AFTER_CREATE: self._subnet_create_handler,
        events.AFTER_UPDATE: self._subnet_update_handler,
        events.AFTER_DELETE: self._subnet_delete_handler
    }
    for action, func in function_by_action.items():
        registry.subscribe(func, resources.SUBNET, action)

def test_treat_devices_removed_notify(self):
    handler = mock.Mock()
    registry.subscribe(handler, resources.PORT_DEVICE,
                       events.AFTER_DELETE)
    devices = [DEVICE_1]
    self.agent.treat_devices_removed(devices)
    handler.assert_called_once_with(mock.ANY, mock.ANY, self.agent,
                                    context=mock.ANY, device=DEVICE_1,
                                    port_id=mock.ANY)

def register(self, resource, event, trigger, **kwargs):
    super(OVNTrunkDriver, self).register(
        resource, event, trigger, **kwargs)
    self._handler = OVNTrunkHandler(self.plugin_driver)
    for trunk_event in (events.AFTER_CREATE, events.AFTER_DELETE):
        registry.subscribe(self._handler.trunk_event,
                           trunk_consts.TRUNK,
                           trunk_event)
        registry.subscribe(self._handler.subport_event,
                           trunk_consts.SUBPORTS,
                           trunk_event)

def test_record_resource_delete_ignores_dups(self):
    received_kw = []
    receiver = lambda r, e, t, payload: received_kw.append(payload)
    registry.subscribe(receiver, 'goose', events.AFTER_DELETE)
    self.rcache.record_resource_delete(self.ctx, 'goose', 3)
    self.assertEqual(1, len(received_kw))
    self.rcache.record_resource_delete(self.ctx, 'goose', 4)
    self.assertEqual(2, len(received_kw))
    self.rcache.record_resource_delete(self.ctx, 'goose', 3)
    self.assertEqual(2, len(received_kw))

def initialize(self):
    self._nsxv = vcns_driver.VcnsDriver(None)
    self.init_profile_id()
    self.init_security_group()
    self.init_security_group_in_profile()
    # register a callback to run at the end of init to handle the
    # first upgrade
    if self._is_new_security_group:
        registry.subscribe(self.init_complete, resources.PROCESS,
                           events.BEFORE_SPAWN)

def _register_callbacks(self):
    registry.subscribe(self.floatingip_update_callback,
                       resources.FLOATING_IP,
                       events.AFTER_UPDATE)
    registry.subscribe(self.router_port_callback,
                       resources.ROUTER_INTERFACE,
                       events.AFTER_CREATE)
    registry.subscribe(self.router_port_callback,
                       resources.ROUTER_INTERFACE,
                       events.AFTER_DELETE)
    registry.subscribe(self.router_port_callback,
                       resources.ROUTER_GATEWAY,
                       events.AFTER_CREATE)
    registry.subscribe(self.router_port_callback,
                       resources.ROUTER_GATEWAY,
                       events.AFTER_DELETE)

def test_adding_component_for_new_resource_type(self):
    provisioned = mock.Mock()
    registry.subscribe(provisioned, 'NETWORK', pb.PROVISIONING_COMPLETE)
    net = self._make_net()
    # expect failure because the model was not registered for the type
    with testtools.ExpectedException(RuntimeError):
        pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
    pb.add_model_for_resource('NETWORK', models_v2.Network)
    pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
    pb.provisioning_complete(self.ctx, net.id, 'NETWORK', 'ent')
    self.assertTrue(provisioned.called)

def test__set_bridge_name_notify(self):
    def fake_callback(resource, event, trigger, **kwargs):
        trigger('fake-br-name')

    registry.subscribe(fake_callback, a_const.OVS_BRIDGE_NAME,
                       events.BEFORE_READ)
    fake_vif_details = {}
    self.driver._set_bridge_name('foo', fake_vif_details)
    self.assertEqual(
        'fake-br-name',
        fake_vif_details.get(portbindings.VIF_DETAILS_BRIDGE_NAME, ''))

def register(self, resource, event, trigger, payload=None):
    super(OVNTrunkDriver, self).register(resource, event, trigger,
                                         payload=payload)
    self._handler = OVNTrunkHandler(self.plugin_driver)
    for trunk_event in (events.AFTER_CREATE, events.AFTER_DELETE):
        registry.subscribe(self._handler.trunk_event,
                           resources.TRUNK,
                           trunk_event)
        registry.subscribe(self._handler.subport_event,
                           resources.SUBPORTS,
                           trunk_event)

def test_start_all_workers(self):
    cfg.CONF.set_override('api_workers', 0)
    mock.patch.object(service, '_get_rpc_workers').start()
    mock.patch.object(service, '_get_plugins_workers').start()
    mock.patch.object(service, '_start_workers').start()

    callback = mock.Mock()
    registry.subscribe(callback, resources.PROCESS, events.AFTER_SPAWN)
    service.start_all_workers()
    callback.assert_called_once_with(
        resources.PROCESS, events.AFTER_SPAWN, mock.ANY)

def __init__(self):
    """Initialize an RPC backend for the Neutron Server."""
    self._skeleton = server.TrunkSkeleton()
    self._stub = server.TrunkStub()
    LOG.debug("RPC backend initialized for trunk plugin")
    for event_type in (events.AFTER_CREATE, events.AFTER_DELETE):
        registry.subscribe(self.process_event,
                           resources.TRUNK, event_type)
        registry.subscribe(self.process_event,
                           resources.SUBPORTS, event_type)

def register(self, resource, event, trigger, payload=None):
    super(NsxV3TrunkDriver, self).register(
        resource, event, trigger, payload=payload)
    self._handler = NsxV3TrunkHandler(self.plugin_driver)
    for trunk_event in (events.AFTER_CREATE, events.AFTER_DELETE):
        registry.subscribe(self._handler.trunk_event,
                           resources.TRUNK, trunk_event)
        registry.subscribe(self._handler.subport_event,
                           resources.SUBPORTS, trunk_event)
    LOG.debug("VMware NSXv3 trunk driver initialized.")

def register(self, resource, event, trigger, payload=None):
    super(NetAnsibleTrunkDriver, self).register(resource, event, trigger,
                                                payload=payload)
    self._handler = NetAnsibleTrunkHandler(self.plugin_driver)
    registry.subscribe(self._handler.subports_added,
                       resources.SUBPORTS,
                       events.AFTER_CREATE)
    registry.subscribe(self._handler.subports_deleted,
                       resources.SUBPORTS,
                       events.AFTER_DELETE)

def __init__(self, notify_bound):
    super(EtcdAgentCommunicator, self).__init__()
    LOG.debug("Using etcd host:%s port:%s user:%s",
              cfg.CONF.ml2_vpp.etcd_host,
              cfg.CONF.ml2_vpp.etcd_port,
              cfg.CONF.ml2_vpp.etcd_user)

    # This is a function that is called when a port has been
    # notified from the agent via etcd as completely attached.
    # We call this when we're certain that the VPP on the far end
    # has definitely bound the port, and has dropped a vhost-user
    # socket where it can be found.
    #
    # This is more important than it seems, because libvirt will
    # hang while qemu ignores its monitor port, which happens when
    # qemu is waiting for a partner to connect on its vhost-user
    # interfaces. It can't start the VM - that requires information
    # from its partner it can't guess at - but it shouldn't hang the
    # monitor, nevertheless... So we notify when the port is there
    # and ready, and qemu is never put into this state by Nova.
    self.notify_bound = notify_bound

    # We need certain directories to exist
    self.state_key_space = LEADIN + '/state'
    self.port_key_space = LEADIN + '/nodes'
    self.secgroup_key_space = LEADIN + '/global/secgroups'
    self.remote_group_key_space = LEADIN + '/global/remote_group'
    self.gpe_key_space = LEADIN + '/global/networks/gpe'

    etcd_client = self.client_factory.client()
    etcd_helper = etcdutils.EtcdHelper(etcd_client)
    etcd_helper.ensure_dir(self.state_key_space)
    etcd_helper.ensure_dir(self.port_key_space)
    etcd_helper.ensure_dir(self.secgroup_key_space)
    etcd_helper.ensure_dir(self.election_key_space)
    etcd_helper.ensure_dir(self.remote_group_key_space)

    self.secgroup_enabled = cfg.CONF.SECURITYGROUP.enable_security_group
    if self.secgroup_enabled:
        self.register_secgroup_event_handler()

    # TODO(ijw): .../state/<host> lists all known hosts, and they
    # heartbeat when they're functioning

    # From this point on, there are multiple threads: ensure that
    # we don't re-use the etcd_client from multiple threads
    # simultaneously
    etcd_helper = None
    etcd_client = None

    registry.subscribe(self.start_threads, resources.PROCESS,
                       events.AFTER_SPAWN)

def subscribe(self):
    # Subscribe to the events related to security groups and rules
    registry.subscribe(self.create_security_group,
                       resources.SECURITY_GROUP, events.AFTER_CREATE)
    registry.subscribe(self.update_security_group,
                       resources.SECURITY_GROUP, events.AFTER_UPDATE)
    registry.subscribe(self.delete_security_group,
                       resources.SECURITY_GROUP, events.BEFORE_DELETE)
    registry.subscribe(self.create_security_group_rule,
                       resources.SECURITY_GROUP_RULE, events.AFTER_CREATE)
    registry.subscribe(self.delete_security_group_rule,
                       resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE)

def test__set_bridge_name_notify(self):
    def fake_callback(resource, event, trigger, payload=None):
        trigger('fake-br-name')

    registry.subscribe(fake_callback, a_const.OVS_BRIDGE_NAME,
                       events.BEFORE_READ)
    fake_vif_details = {}
    self.driver._set_bridge_name('foo', fake_vif_details)
    self.assertEqual(
        'fake-br-name',
        fake_vif_details.get(portbindings.VIF_DETAILS_BRIDGE_NAME, ''))

def test_security_groups_created_outside_transaction(self):
    def record_after_state(r, e, t, context, *args, **kwargs):
        self.was_active = context.session.is_active

    registry.subscribe(record_after_state, resources.SECURITY_GROUP,
                       events.AFTER_CREATE)
    with self.subnet() as s:
        self.assertFalse(self.was_active)
        self._delete(
            'security-groups',
            self._list('security-groups')['security_groups'][0]['id'])
        with self.port(subnet=s):
            self.assertFalse(self.was_active)

def __init__(self, service_plugin):
    self.vpn_plugin = service_plugin
    self._core_plugin = directory.get_plugin()
    if self._core_plugin.is_tvd_plugin():
        self._core_plugin = self._core_plugin.get_plugin_by_type(
            projectpluginmap.NsxPlugins.NSX_T)
    self._nsxlib = self._core_plugin.nsxlib
    self._nsx_vpn = self._nsxlib.vpn_ipsec
    validator = ipsec_validator.IPsecV3Validator(service_plugin)
    super(NSXv3IPsecVpnDriver, self).__init__(service_plugin, validator)
    registry.subscribe(self._delete_local_endpoint,
                       resources.ROUTER_GATEWAY,
                       events.AFTER_DELETE)

def __init__(self):
    self._rpc_backend = None
    self._drivers = []
    self._segmentation_types = {}
    self._interfaces = set()
    self._agent_types = set()
    drivers.register()
    registry.subscribe(rules.enforce_port_deletion_rules,
                       resources.PORT, events.BEFORE_DELETE)
    registry.notify(constants.TRUNK_PLUGIN, events.AFTER_INIT, self)
    for driver in self._drivers:
        LOG.debug('Trunk plugin loaded with driver %s', driver.name)
    self.check_compatibility()

def test_security_group_rule_precommit_create_event_fail(self):
    registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE,
                       events.PRECOMMIT_CREATE)
    sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP)
    fake_rule = FAKE_SECGROUP_RULE
    fake_rule['security_group_rule']['security_group_id'] = sg_dict['id']
    with mock.patch.object(sqlalchemy.orm.session.SessionTransaction,
                           'rollback') as mock_rollback,\
            mock.patch.object(self.mixin, '_get_security_group'):
        self.assertRaises(securitygroup.SecurityGroupConflict,
                          self.mixin.create_security_group_rule,
                          self.ctx, fake_rule)
        self.assertTrue(mock_rollback.called)

def test_ovs_restarted_event(self):
    callback = mock.Mock()
    self.setup_agent_and_ports(port_dicts=self.create_test_ports())
    registry.subscribe(callback, resources.AGENT, events.OVS_RESTARTED)
    self.agent.check_ovs_status.return_value = constants.OVS_RESTARTED
    utils.wait_until_true(lambda: callback.call_count, timeout=10)
    callback.assert_called_with(resources.AGENT, events.OVS_RESTARTED,
                                mock.ANY)

def __init__(self):
    self._rpc_backend = None
    self._drivers = []
    self._segmentation_types = {}
    self._interfaces = set()
    self._agent_types = set()
    drivers.register()
    registry.subscribe(rules.enforce_port_deletion_rules,
                       resources.PORT, events.BEFORE_DELETE)
    registry.publish(resources.TRUNK_PLUGIN, events.AFTER_INIT, self)
    for driver in self._drivers:
        LOG.debug('Trunk plugin loaded with driver %s', driver.name)
    self.check_compatibility()

def __init__(self):
    super(BGPVPNPlugin, self).__init__()

    # Need to look into /etc/neutron/networking_bgpvpn.conf for
    # service_provider definitions:
    service_type_manager = st_db.ServiceTypeManager.get_instance()
    service_type_manager.add_provider_configuration(
        bgpvpn_def.LABEL,
        pconf.ProviderConfiguration('networking_bgpvpn'))

    # Load the default driver
    drivers, default_provider = service_base.load_drivers(
        bgpvpn_def.LABEL, self)
    LOG.info("BGP VPN Service Plugin using Service Driver: %s",
             default_provider)
    self.driver = drivers[default_provider]

    if len(drivers) > 1:
        LOG.warning("Multiple drivers configured for BGPVPN, although "
                    "running multiple drivers in parallel is not yet "
                    "supported")

    registry.subscribe(self._notify_adding_interface_to_router,
                       resources.ROUTER_INTERFACE,
                       events.BEFORE_CREATE)
    registry.subscribe(self._notify_host_updated,
                       n_resources.HOST,
                       events.AFTER_UPDATE)
    registry.subscribe(self._notify_agent_updated,
                       resources.AGENT,
                       events.AFTER_UPDATE)
    registry.subscribe(self._notify_removing_vpn_from_speaker,
                       dr_resources.BGP_SPEAKER_VPN_ASSOC,
                       events.AFTER_DELETE)

def test_record_resource_delete(self):
    received_kw = []
    receiver = lambda *a, **k: received_kw.append(k)
    registry.subscribe(receiver, 'goose', events.AFTER_DELETE)
    self.rcache.record_resource_update(self.ctx, 'goose',
                                       OVOLikeThing(3, size='large'))
    self.rcache.record_resource_delete(self.ctx, 'goose', 3)
    self.assertEqual(1, len(received_kw))
    self.assertEqual(3, received_kw[0]['existing'].id)
    self.assertEqual(3, received_kw[0]['resource_id'])
    # deletes of non-existing cache items are still honored
    self.rcache.record_resource_delete(self.ctx, 'goose', 4)
    self.assertEqual(2, len(received_kw))
    self.assertIsNone(received_kw[1]['existing'])
    self.assertEqual(4, received_kw[1]['resource_id'])

def __init__(self, resource, object_class, resource_push_api):
    self._resource = resource
    self._obj_class = object_class
    self._resource_push_api = resource_push_api
    self._resources_to_push = {}
    # NOTE(annp): uWSGI seems not happy with eventlet.GreenPool,
    # so switching to ThreadPool
    self._worker_pool = futurist.ThreadPoolExecutor()
    self.fts = []
    self._semantic_warned = False
    for event in (events.AFTER_CREATE, events.AFTER_UPDATE,
                  events.AFTER_DELETE):
        registry.subscribe(self.handle_event, resource, event)

def register(self, resource, event, trigger, **kwargs):
    super(NuageTrunkDriver, self).register(resource, event, trigger,
                                           **kwargs)
    self._handler = NuageTrunkHandler(self.plugin_driver)
    for trunk_event in (events.AFTER_CREATE, events.AFTER_DELETE):
        registry.subscribe(self._handler.trunk_event,
                           resources.TRUNK, trunk_event)
        registry.subscribe(self._handler.subport_event,
                           resources.SUBPORTS, trunk_event)
    registry.subscribe(self._handler._trunk_status_change,
                       resources.PORT, events.AFTER_UPDATE)
    registry.subscribe(self._handler.subport_event,
                       resources.SUBPORTS, events.PRECOMMIT_CREATE)
    registry.subscribe(self._handler.trunk_event,
                       resources.TRUNK, events.PRECOMMIT_CREATE)

def subscribe():
    registry.subscribe(vpn_router_gateway_callback,
                       resources.ROUTER_GATEWAY,
                       events.BEFORE_DELETE)
    registry.subscribe(vpn_router_gateway_callback,
                       resources.ROUTER_INTERFACE,
                       events.BEFORE_DELETE)
    registry.subscribe(migration_callback,
                       resources.ROUTER,
                       events.BEFORE_UPDATE)
    registry.subscribe(subnet_callback,
                       resources.SUBNET,
                       events.BEFORE_DELETE)

def _register_postcommit_callbacks(self):
    registry.subscribe(self.update_floatingip_postcommit,
                       resources.FLOATING_IP,
                       events.AFTER_UPDATE)
    registry.subscribe(self.delete_floatingip_postcommit,
                       resources.FLOATING_IP,
                       events.AFTER_DELETE)
    registry.subscribe(self.update_router_gateway_postcommit,
                       resources.ROUTER_GATEWAY,
                       events.AFTER_CREATE)
    registry.subscribe(self.update_router_gateway_postcommit,
                       resources.ROUTER_GATEWAY,
                       events.AFTER_DELETE)

def subscribe():
    registry.subscribe(_update_segment_host_mapping_for_agent,
                       resources.AGENT,
                       events.AFTER_CREATE)
    registry.subscribe(_update_segment_host_mapping_for_agent,
                       resources.AGENT,
                       events.AFTER_UPDATE)
    registry.subscribe(_add_segment_host_mapping_for_segment,
                       resources.SEGMENT,
                       events.PRECOMMIT_CREATE)
    registry.subscribe(_delete_segments_for_network,
                       resources.NETWORK,
                       events.PRECOMMIT_DELETE)

def __init__(self, resource, object_class, resource_push_api):
    self._resource = resource
    self._obj_class = object_class
    self._resource_push_api = resource_push_api
    self._resources_to_push = queue.Queue()
    self._semantic_warned = False
    for event in (events.AFTER_CREATE, events.AFTER_UPDATE,
                  events.AFTER_DELETE):
        registry.subscribe(self.handle_event, resource, event)
    self._stop = threading.Event()
    self._worker = threading.Thread(
        target=self.dispatch_events,
        name='ObjectChangeHandler[%s]' % self._resource,
        daemon=True)
    self._worker.start()
    self._TO_CLEAN.add(self)

def initialize(self):
    super(NsxPolicyMappingDriver, self).initialize()
    self._gbp_plugin = None
    self.nsx_policy = self.get_nsxpolicy_lib()
    # reinitialize the cluster upon fork for api workers to ensure each
    # process has its own keepalive loops + state
    registry.subscribe(self.nsx_policy.reinitialize_cluster,
                       resources.PROCESS, events.AFTER_INIT)
    self.policy_api = self.nsx_policy.policy_api

    self.nsx_manager = self.get_nsxmanager_lib()
    registry.subscribe(self.nsx_manager.reinitialize_cluster,
                       resources.PROCESS, events.AFTER_INIT)
    self.nsx_port = nsx_resources.LogicalPort(self.nsx_manager.client)
    self._verify_enforcement_point()

def __init__(self):
    super(VppTrunkPlugin, self).__init__()
    self.communicator = JournalManager()
    # Supported segmentation type is VLAN
    self._segmentation_types = {
        trunk_const.VLAN: plugin_utils.is_valid_vlan_tag
    }
    # This is needed to prevent deletion of trunk's parent or sub port
    # without first deleting the trunk itself
    registry.subscribe(rules.enforce_port_deletion_rules,
                       resources.PORT, events.BEFORE_DELETE)
    # Subscribe to trunk parent-port binding events.
    # We use this event to trigger the etcd trunk key update.
    registry.subscribe(self._trigger_etcd_trunk_update,
                       resources.PORT, events.AFTER_UPDATE)
    registry.notify(trunk_const.TRUNK_PLUGIN, events.AFTER_INIT, self)
    LOG.debug('vpp-trunk: vpp trunk service plugin has initialized')

def register_legacy_sg_notification_callbacks(self, sg_agent):
    self._sg_agent = sg_agent
    registry.subscribe(self._handle_sg_rule_delete,
                       'SecurityGroupRule', events.AFTER_DELETE)
    registry.subscribe(self._handle_sg_rule_update,
                       'SecurityGroupRule', events.AFTER_UPDATE)
    registry.subscribe(self._handle_sg_member_delete,
                       'Port', events.AFTER_DELETE)
    registry.subscribe(self._handle_sg_member_update,
                       'Port', events.AFTER_UPDATE)

def __init__(self, service_plugin):
    self.vpn_plugin = service_plugin
    self._core_plugin = directory.get_plugin()
    if self._core_plugin.is_tvd_plugin():
        self._core_plugin = self._core_plugin.get_plugin_by_type(
            projectpluginmap.NsxPlugins.NSX_T)
    self._nsxlib = self._core_plugin.nsxlib
    self._nsx_vpn = self._nsxlib.vpn_ipsec
    validator = ipsec_validator.IPsecV3Validator(service_plugin)
    super(NSXv3IPsecVpnDriver, self).__init__(service_plugin, validator)
    registry.subscribe(
        self._delete_local_endpoint, resources.ROUTER_GATEWAY,
        events.AFTER_DELETE)
    registry.subscribe(
        self._verify_overlap_subnet, resources.ROUTER_INTERFACE,
        events.BEFORE_CREATE)

def test_record_resource_update(self):
    received_kw = []
    receiver = lambda *a, **k: received_kw.append(k)
    registry.subscribe(receiver, 'goose', events.AFTER_UPDATE)
    self.rcache.record_resource_update(self.ctx, 'goose',
                                       OVOLikeThing(3, size='large'))
    self.assertEqual(1, len(received_kw))
    self.assertIsNone(received_kw[0]['existing'])
    # another update with no changed fields results in no callback
    self.rcache.record_resource_update(self.ctx, 'goose',
                                       OVOLikeThing(3, size='large',
                                                    revision_number=100))
    self.assertEqual(1, len(received_kw))
    self.rcache.record_resource_update(self.ctx, 'goose',
                                       OVOLikeThing(3, size='small',
                                                    revision_number=101))
    self.assertEqual(2, len(received_kw))
    self.assertEqual('large', received_kw[1]['existing'].size)
    self.assertEqual('small', received_kw[1]['updated'].size)
    self.assertEqual(set(['size']), received_kw[1]['changed_fields'])

def test_treat_devices_added_updated_notify(self):
    handler = mock.Mock()
    registry.subscribe(handler, resources.PORT_DEVICE,
                       events.AFTER_UPDATE)
    agent = self.agent
    mock_details = {'device': 'dev123',
                    'port_id': 'port123',
                    'network_id': 'net123',
                    'admin_state_up': True,
                    'network_type': 'vlan',
                    'segmentation_id': 100,
                    'physical_network': 'physnet1',
                    'device_owner': 'horse'}
    agent.plugin_rpc = mock.Mock()
    agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
    agent.mgr = mock.Mock()
    agent.mgr.plug_interface.return_value = True
    agent.treat_devices_added_updated(set(['dev123']))
    handler.assert_called_once_with(mock.ANY, mock.ANY, self.agent,
                                    context=mock.ANY,
                                    device_details=mock_details)