def test_model_update_port_rollback(self):
    """Test for proper rollback for Cisco model layer update port failure.

    Test that the vSwitch plugin port configuration is rolled back
    (restored) by the Cisco plugin model layer when there is a failure
    in the Nexus sub-plugin for an update port operation.

    """
    with self.port(fmt=self.fmt) as orig_port:

        inserted_exc = ValueError
        with mock.patch.object(
                virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
                '_invoke_nexus_for_net_create',
                side_effect=inserted_exc):

            # Send an update port request with a new device ID
            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
            if orig_port['port']['device_id'] == device_id:
                device_id = "600df00d-e4a8-4a3a-8906-feed600df00d"
            data = {'port': {'device_id': device_id}}
            port_id = orig_port['port']['id']
            req = self.new_update_request('ports', data, port_id)
            res = req.get_response(self.api)

            # Sanity check failure result code
            self._assertExpectedHTTP(res.status_int, inserted_exc)

            # Check that the port still has the original device ID
            plugin = base_plugin.NeutronDbPluginV2()
            ctx = context.get_admin_context()
            db_port = plugin._get_port(ctx, port_id)
            self.assertEqual(db_port['device_id'],
                             orig_port['port']['device_id'])
def initialize(self):
    # Extend extension to service mapping dict
    # TODO(sakvarma) Check if this mapping can be removed
    p_const.EXT_TO_SERVICE_MAPPING['cisco_n1kv_profile'] = (
        n1kv_const.CISCO_N1KV)
    self.n1kvclient = n1kv_client.Client()
    self.sync_obj = n1kv_sync.N1kvSyncDriver(
        db_base_plugin_v2.NeutronDbPluginV2())
    eventlet.spawn(self.sync_obj.do_sync)

    # Get VLAN/VXLAN network profile names
    self.netp_name = {
        p_const.TYPE_VLAN: n1kv_const.DEFAULT_VLAN_NETWORK_PROFILE_NAME,
        p_const.TYPE_VXLAN: n1kv_const.DEFAULT_VXLAN_NETWORK_PROFILE_NAME,
    }
    # Ensure network profiles are created on the VSM
    try:
        self._ensure_network_profiles_created_on_vsm()
    except (n1kv_exc.VSMConnectionFailed, n1kv_exc.VSMError):
        LOG.error(_LE("VSM failed to create default network profiles."))
    self.vif_type = portbindings.VIF_TYPE_OVS
    self.vif_details = {portbindings.CAP_PORT_FILTER: True,
                        portbindings.OVS_HYBRID_PLUG: True}
    self.supported_network_types = [p_const.TYPE_VLAN, p_const.TYPE_VXLAN]
def get_tenants(self, context, filters=None, fields=None, sorts=None,
                limit=None, marker=None, page_reverse=False):
    tenants = self._get_all_tenant(context)
    result_t = []
    l3_plugin = self.get_l3_plugin()
    # Guard against the default of None before mutating filters below.
    filters = filters or {}
    if 'id' in filters:
        tenants = filters.pop('id')
    for tenant in tenants:
        counter = {'id': tenant}
        for resource in ['ports', 'networks', 'floatingips',
                         'subnets', 'routers']:
            method = "get_" + resource
            filters['tenant_id'] = [tenant]
            if resource in ['ports', 'networks', 'subnets']:
                method_r = getattr(db_base_plugin_v2.NeutronDbPluginV2(),
                                   method)
            else:
                method_r = getattr(l3_plugin, method)
            result = method_r(context, filters=filters, fields=fields,
                              sorts=sorts, limit=limit, marker=marker,
                              page_reverse=page_reverse)
            counter[resource + '_count'] = len(result)
        result_t.append(counter)
    return result_t
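# For illustration only: a hedged sketch of the structure get_tenants()
# returns, one dict per tenant with an 'id' key plus a '<resource>_count'
# key for each of the five resources. The tenant IDs and counts below
# are made up.
example_get_tenants_result = [
    {'id': 'tenant-a', 'ports_count': 4, 'networks_count': 2,
     'floatingips_count': 1, 'subnets_count': 2, 'routers_count': 1},
    {'id': 'tenant-b', 'ports_count': 0, 'networks_count': 1,
     'floatingips_count': 0, 'subnets_count': 1, 'routers_count': 0},
]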
def list_missing_networks(resource, event, trigger, **kwargs):
    """List neutron networks that are missing the NSX backend network."""
    plugin = db_base_plugin_v2.NeutronDbPluginV2()
    admin_cxt = neutron_context.get_admin_context()
    neutron_networks = plugin.get_networks(admin_cxt)
    networks = []
    for net in neutron_networks:
        neutron_id = net['id']
        # get the network nsx id from the mapping table
        nsx_id = get_network_nsx_id(admin_cxt, neutron_id)
        if not nsx_id:
            # skip external networks
            continue
        try:
            utils.get_connected_nsxlib().logical_switch.get(nsx_id)
        except nsx_exc.ResourceNotFound:
            networks.append({'name': net['name'],
                             'neutron_id': neutron_id,
                             'nsx_id': nsx_id})
    if networks:
        title = _LI("Found %d internal networks missing from the NSX "
                    "manager:") % len(networks)
        LOG.info(formatters.output_formatter(
            title, networks, ['name', 'neutron_id', 'nsx_id']))
    else:
        LOG.info(_LI("All internal networks exist on the NSX manager"))
def _test_update_subnetpool_address_scope_notify(self, as_change=True):
    with self.address_scope(name='foo-address-scope') as addr_scope:
        foo_as_id = addr_scope['address_scope']['id']
        subnet = netaddr.IPNetwork('10.10.10.0/24')
        initial_subnetpool = self._test_create_subnetpool(
            [subnet.cidr], name='foo-sp',
            min_prefixlen='21', address_scope_id=foo_as_id)
        subnetpool_id = initial_subnetpool['subnetpool']['id']

        with self.address_scope(name='bar-address-scope') as other_as, \
                self.network() as network:
            data = {'subnet': {
                'network_id': network['network']['id'],
                'subnetpool_id': subnetpool_id,
                'prefixlen': 24,
                'ip_version': constants.IP_VERSION_4,
                'tenant_id': network['network']['tenant_id']}}
            req = self.new_create_request('subnets', data)
            subnet = self.deserialize(self.fmt,
                                      req.get_response(self.api))

            with mock.patch.object(registry, 'publish') as publish:
                plugin = db_base_plugin_v2.NeutronDbPluginV2()
                plugin.is_address_scope_owned_by_tenant = mock.Mock(
                    return_value=True)
                plugin._validate_address_scope_id = mock.Mock()
                ctx = context.get_admin_context()
                bar_as_id = other_as['address_scope']['id']
                data = {'subnetpool': {'name': 'bar-sp'}}
                if as_change:
                    data['subnetpool']['address_scope_id'] = bar_as_id
                updated_sp = plugin.update_subnetpool(
                    ctx, subnetpool_id, data)

                self.assertEqual('bar-sp', updated_sp['name'])
                if as_change:
                    self.assertEqual(bar_as_id,
                                     updated_sp['address_scope_id'])
                    publish.assert_called_once_with(
                        resources.SUBNETPOOL_ADDRESS_SCOPE,
                        events.AFTER_UPDATE,
                        plugin.update_subnetpool,
                        payload=mock.ANY)
                    payload = publish.mock_calls[0][2]['payload']
                    self.assertEqual(ctx, payload.context)
                    self.assertEqual(subnetpool_id, payload.resource_id)
                else:
                    self.assertEqual(foo_as_id,
                                     updated_sp['address_scope_id'])
                    self.assertFalse(publish.called)
def __init__(self, api_url, username, password, neutron_id,
             exclude_physical_networks, dry_run=False):
    self.rest_client = RestClient(api_url, username, password)
    self.keystone_client = KeystoneClient()
    self.db_plugin = db_base_plugin_v2.NeutronDbPluginV2()
    self.neutron_id = neutron_id
    self.exclude_physical_networks = exclude_physical_networks
    self.dry_run = dry_run
    if self.dry_run:
        LOG.info("This is dry run mode.")
    self.rest_sleep = 0.5
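# A hedged construction sketch for this Synchronizer. Every argument
# value below is a placeholder; in practice they come from cfg.CONF
# (see the __init__ further down that wires them from configuration).
sync = Synchronizer('https://bcf.example.com:8443/api/v1',  # api_url
                    'admin',                                # username
                    'secret',                               # password
                    'neutron-east',                         # neutron_id
                    ['physnet-mgmt'],   # exclude_physical_networks
                    dry_run=True)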
def configure_test(self):
    model_base.BASEV2.metadata.create_all(self.engine)
    cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
    self.plugin = base_plugin.NeutronDbPluginV2()
    self.cxt = get_admin_test_context(self.engine.url)
    self.addCleanup(self.cxt._session.close)

    self.tenant_id = 'test_tenant'
    self.network_id = 'test_net_id'
    self.subnet_id = 'test_sub_id'
    self.port_id = 'test_p_id'

    self._create_network()
    self._create_subnet()
def __init__(self, minimize_polling=False, quitting_rpc_timeout=None,
             conf=None,
             aci_monitor_respawn_interval=(
                 aci_constants.DEFAULT_ACI_RESPAWN)):
    self.conf = aci_config.CONF
    self.aci_config = self.conf.ml2_aci

    self.network_config = {
        'hostgroup_dict': aci_config.create_hostgroup_dictionary(),
        'address_scope_dict': aci_config.create_addressscope_dictionary()
    }
    self.host_group_config = self.network_config['hostgroup_dict']
    self.tenant_manager = driver.DriverManager(
        namespace='aci.tenant.managers',
        name=self.aci_config.tenant_manager,
        invoke_on_load=True).driver
    self.db = db.NeutronDbPluginV2()
    self.tag_plugin = tag_plugin.TagPlugin()
    self.aci_manager = cobra_manager.CobraManager(self.network_config,
                                                  self.aci_config,
                                                  self.tenant_manager)
    self.aci_monitor_respawn_interval = aci_monitor_respawn_interval
    self.minimize_polling = minimize_polling
    self.polling_interval = 10
    self.iter_num = 0
    self.run_daemon_loop = True
    self.quitting_rpc_timeout = quitting_rpc_timeout
    self.catch_sigterm = False
    self.catch_sighup = False

    host = self.conf.host
    self.agent_id = 'aci-agent-%s' % host
    self.setup_rpc()
    self.agent_state = {
        'binary': 'neutron-aci-agent',
        'host': host,
        'topic': n_const.L2_AGENT_TOPIC,
        'configurations': {},
        'agent_type': aci_constants.ACI_AGENT_TYPE,
        'start_flag': True}
    self.connection.consume_in_threads()
def _test__allocate_ips_for_port(self, subnets, port, expected):
    # This test is incompatible with pluggable ipam, because subnets
    # were not actually created, so no ipam_subnet exists.
    cfg.CONF.set_override("ipam_driver", None)
    plugin = db_base_plugin_v2.NeutronDbPluginV2()
    with mock.patch.object(ipam_backend_mixin.IpamBackendMixin,
                           '_ipam_get_subnets') as get_subnets:
        with mock.patch.object(non_ipam.IpamNonPluggableBackend,
                               '_check_unique_ip') as check_unique:
            context = mock.Mock()
            get_subnets.return_value = subnets
            check_unique.return_value = True
            actual = plugin.ipam._allocate_ips_for_port(context, port)
            self.assertEqual(expected, actual)
def setUp(self):
    super(IpamTestCase, self).setUp()
    cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
    DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
    self.setup_coreplugin(DB_PLUGIN_KLASS)
    self.plugin = base_plugin.NeutronDbPluginV2()
    self.cxt = context.Context(user_id=None,
                               tenant_id=None,
                               is_admin=True,
                               overwrite=False)
    self.tenant_id = 'test_tenant'
    self.network_id = 'test_net_id'
    self.subnet_id = 'test_sub_id'
    self.port_id = 'test_p_id'
    self._create_network()
    self._create_subnet()
def setUp(self):
    super(NetworkRBACTestCase, self).setUp()
    cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
    DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
    self.setup_coreplugin(DB_PLUGIN_KLASS)
    self.plugin = base_plugin.NeutronDbPluginV2()
    self.cxt = context.Context(user_id=None,
                               tenant_id=None,
                               is_admin=True,
                               overwrite=False)
    self.tenant_1 = uuidutils.generate_uuid()
    self.tenant_2 = uuidutils.generate_uuid()
    self.network_id = uuidutils.generate_uuid()
    self.subnet_1_id = uuidutils.generate_uuid()
    self.subnet_2_id = uuidutils.generate_uuid()
    self.port_id = uuidutils.generate_uuid()
def setUp(self):
    super(IpamTestCase, self).setUp()
    cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
    if self.use_pluggable_ipam:
        self._turn_on_pluggable_ipam()
    else:
        self._turn_off_pluggable_ipam()
    self.plugin = base_plugin.NeutronDbPluginV2()
    self.cxt = context.Context(user_id=None,
                               tenant_id=None,
                               is_admin=True,
                               overwrite=False)
    self.tenant_id = 'test_tenant'
    self.network_id = 'test_net_id'
    self.subnet_id = 'test_sub_id'
    self.port_id = 'test_p_id'
    self._create_network()
    self._create_subnet()
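# The toggle helpers are not shown in this snippet; a plausible sketch,
# assuming they flip the ipam_driver option the same way the allocation
# test above does (setting it to None disables pluggable ipam). Using
# 'internal' as the pluggable reference driver's alias is an assumption.
def _turn_on_pluggable_ipam(self):
    cfg.CONF.set_override('ipam_driver', 'internal')

def _turn_off_pluggable_ipam(self):
    cfg.CONF.set_override('ipam_driver', None)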
def __init__(self):
    bigswitch_config.register_config()
    networking_bigswitch_l3_pe.lib.config.register_config()
    api_url = cfg.CONF.networking_bigswitch_l3_pe.api_url
    username, password = cfg.CONF.RESTPROXY.server_auth.split(':')
    self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
    exclude_physical_networks = \
        cfg.CONF.networking_bigswitch_l3_pe.exclude_physical_networks
    self.sync = Synchronizer(api_url, username, password,
                             self.neutron_id, exclude_physical_networks)
    self.notifier = EventNotifier()
    self.watcher = EventWatcher()
    self.keystone_client = KeystoneClient()
    self.db_plugin = db_base_plugin_v2.NeutronDbPluginV2()
    eventlet.spawn(self.watcher.watch)
    eventlet.spawn(self._bcf_sync,
                   cfg.CONF.networking_bigswitch_l3_pe.sync_interval)
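# _bcf_sync is not shown in this snippet. A hedged sketch of the periodic
# loop it likely implements; synchronize() is a hypothetical name for the
# Synchronizer's sync entry point, not a confirmed API.
def _bcf_sync(self, interval):
    while True:
        try:
            self.sync.synchronize()  # hypothetical method name
        except Exception:
            LOG.exception("BCF periodic sync failed; will retry")
        eventlet.sleep(interval)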
def test_model_update_port_rollback(self):
    """Test for proper rollback for Cisco model layer update port failure.

    Test that the vSwitch plugin port configuration is rolled back
    (restored) by the Cisco plugin model layer when there is a failure
    in the Nexus sub-plugin for an update port operation.

    The update port operation simulates a port attachment scenario:
    first a port is created with no instance (null device_id), and
    then a port update is requested with a non-null device_id to
    simulate the port attachment.

    """
    with self.port(fmt=self.fmt, device_id='',
                   device_owner=DEVICE_OWNER) as orig_port:

        inserted_exc = ValueError
        with mock.patch.object(
                virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
                '_invoke_nexus_for_net_create',
                side_effect=inserted_exc):

            # Send an update port request including a non-null device ID
            data = {'port': {'device_id': DEVICE_ID_2,
                             'device_owner': DEVICE_OWNER,
                             portbindings.HOST_ID: COMP_HOST_NAME}}
            port_id = orig_port['port']['id']
            req = self.new_update_request('ports', data, port_id)
            res = req.get_response(self.api)

            # Sanity check failure result code
            self._assertExpectedHTTP(res.status_int, inserted_exc)

            # Check that the port still has the original device ID
            plugin = base_plugin.NeutronDbPluginV2()
            ctx = context.get_admin_context()
            db_port = plugin._get_port(ctx, port_id)
            self.assertEqual(db_port['device_id'],
                             orig_port['port']['device_id'])
def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs):
    """Resync DHCP bindings on NSXv Edge."""
    if not kwargs['property']:
        LOG.error(_LE("Need to specify edge-id parameter"))
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    edge_id = properties.get('edge-id')
    LOG.info(_LI("Updating NSXv Edge: %s"), edge_id)
    # Need to create a NeutronDbPlugin object so that we are able to
    # do neutron list-ports.
    plugin = db_base_plugin_v2.NeutronDbPluginV2()
    nsxv_manager = vcns_driver.VcnsDriver(
        edge_utils.NsxVCallbacks(plugin))
    edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin)
    try:
        edge_manager.update_dhcp_service_config(neutron_db.context,
                                                edge_id)
    except exceptions.ResourceNotFound:
        LOG.error(_LE("Edge %s not found"), edge_id)
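# For illustration: how this callback receives its CLI input. The admin
# shell passes the raw --property values through kwargs, and
# parse_multi_keyval_opt turns them into a dict. The edge id below is
# made up.
kwargs = {'property': ['edge-id=edge-15']}
properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
assert properties.get('edge-id') == 'edge-15'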
def _get_all_tenant(self, context):
    res = db_base_plugin_v2.NeutronDbPluginV2().get_ports(context)
    tenants = [r['tenant_id'] for r in res if r['tenant_id']]
    return list(set(tenants))