def _setup_core_resources(self):
    core_plugin = quantum.manager.QuantumManager.get_plugin()

    self._network = core_plugin.create_network(
        q_context.get_admin_context(),
        {
            'network': {
                'tenant_id': self._tenant_id,
                'name': 'test net',
                'admin_state_up': True,
                'shared': False,
            }
        }
    )

    self._subnet = core_plugin.create_subnet(
        q_context.get_admin_context(),
        {
            'subnet': {
                'network_id': self._network['id'],
                'name': 'test subnet',
                'cidr': '192.168.1.0/24',
                'ip_version': 4,
                'gateway_ip': '192.168.1.1',
                'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
                'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
                'host_routes': attributes.ATTR_NOT_SPECIFIED,
                'enable_dhcp': True,
            }
        }
    )

    self._subnet_id = self._subnet['id']
def test_get_ready_devices_inactive_pool(self):
    with self.vip() as vip:
        # set the pool to INACTIVE; we need to use the plugin directly
        # since status is not tenant-mutable
        self.plugin_instance.update_pool(
            context.get_admin_context(),
            vip['vip']['pool_id'],
            {'pool': {'status': constants.INACTIVE}})

        ready = self.callbacks.get_ready_devices(
            context.get_admin_context())
        self.assertFalse(ready)
def test_get_ready_devices_inactive_vip(self):
    with self.vip() as vip:
        # set the vip to INACTIVE; we need to use the plugin directly
        # since status is not tenant-mutable
        self.plugin_instance.update_vip(
            context.get_admin_context(),
            vip['vip']['id'],
            {'vip': {'status': constants.INACTIVE}})

        ready = self.callbacks.get_ready_devices(
            context.get_admin_context())
        self.assertFalse(ready)
def test_delete_healthmonitor_cascade_deletion_of_associations(self):
    with self.health_monitor(type='HTTP', no_delete=True) as monitor:
        with self.pool() as pool:
            data = {
                'health_monitor': {
                    'id': monitor['health_monitor']['id'],
                    'tenant_id': self._tenant_id
                }
            }
            req = self.new_create_request(
                'pools',
                data,
                fmt=self.fmt,
                id=pool['pool']['id'],
                subresource='health_monitors')
            res = req.get_response(self.ext_api)
            self.assertEqual(res.status_int, 201)

            ctx = context.get_admin_context()

            # check if we actually have corresponding Pool associations
            qry = ctx.session.query(ldb.PoolMonitorAssociation)
            qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
            self.assertTrue(qry.all())

            # delete the HealthMonitor instance
            req = self.new_delete_request(
                'health_monitors',
                monitor['health_monitor']['id'])
            res = req.get_response(self.ext_api)
            self.assertEqual(res.status_int, 204)

            # check if all corresponding Pool associations are deleted
            qry = ctx.session.query(ldb.PoolMonitorAssociation)
            qry = qry.filter_by(monitor_id=monitor['health_monitor']['id'])
            self.assertFalse(qry.all())
def test_ports_vif_host(self):
    cfg.CONF.set_default('allow_overlapping_ips', True)
    host_arg = {portbindings.HOST_ID: self.hostname}
    with contextlib.nested(
            self.port(name='name1',
                      arg_list=(portbindings.HOST_ID,),
                      **host_arg),
            self.port(name='name2')):
        ctx = context.get_admin_context()
        ports = self._list('ports', quantum_context=ctx)['ports']
        self.assertEqual(2, len(ports))
        for port in ports:
            if port['name'] == 'name1':
                self._check_response_portbindings_host(port)
            else:
                self.assertFalse(port[portbindings.HOST_ID])
        # By default user is admin - now test non admin user
        ctx = context.Context(user_id=None,
                              tenant_id=self._tenant_id,
                              is_admin=False,
                              read_deleted="no")
        ports = self._list('ports', quantum_context=ctx)['ports']
        self.assertEqual(2, len(ports))
        for non_admin_port in ports:
            self._check_response_no_portbindings_host(non_admin_port)
def test_get_logical_device_activate(self):
    with self.pool() as pool:
        with self.vip(pool=pool) as vip:
            with self.member(pool_id=vip['vip']['pool_id']) as member:
                ctx = context.get_admin_context()

                # build the expected
                port = self.plugin_instance._core_plugin.get_port(
                    ctx, vip['vip']['port_id'])
                subnet = self.plugin_instance._core_plugin.get_subnet(
                    ctx, vip['vip']['subnet_id'])
                port['fixed_ips'][0]['subnet'] = subnet

                # reload pool to add members and vip
                pool = self.plugin_instance.get_pool(
                    ctx, pool['pool']['id'])

                pool['status'] = constants.ACTIVE
                vip['vip']['status'] = constants.ACTIVE
                vip['vip']['port'] = port
                member['member']['status'] = constants.ACTIVE

                expected = {
                    'pool': pool,
                    'vip': vip['vip'],
                    'members': [member['member']],
                    'healthmonitors': []
                }

                logical_config = self.callbacks.get_logical_device(
                    ctx, pool['id'], activate=True)

                self.assertEqual(logical_config, expected)
def setUp(self):
    super(MetaQuantumPluginV2Test, self).setUp()
    db._ENGINE = None
    db._MAKER = None
    self.fake_tenant_id = uuidutils.generate_uuid()
    self.context = context.get_admin_context()

    db.configure_db()

    setup_metaplugin_conf()

    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
    args = ['--config-file', etcdir('quantum.conf.test')]

    self.client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
    client_cls = self.client_cls_p.start()
    self.client_inst = mock.Mock()
    client_cls.return_value = self.client_inst
    self.client_inst.create_network.return_value = {'id': 'fake_id'}
    self.client_inst.create_port.return_value = {'id': 'fake_id'}
    self.client_inst.create_subnet.return_value = {'id': 'fake_id'}
    self.client_inst.update_network.return_value = {'id': 'fake_id'}
    self.client_inst.update_port.return_value = {'id': 'fake_id'}
    self.client_inst.update_subnet.return_value = {'id': 'fake_id'}
    self.client_inst.delete_network.return_value = True
    self.client_inst.delete_port.return_value = True
    self.client_inst.delete_subnet.return_value = True
    self.plugin = MetaPluginV2(configfile=None)
def test_single_get_tenant(self):
    plugin = quantum.db.db_base_plugin_v2.QuantumDbPluginV2()
    with self.network() as network:
        net_id = network['network']['id']
        ctx = context.get_admin_context()
        n = plugin._get_network(ctx, net_id)
        self.assertEqual(net_id, n.id)
def setUp(self):
    self.adminContext = context.get_admin_context()
    test_config['config_files'] = [NVP_INI_CONFIG_PATH]
    test_config['plugin_name_v2'] = (
        'quantum.plugins.nicira.QuantumPlugin.NvpPluginV2')
    cfg.CONF.set_override('api_extensions_path', NVP_EXTENSIONS_PATH)
    # Save the original RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
        self.saved_attr_map[resource] = attrs.copy()
    ext_mgr = MacLearningExtensionManager()
    test_config['extension_manager'] = ext_mgr
    # mock nvp api client
    self.fc = fake_nvpapiclient.FakeClient(NVP_FAKE_RESPS_PATH)
    self.mock_nvpapi = mock.patch('%s.NvpApiClient.NVPApiHelper'
                                  % NVP_MODULE_PATH, autospec=True)
    instance = self.mock_nvpapi.start()

    def _fake_request(*args, **kwargs):
        return self.fc.fake_request(*args, **kwargs)

    # Emulate tests against NVP 2.x
    instance.return_value.get_nvp_version.return_value = "2.999"
    instance.return_value.request.side_effect = _fake_request
    cfg.CONF.set_override('metadata_mode', None, 'NVP')
    self.addCleanup(self.fc.reset_all)
    self.addCleanup(self.mock_nvpapi.stop)
    self.addCleanup(self.restore_resource_attribute_map)
    self.addCleanup(cfg.CONF.reset)
    super(MacLearningDBTestCase, self).setUp()
def test_update_vip_change_pool(self):
    with self.subnet() as subnet:
        with contextlib.nested(self.pool(name="pool1"),
                               self.pool(name="pool2")) as (pool1, pool2):
            with self.vip(name='vip1', subnet=subnet, pool=pool1) as vip:
                # change vip from pool1 to pool2
                vip_data = {
                    'id': vip['vip']['id'],
                    'name': 'vip1',
                    'pool_id': pool2['pool']['id'],
                }
                ctx = context.get_admin_context()
                self.plugin.update_vip(ctx,
                                       vip['vip']['id'],
                                       {'vip': vip_data})
                db_pool2 = (ctx.session.query(ldb.Pool).
                            filter_by(id=pool2['pool']['id']).one())
                db_pool1 = (ctx.session.query(ldb.Pool).
                            filter_by(id=pool1['pool']['id']).one())
                # check that pool1.vip became None
                self.assertIsNone(db_pool1.vip)
                # and pool2 got vip
                self.assertEqual(db_pool2.vip.id, vip['vip']['id'])
def test_model_update_port_rollback(self):
    """Test for proper rollback for Cisco model layer update port failure.

    Test that the vSwitch plugin port configuration is rolled back
    (restored) by the Cisco plugin model layer when there is a failure
    in the Nexus sub-plugin for an update port operation.

    """
    with self.port(fmt=self.fmt) as orig_port:

        inserted_exc = ValueError
        with mock.patch.object(
                virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
                '_invoke_nexus_for_net_create',
                side_effect=inserted_exc):

            # Send an update port request with a new device ID
            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
            if orig_port['port']['device_id'] == device_id:
                device_id = "600df00d-e4a8-4a3a-8906-feed600df00d"
            data = {'port': {'device_id': device_id}}
            port_id = orig_port['port']['id']
            req = self.new_update_request('ports', data, port_id)
            res = req.get_response(self.api)

            # Sanity check failure result code
            self._assertExpectedHTTP(res.status_int, inserted_exc)

            # Check that the port still has the original device ID
            plugin = base_plugin.QuantumDbPluginV2()
            ctx = context.get_admin_context()
            db_port = plugin._get_port(ctx, port_id)
            self.assertEqual(db_port['device_id'],
                             orig_port['port']['device_id'])
def testQuantumContextAdminToDict(self):
    self.db_api_session.return_value = 'fakesession'
    cxt = context.get_admin_context()
    cxt_dict = cxt.to_dict()
    self.assertIsNone(cxt_dict['user_id'])
    self.assertIsNone(cxt_dict['tenant_id'])
    self.assertIsNotNone(cxt.session)
    self.assertNotIn('session', cxt_dict)
def setUp(self):
    super(OFCManagerTestBase, self).setUp()
    driver = "quantum.tests.unit.nec.stub_ofc_driver.StubOFCDriver"
    config.CONF.set_override('driver', driver, 'OFC')
    ndb.initialize()
    self.addCleanup(ndb.clear_db)
    self.ofc = ofc_manager.OFCManager()
    self.ctx = context.get_admin_context()
def setUp(self):
    self.addCleanup(mock.patch.stopall)

    ofc_manager_cls = mock.patch(OFC_MANAGER).start()
    ofc_driver = ofc_manager_cls.return_value.driver
    ofc_driver.filter_supported.return_value = self.PACKET_FILTER_ENABLE

    super(NecPluginV2TestCase, self).setUp(self._plugin_name)
    self.context = q_context.get_admin_context()
    self.plugin = manager.QuantumManager.get_plugin()
def set_default_svctype_id(original_id):
    if not original_id:
        svctype_mgr = servicetype_db.ServiceTypeManager.get_instance()
        # Fetch default service type - it must exist
        res = svctype_mgr.get_service_types(context.get_admin_context(),
                                            filters={'default': [True]})
        return res[0]['id']
    return original_id
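# Illustrative behaviour of the helper above (ids hypothetical): an unset id
# falls back to the default service type, an explicit id passes through:
#
#   set_default_svctype_id(None)          # -> id of the default service type
#   set_default_svctype_id('1234-5678')   # -> '1234-5678', unchanged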
def _validate_servicetype_ref(data, valid_values=None):
    """Verify the service type id exists."""
    svc_type_id = data
    svctype_mgr = servicetype_db.ServiceTypeManager.get_instance()
    try:
        svctype_mgr.get_service_type(context.get_admin_context(),
                                     svc_type_id)
    except servicetype_db.ServiceTypeNotFound:
        return _("The service type '%s' does not exist") % svc_type_id
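# A minimal sketch of how a validator like the one above is typically wired
# up, assuming the usual quantum attribute-map convention (the 'type:' key
# name used here is hypothetical):
#
#   from quantum.api.v2 import attributes
#
#   attributes.validators['type:servicetype_ref'] = _validate_servicetype_ref
#
# A RESOURCE_ATTRIBUTE_MAP entry declaring
# {'validate': {'type:servicetype_ref': None}} would then have the referenced
# service type checked against the database on create/update.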
def test_create_pool_healthmon_invalid_pool_id(self):
    with self.health_monitor() as healthmon:
        self.assertRaises(loadbalancer.PoolNotFound,
                          self.plugin.create_pool_health_monitor,
                          context.get_admin_context(),
                          healthmon,
                          "123-456-789")
def test_delete_vip(self):
    with self.subnet() as subnet:
        with self.pool(subnet=subnet) as pool:
            with self.vip(pool=pool, subnet=subnet,
                          no_delete=True) as vip:
                self.mock_api.reset_mock()
                ctx = context.get_admin_context()
                self.plugin_instance.delete_vip(ctx, vip['vip']['id'])

                self.mock_api.destroy_pool.assert_called_once_with(
                    mock.ANY, vip['vip']['pool_id'])
def test_create_pool_health_monitor(self):
    with contextlib.nested(self.pool(name="pool"),
                           self.health_monitor(),
                           self.health_monitor()
                           ) as (pool, health_mon1, health_mon2):
        res = self.plugin.create_pool_health_monitor(
            context.get_admin_context(),
            health_mon1, pool['pool']['id'])
        self.assertEqual(
            {'health_monitor': [health_mon1['health_monitor']['id']]},
            res)

        res = self.plugin.create_pool_health_monitor(
            context.get_admin_context(),
            health_mon2, pool['pool']['id'])
        self.assertEqual(
            {'health_monitor': [health_mon1['health_monitor']['id'],
                                health_mon2['health_monitor']['id']]},
            res)
def _get_network_with_floatingips(self, network):
    admin_context = qcontext.get_admin_context()

    net_id = network['id']
    net_filter = {'floating_network_id': [net_id]}
    fl_ips = super(QuantumRestProxyV2,
                   self).get_floatingips(admin_context,
                                         filters=net_filter) or []
    network['floatingips'] = fl_ips

    return network
def test_security_group_rules_for_devices_ipv6_source_group(self):
    fake_prefix = test_fw.FAKE_PREFIX['IPv6']
    with self.network() as n:
        with nested(self.subnet(n,
                                cidr=fake_prefix,
                                ip_version=6),
                    self.security_group(),
                    self.security_group()) as (subnet_v6,
                                               sg1,
                                               sg2):
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', 'tcp', '24', '25',
                ethertype='IPv6',
                source_group_id=sg2['security_group']['id'])
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 201)

            res1 = self._create_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg1_id, sg2_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']

            res2 = self._create_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg2_id])
            ports_rest2 = self.deserialize(self.fmt, res2)
            port_id2 = ports_rest2['port']['id']

            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'ingress',
                         'source_ip_prefix': 'fe80::3/128',
                         'protocol': 'tcp', 'ethertype': 'IPv6',
                         'port_range_max': 25, 'port_range_min': 24,
                         'source_group_id': sg2_id,
                         'security_group_id': sg1_id},
                        {'ethertype': 'IPv6', 'direction': 'egress'}]
            self.assertEqual(port_rpc['security_group_rules'], expected)
            self._delete('ports', port_id1)
            self._delete('ports', port_id2)
def setUp(self):
    self.adminContext = context.get_admin_context()
    test_config['plugin_name_v2'] = (
        'quantum.tests.unit.test_agent_ext_plugin.TestAgentPlugin')
    # for these tests we need to enable overlapping ips
    cfg.CONF.set_default('allow_overlapping_ips', True)
    ext_mgr = AgentTestExtensionManager()
    test_config['extension_manager'] = ext_mgr
    super(AgentDBTestCase, self).setUp()
def _send_all_data(self):
    """Pushes all data to network ctrl (networks/ports, ports/attachments)
    to give the controller an option to re-sync its persistent store
    with quantum's current view of that data.
    """
    admin_context = qcontext.get_admin_context()
    networks = {}

    all_networks = super(QuantumRestProxyV2,
                         self).get_networks(admin_context) or []
    for net in all_networks:
        networks[net.get('id')] = {
            'id': net.get('id'),
            'name': net.get('name'),
            'op-status': net.get('admin_state_up'),
        }

        subnets = net.get('subnets', [])
        for subnet_id in subnets:
            subnet = self.get_subnet(admin_context, subnet_id)
            gateway_ip = subnet.get('gateway_ip')
            if gateway_ip:
                # FIX: For backward compatibility with wire protocol
                networks[net.get('id')]['gateway'] = gateway_ip

        ports = []
        net_filter = {'network_id': [net.get('id')]}
        net_ports = super(QuantumRestProxyV2,
                          self).get_ports(admin_context,
                                          filters=net_filter) or []
        for port in net_ports:
            port_details = {
                'id': port.get('id'),
                'attachment': {
                    'id': port.get('id') + '00',
                    'mac': port.get('mac_address'),
                },
                'state': port.get('status'),
                'op-status': port.get('admin_state_up'),
                'mac': None
            }
            ports.append(port_details)
        networks[net.get('id')]['ports'] = ports
    try:
        resource = '/topology'
        data = {
            'networks': networks,
        }
        ret = self.servers.put(resource, data)
        if not self.servers.action_success(ret):
            raise RemoteRestError(ret[2])
        return ret
    except RemoteRestError as e:
        LOG.error('QuantumRestProxy: Unable to update remote '
                  'network: %s' % e.message)
        raise
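# Illustrative shape of the '/topology' payload that _send_all_data() PUTs to
# the controller; the structure follows directly from the code above, while
# the ids and values shown are hypothetical:
#
#   {
#       'networks': {
#           '<net-id>': {
#               'id': '<net-id>',
#               'name': 'net1',
#               'op-status': True,
#               'gateway': '10.0.0.1',   # only when a subnet has a gateway_ip
#               'ports': [
#                   {'id': '<port-id>',
#                    'attachment': {'id': '<port-id>00',
#                                   'mac': 'fa:16:3e:00:00:01'},
#                    'state': 'ACTIVE',
#                    'op-status': True,
#                    'mac': None},
#               ],
#           },
#       },
#   }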
def test_network_update_with_provider_attrs(self):
    ctx = context.get_admin_context()
    ctx.tenant_id = 'an_admin'
    res, data, net_id = self._put_network_with_provider_attrs(ctx)
    instance = self.plugin.return_value
    exp_input = {'network': data}
    instance.update_network.assert_called_with(mock.ANY,
                                               net_id,
                                               network=exp_input)
    self.assertEqual(res.status_int, web_exc.HTTPOk.code)
def test_ports_vif_details(self):
    cfg.CONF.set_default('allow_overlapping_ips', True)
    plugin = QuantumManager.get_plugin()
    with contextlib.nested(self.port(), self.port()) as (port1, port2):
        ctx = context.get_admin_context()
        ports = plugin.get_ports(ctx)
        self.assertEqual(len(ports), 2)
        for port in ports:
            self.assertEqual(port['binding:vif_type'],
                             portbindings.VIF_TYPE_HYPERV)
def test_get_ready_devices_multiple_vips_and_pools(self):
    ctx = context.get_admin_context()

    # add 3 pools and 2 vips directly to DB
    # to create 2 "ready" devices and one pool without vip
    pools = []
    for i in xrange(0, 3):
        pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
                              subnet_id=self._subnet_id,
                              protocol="HTTP",
                              lb_method="ROUND_ROBIN",
                              status=constants.ACTIVE,
                              admin_state_up=True))
        ctx.session.add(pools[i])

    vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
                   protocol_port=80,
                   protocol="HTTP",
                   pool_id=pools[0].id,
                   status=constants.ACTIVE,
                   admin_state_up=True,
                   connection_limit=3)
    ctx.session.add(vip0)
    pools[0].vip_id = vip0.id
    vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
                   protocol_port=80,
                   protocol="HTTP",
                   pool_id=pools[1].id,
                   status=constants.ACTIVE,
                   admin_state_up=True,
                   connection_limit=3)
    ctx.session.add(vip1)
    pools[1].vip_id = vip1.id
    ctx.session.flush()

    self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
    self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
    ready = self.callbacks.get_ready_devices(ctx)
    self.assertEqual(len(ready), 2)
    self.assertIn(pools[0].id, ready)
    self.assertIn(pools[1].id, ready)
    self.assertNotIn(pools[2].id, ready)
    # cleanup
    ctx.session.query(ldb.Pool).delete()
    ctx.session.query(ldb.Vip).delete()
def get_external_network_id(self, context, **kwargs):
    """Get one external network id for l3 agent.

    l3 agent expects only one external network when it performs
    this query.
    """
    context = quantum_context.get_admin_context()
    plugin = manager.QuantumManager.get_plugin()
    net_id = plugin.get_external_network_id(context)
    LOG.debug(_("External network ID returned to l3 agent: %s"),
              net_id)
    return net_id
def _get_all_subnets_json_for_network(self, net_id):
    admin_context = qcontext.get_admin_context()
    subnets = self._get_subnets_by_network(admin_context, net_id)
    subnets_details = []
    if subnets:
        for subnet in subnets:
            subnet_dict = self._make_subnet_dict(subnet)
            mapped_subnet = self._map_state_and_status(subnet_dict)
            subnets_details.append(mapped_subnet)

    return subnets_details
def test_get_logical_device_inactive(self):
    with self.pool() as pool:
        with self.vip(pool=pool) as vip:
            with self.member(pool_id=vip['vip']['pool_id']):
                self.assertRaises(exceptions.Invalid,
                                  self.callbacks.get_logical_device,
                                  context.get_admin_context(),
                                  pool['pool']['id'],
                                  activate=False)
def setUp(self):
    self.dhcp_notifier_cls_p = mock.patch(
        'quantum.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
        'DhcpAgentNotifyAPI')
    self.dhcp_notifier = mock.Mock(name='dhcp_notifier')
    self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
    self.dhcp_notifier_cls.return_value = self.dhcp_notifier
    super(OvsL3AgentNotifierTestCase, self).setUp(self.plugin_str)
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
    self.adminContext = context.get_admin_context()
    self.addCleanup(self.dhcp_notifier_cls_p.stop)
def _update_port_test_helper(self, expected, func, **kwargs):
    core = self.plugin_instance._core_plugin

    with self.pool() as pool:
        with self.vip(pool=pool) as vip:
            with self.member(pool_id=vip['vip']['pool_id']) as member:
                ctx = context.get_admin_context()
                func(ctx, port_id=vip['vip']['port_id'], **kwargs)

                db_port = core.get_port(ctx, vip['vip']['port_id'])

                for k, v in expected.iteritems():
                    self.assertEqual(db_port[k], v)
def test_network_create_with_provider_attrs(self):
    ctx = context.get_admin_context()
    ctx.tenant_id = 'an_admin'
    res, data = self._post_network_with_provider_attrs(ctx)
    instance = self.plugin.return_value
    exp_input = {'network': data}
    exp_input['network'].update({'admin_state_up': True,
                                 'tenant_id': 'an_admin',
                                 'shared': False})
    instance.create_network.assert_called_with(mock.ANY,
                                               network=exp_input)
    self.assertEqual(res.status_int, web_exc.HTTPCreated.code)
def setUp(self):
    self.addCleanup(mock.patch.stopall)

    ofc_manager_p = mock.patch(OFC_MANAGER)
    ofc_manager_cls = ofc_manager_p.start()
    self.ofc = mock.Mock()
    ofc_manager_cls.return_value = self.ofc
    self.ofc_port_exists = False
    self._setup_side_effects()

    super(TestNecPortsV2Callback, self).setUp()
    self.context = q_context.get_admin_context()
    self.plugin = manager.QuantumManager.get_plugin()
    self.callbacks = nec_plugin.NECPluginV2RPCCallbacks(self.plugin)
def setUp(self):
    super(MetaQuantumPluginV2Test, self).setUp()
    db._ENGINE = None
    db._MAKER = None
    self.fake_tenant_id = str(uuid.uuid4())
    self.context = context.get_admin_context()

    sql_connection = 'sqlite:///:memory:'
    options = {"sql_connection": sql_connection}
    options.update({'base': models_v2.model_base.BASEV2})
    db.configure_db(options)

    self.mox = mox.Mox()
    self.stubs = stubout.StubOutForTesting()
    args = ['--config-file', etcdir('quantum.conf.test')]
    #config.parse(args=args)

    # Update the plugin
    cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0',
                          'PROXY')
    cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY')
    cfg.CONF.set_override('admin_user', 'quantum', 'PROXY')
    cfg.CONF.set_override('admin_password', 'password', 'PROXY')
    cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY')
    cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META')
    cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META')
    cfg.CONF.set_override('default_flavor', 'fake2', 'META')
    cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META')
    cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
    #TODO(nati) remove this after subnet quota change is merged
    cfg.CONF.max_dns_nameservers = 10

    self.client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
    client_cls = self.client_cls_p.start()
    self.client_inst = mock.Mock()
    client_cls.return_value = self.client_inst
    self.client_inst.create_network.return_value = {'id': 'fake_id'}
    self.client_inst.create_port.return_value = {'id': 'fake_id'}
    self.client_inst.create_subnet.return_value = {'id': 'fake_id'}
    self.client_inst.update_network.return_value = {'id': 'fake_id'}
    self.client_inst.update_port.return_value = {'id': 'fake_id'}
    self.client_inst.update_subnet.return_value = {'id': 'fake_id'}
    self.client_inst.delete_network.return_value = True
    self.client_inst.delete_port.return_value = True
    self.client_inst.delete_subnet.return_value = True
    self.plugin = MetaPluginV2(configfile=None)
def _fetch_external_net_id(self):
    """Find UUID of single external network for this agent."""
    if self.conf.gateway_external_network_id:
        return self.conf.gateway_external_network_id
    try:
        return self.plugin_rpc.get_external_network_id(
            context.get_admin_context())
    except rpc_common.RemoteError as e:
        if e.exc_type == 'TooManyExternalNetworks':
            msg = _("The 'gateway_external_network_id' must be "
                    "configured if Quantum has more than one external "
                    "network.")
            raise Exception(msg)
        else:
            raise
def setUp(self):
    self.adminContext = context.get_admin_context()
    test_config['plugin_name_v2'] = (
        'quantum.tests.unit.test_agent_ext_plugin.TestAgentPlugin')
    # for these tests we need to enable overlapping ips
    cfg.CONF.set_default('allow_overlapping_ips', True)
    # Save the original RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
        self.saved_attr_map[resource] = attrs.copy()
    ext_mgr = AgentTestExtensionManager()
    test_config['extension_manager'] = ext_mgr
    self.addCleanup(self.restore_resource_attribute_map)
    self.addCleanup(cfg.CONF.reset)
    super(AgentDBTestCase, self).setUp()
def test_update_vip(self):
    with self.subnet() as subnet:
        with self.pool(subnet=subnet) as pool:
            with self.vip(pool=pool, subnet=subnet) as vip:
                self.mock_api.reset_mock()
                ctx = context.get_admin_context()
                vip['vip'].pop('status')
                new_vip = self.plugin_instance.update_vip(
                    ctx, vip['vip']['id'], vip)

                self.mock_api.reload_pool.assert_called_once_with(
                    mock.ANY, vip['vip']['pool_id'])

                self.assertEqual(new_vip['status'],
                                 constants.PENDING_UPDATE)
def test_create_port(self):
    """Test brocade specific port db."""
    net_id = str(uuid.uuid4())
    port_id = str(uuid.uuid4())
    # port_id is truncated: since the linux-bridge tap device names are
    # based on truncated port id, this enables port lookups using
    # tap devices
    port_id = port_id[0:11]
    tenant_id = str(uuid.uuid4())
    admin_state_up = True

    # Create Port

    # To create a port a network must exist; create a network first
    self.context = context.get_admin_context()
    brocade_db.create_network(self.context, net_id, TEST_VLAN)

    physical_interface = "em1"
    brocade_db.create_port(self.context, port_id, net_id,
                           physical_interface,
                           TEST_VLAN, tenant_id, admin_state_up)

    port = brocade_db.get_port(self.context, port_id)
    self.assertEqual(port['port_id'], port_id)
    self.assertEqual(port['network_id'], net_id)
    self.assertEqual(port['physical_interface'], physical_interface)
    self.assertEqual(int(port['vlan_id']), TEST_VLAN)
    self.assertEqual(port['tenant_id'], tenant_id)
    self.assertEqual(port['admin_state_up'], admin_state_up)

    # Toggle the port state and verify each transition
    admin_state_up = True
    brocade_db.update_port_state(self.context, port_id, admin_state_up)
    port = brocade_db.get_port(self.context, port_id)
    self.assertEqual(port['admin_state_up'], admin_state_up)

    admin_state_up = False
    brocade_db.update_port_state(self.context, port_id, admin_state_up)
    port = brocade_db.get_port(self.context, port_id)
    self.assertEqual(port['admin_state_up'], admin_state_up)

    admin_state_up = True
    brocade_db.update_port_state(self.context, port_id, admin_state_up)
    port = brocade_db.get_port(self.context, port_id)
    self.assertEqual(port['admin_state_up'], admin_state_up)

    # Delete Port
    brocade_db.delete_port(self.context, port_id)
def test_create_network(self):
    """Test brocade specific network db."""
    net_id = str(uuid.uuid4())

    # Create a network
    self.context = context.get_admin_context()
    brocade_db.create_network(self.context, net_id, TEST_VLAN)

    # Get the network and verify
    net = brocade_db.get_network(self.context, net_id)
    self.assertEqual(net['id'], net_id)
    self.assertEqual(int(net['vlan']), TEST_VLAN)

    # Delete the network
    brocade_db.delete_network(self.context, net['id'])
def sync_routers(self, context, **kwargs):
    """Sync routers according to filters to a specific agent.

    @param context: contain user information
    @param kwargs: host, or router_id
    @return: a list of routers
             with their interfaces and floating_ips
    """
    router_id = kwargs.get('router_id')
    # TODO(gongysh) we will use host in kwargs for multi host BP
    context = quantum_context.get_admin_context()
    plugin = manager.QuantumManager.get_plugin()
    routers = plugin.get_sync_data(context, router_id)
    LOG.debug(_("Routers returned to l3 agent:\n %s"),
              jsonutils.dumps(routers, indent=5))
    return routers
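# For context, the agent side of this RPC is conventionally a thin proxy
# method; a minimal sketch, assuming the standard quantum RpcProxy helpers
# (the class and method names here are hypothetical):
#
#   class L3PluginApi(proxy.RpcProxy):
#       def get_routers(self, context, router_id=None):
#           # invokes the 'sync_routers' callback shown above
#           return self.call(context,
#                            self.make_msg('sync_routers',
#                                          host=self.host,
#                                          router_id=router_id),
#                            topic=self.topic)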
def test_update_vip_raises_vip_exists(self):
    with self.subnet() as subnet:
        with contextlib.nested(
            self.pool(name="pool1"),
            self.pool(name="pool2")
        ) as (pool1, pool2):
            with contextlib.nested(
                self.vip(name='vip1', subnet=subnet, pool=pool1),
                self.vip(name='vip2', subnet=subnet, pool=pool2)
            ) as (vip1, vip2):
                vip_data = {
                    'id': vip2['vip']['id'],
                    'name': 'vip1',
                    'pool_id': pool1['pool']['id'],
                }
                self.assertRaises(loadbalancer.VipExists,
                                  self.plugin.update_vip,
                                  context.get_admin_context(),
                                  vip2['vip']['id'],
                                  {'vip': vip_data})
def setUp(self):
    # Save the global RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
        self.saved_attr_map[resource] = attrs.copy()
    super(OvsAgentSchedulerTestCase, self).setUp(self.plugin_str)
    ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
    self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
    self.adminContext = context.get_admin_context()
    # Add the resources to the global attribute map
    # This is done here as the setup process won't
    # initialize the main API router which extends
    # the global attribute map
    attributes.RESOURCE_ATTRIBUTE_MAP.update(
        agent.RESOURCE_ATTRIBUTE_MAP)
    self.addCleanup(self.restore_attribute_map)
    self.agentscheduler_dbMinxin = manager.QuantumManager.get_plugin()