def test_unset_subport(self):
    """Trunk subport removal must unplug the port and clean up the tenant.

    Simulates the AFTER_DELETE SubPort callback and verifies the driver
    unplugs the subport from its network and deletes the now
    unprovisioned tenant.
    """
    tenant_id = 'ten-4'
    network_id = 'net4-id'
    vm_id = 'vm4'
    trunk_id = 111
    device_owner = 'trunk:subport'
    host_id = 'host1'
    port_id = 'p1'
    vnic_type = 'allowed'
    profile = []
    # Parent (trunk) port whose binding supplies host/vnic_type/profile.
    trunk_port = models_v2.Port(tenant_id=tenant_id,
                                network_id=network_id,
                                device_id=vm_id,
                                device_owner='compute:None')
    trunk_port.port_binding = port_models.PortBinding()
    trunk_port.port_binding.vnic_type = vnic_type
    trunk_port.port_binding.host = host_id
    trunk_port.port_binding.profile = profile
    mechanism_arista.db_lib.tenant_provisioned.return_value = False
    mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \
        trunk_port
    resource = 'SubPort'
    event = 'AFTER_DELETE'
    trigger = 'AristaDriver'
    subport = trunk_models.SubPort()
    subport.port_id = port_id
    # Fix: pass the subport that actually carries port_id; previously a
    # fresh, empty SubPort() was passed and `subport` was unused
    # (inconsistent with the AFTER_CREATE test, test_set_subport).
    payload = callbacks.TrunkPayload(None, trunk_id, subports=[subport])
    sp = models_v2.Port(id=port_id,
                        device_owner='trunk:subport',
                        network_id=network_id)
    self.drv.ndb.get_port.return_value = sp
    self.drv.unset_subport(resource, event, trigger, payload=payload)
    expected_calls = [
        mock.call.NeutronNets(),
        mock.call.get_trunk_port_by_trunk_id(trunk_id),
        mock.call.unplug_port_from_network(vm_id, device_owner,
                                           host_id, port_id,
                                           network_id, tenant_id,
                                           [], vnic_type,
                                           switch_bindings=profile,
                                           trunk_details=None),
        mock.call.remove_security_group([], profile),
        mock.call.tenant_provisioned(tenant_id),
        mock.call.delete_tenant(tenant_id),
    ]
    mechanism_arista.db_lib.assert_has_calls(expected_calls)
def test_distributed_port_binding_deleted_by_port_deletion(self):
    """Deleting a port must cascade-delete all its distributed bindings.

    Creates one DVR interface port bound on two hosts, deletes the
    port, and verifies that no bindings remain and that SQLAlchemy
    emits no warnings while cascading the delete.
    """
    network_id = uuidutils.generate_uuid()
    network_obj.Network(self.ctx, id=network_id).create()
    with db_api.context_manager.writer.using(self.ctx):
        device_owner = constants.DEVICE_OWNER_DVR_INTERFACE
        port = models_v2.Port(
            id='port_id',
            network_id=network_id,
            mac_address='00:11:22:33:44:55',
            admin_state_up=True,
            status=constants.PORT_STATUS_ACTIVE,
            device_id='device_id',
            device_owner=device_owner)
        self.ctx.session.add(port)
        binding_kwarg = {
            'port_id': 'port_id',
            'host': 'host',
            'vif_type': portbindings.VIF_TYPE_UNBOUND,
            'vnic_type': portbindings.VNIC_NORMAL,
            'router_id': 'router_id',
            'status': constants.PORT_STATUS_DOWN
        }
        self.ctx.session.add(
            models.DistributedPortBinding(**binding_kwarg))
        # Second binding for the same port on a different host.
        binding_kwarg['host'] = 'another-host'
        self.ctx.session.add(
            models.DistributedPortBinding(**binding_kwarg))
    # Capture any warning emitted during the cascade; the assert below
    # fails the test if one fires.
    with warnings.catch_warnings(record=True) as warning_list:
        with db_api.context_manager.writer.using(self.ctx):
            self.ctx.session.delete(port)
    self.assertEqual([], warning_list)
    ports = ml2_db.get_distributed_port_bindings(self.ctx,
                                                 'port_id')
    self.assertEqual(0, len(ports))
def test_port_without_device_owner_no_notify(self):
    """A port with no device_owner must not produce a nova event."""
    instance_uuid = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
    port = models_v2.Port(id='port-uuid',
                          device_id=instance_uuid,
                          status=n_const.PORT_STATUS_ACTIVE)
    self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE,
                                            sql_attr.NO_VALUE,
                                            port)
def test_port_without_device_id_no_notify(self):
    """A port with no device_id must not produce a nova event."""
    dhcp_port = models_v2.Port(id='port-uuid',
                               device_owner=n_const.DEVICE_OWNER_DHCP,
                               status=n_const.PORT_STATUS_ACTIVE)
    self._record_port_status_changed_helper(n_const.PORT_STATUS_ACTIVE,
                                            sql_attr.NO_VALUE,
                                            dhcp_port)
def test_non_compute_instances_no_notify(self):
    """A DHCP-owned port must not produce a nova event."""
    dhcp_port = models_v2.Port(id='port-uuid',
                               device_id='device-uuid',
                               device_owner="network:dhcp",
                               status=constants.PORT_STATUS_ACTIVE)
    self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
                                            sql_attr.NO_VALUE,
                                            dhcp_port)
def test_port_without_uuid_device_id_no_notify(self):
    """A non-UUID device_id must not produce a nova event."""
    probe_port = models_v2.Port(id='port-uuid',
                                device_id='compute_probe:',
                                device_owner='compute:',
                                status=constants.PORT_STATUS_ACTIVE)
    self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
                                            sql_attr.NO_VALUE,
                                            probe_port)
def test_port_without_id_no_notify(self):
    """A port record lacking an id must not produce a nova event."""
    instance_uuid = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
    anon_port = models_v2.Port(device_id=instance_uuid,
                               device_owner="compute:",
                               status=constants.PORT_STATUS_ACTIVE)
    self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
                                            sql_attr.NO_VALUE,
                                            anon_port)
def _setup_port_binding(self, **kwargs):
    """Create a port plus its (possibly distributed) binding row.

    Supported kwargs: ``network_id``, ``device_owner``, ``device_id``,
    ``host`` and ``host_state``. DVR interface ports get a
    DistributedPortBinding instead of a PortBinding; ports on the HA
    test network additionally get an L3HARouterAgentPortBinding for
    the L3 agent on that host.
    """
    with self.ctx.session.begin(subtransactions=True):
        mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        port_id = uuidutils.generate_uuid()
        network_id = kwargs.get('network_id', TEST_NETWORK_ID)
        device_owner = kwargs.get('device_owner', '')
        device_id = kwargs.get('device_id', '')
        host = kwargs.get('host', helpers.HOST)
        self.ctx.session.add(models_v2.Port(
            id=port_id,
            network_id=network_id,
            mac_address=mac,
            admin_state_up=True,
            status=constants.PORT_STATUS_ACTIVE,
            device_id=device_id,
            device_owner=device_owner))
        port_binding_cls = models.PortBinding
        binding_kwarg = {'port_id': port_id,
                         'host': host,
                         'vif_type': portbindings.VIF_TYPE_UNBOUND,
                         'vnic_type': portbindings.VNIC_NORMAL}
        if device_owner == constants.DEVICE_OWNER_DVR_INTERFACE:
            # DVR interfaces are bound per host with their own model
            # and carry router_id/status columns.
            port_binding_cls = models.DistributedPortBinding
            binding_kwarg['router_id'] = TEST_ROUTER_ID
            binding_kwarg['status'] = constants.PORT_STATUS_DOWN
        self.ctx.session.add(port_binding_cls(**binding_kwarg))
        if network_id == TEST_HA_NETWORK_ID:
            # HA router ports also need an agent binding; device_id is
            # the router id in that case.
            agent = self.get_l3_agent_by_host(host)
            l3_hamode.L3HARouterAgentPortBinding(
                self.ctx, port_id=port_id, router_id=device_id,
                l3_agent_id=agent['id'], state=kwargs.get(
                    'host_state', n_const.HA_ROUTER_STATE_ACTIVE)).create()
def _setup_port_binding(self, network_id='network_id', dvr=True):
    """Create a network, a port on it, and a (DVR or regular) binding."""
    session = self.ctx.session
    with session.begin(subtransactions=True):
        session.add(models_v2.Network(id=network_id))
        if dvr:
            owner = constants.DEVICE_OWNER_DVR_INTERFACE
        else:
            owner = ''
        session.add(
            models_v2.Port(id='port_id',
                           network_id=network_id,
                           mac_address='00:11:22:33:44:55',
                           admin_state_up=True,
                           status=constants.PORT_STATUS_ACTIVE,
                           device_id='',
                           device_owner=owner))
        binding_attrs = {
            'port_id': 'port_id',
            'host': helpers.HOST,
            'vif_type': portbindings.VIF_TYPE_UNBOUND,
            'vnic_type': portbindings.VNIC_NORMAL,
        }
        if dvr:
            # DVR bindings carry router_id/status columns.
            binding_attrs['router_id'] = 'router_id'
            binding_attrs['status'] = constants.PORT_STATUS_DOWN
            session.add(models.DVRPortBinding(**binding_attrs))
        else:
            session.add(models.PortBinding(**binding_attrs))
def test_dvr_port_binding_deleted_by_port_deletion(self):
    """Port deletion must cascade-delete every DVR binding for it."""
    with self.ctx.session.begin(subtransactions=True):
        self.ctx.session.add(models_v2.Network(id='network_id'))
        port = models_v2.Port(
            id='port_id',
            network_id='network_id',
            mac_address='00:11:22:33:44:55',
            admin_state_up=True,
            status=constants.PORT_STATUS_ACTIVE,
            device_id='device_id',
            device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
        self.ctx.session.add(port)
        binding_attrs = {
            'port_id': 'port_id',
            'host': 'host',
            'vif_type': portbindings.VIF_TYPE_UNBOUND,
            'vnic_type': portbindings.VNIC_NORMAL,
            'router_id': 'router_id',
            'status': constants.PORT_STATUS_DOWN,
        }
        # Bind the same port on two different hosts.
        for host in ('host', 'another-host'):
            binding_attrs['host'] = host
            self.ctx.session.add(models.DVRPortBinding(**binding_attrs))
    # Any SQLAlchemy warning during the cascade fails the test below.
    with warnings.catch_warnings(record=True) as caught:
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.delete(port)
    self.assertEqual([], caught)
    bindings = ml2_db.get_dvr_port_bindings(self.ctx.session, 'port_id')
    self.assertEqual(0, len(bindings))
def _make_port(self):
    """Create and persist a port on a fresh network; return the port.

    NOTE(review): the port is attached via the ``networks`` keyword
    rather than ``network_id`` — presumably the relationship/backref
    name on this model version; confirm against models_v2.Port.
    """
    net = self._make_net()
    with self.ctx.session.begin():
        port = models_v2.Port(networks=net, mac_address='1',
                              tenant_id='1', admin_state_up=True,
                              status='DOWN', device_id='2',
                              device_owner='3')
        self.ctx.session.add(port)
    return port
def test_non_compute_instances_no_notify(self):
    """A DHCP-owned port must not produce a nova event."""
    instance_uuid = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
    dhcp_port = models_v2.Port(id='port-uuid',
                               device_id=instance_uuid,
                               device_owner="network:dhcp",
                               status=constants.PORT_STATUS_ACTIVE)
    self._record_port_status_changed_helper(constants.PORT_STATUS_ACTIVE,
                                            sql_attr.NO_VALUE,
                                            dhcp_port)
def _make_port(self):
    """Persist a port on a newly created network and return it."""
    network = self._make_net()
    with db_api.context_manager.writer.using(self.ctx):
        port = models_v2.Port(network_id=network.id,
                              mac_address='1',
                              tenant_id='1',
                              admin_state_up=True,
                              status='DOWN',
                              device_id='2',
                              device_owner='3')
        self.ctx.session.add(port)
    return port
def _add_port(self, net_id, port_id):
    """Persist a minimal DOWN port on the given network."""
    new_port = models_v2.Port(id=port_id,
                              network_id=net_id,
                              mac_address='foo_mac_%s' % port_id,
                              admin_state_up=True,
                              status='DOWN',
                              device_id='',
                              device_owner='')
    with self.ctx.session.begin(subtransactions=True):
        self.ctx.session.add(new_port)
def _create_port_with_mac(self, context, network_id, port_data,
                          mac_address):
    """Persist a new port with the given MAC address.

    Raises MacAddressInUse when the MAC already exists (duplicate
    entry at the DB level).
    """
    try:
        # This helper may run inside or outside an existing
        # transaction; autonested_transaction handles both without an
        # explicit flag.
        with db_api.autonested_transaction(context.session):
            new_port = models_v2.Port(mac_address=mac_address,
                                      **port_data)
            context.session.add(new_port)
            return new_port
    except db_exc.DBDuplicateEntry:
        raise n_exc.MacAddressInUse(net_id=network_id, mac=mac_address)
def _setup_neutron_network_and_port(self, network_id, port_id):
    """Create a network and one ACTIVE port on it."""
    session = self.ctx.session
    with session.begin(subtransactions=True):
        session.add(models_v2.Network(id=network_id))
        session.add(models_v2.Port(id=port_id,
                                   network_id=network_id,
                                   mac_address='foo_mac_address',
                                   admin_state_up=True,
                                   status='ACTIVE',
                                   device_id='',
                                   device_owner=''))
def _create_router(self, gw_port=True, num_ports=2, create_routes=True):
    """Build an in-memory Router model with ports and static routes.

    Returns a ``(router, port_subnets)`` tuple where ``port_subnets``
    is a list of ``{'cidr': ...}`` dicts, one per interface port.
    """
    # GW CIDR: 10.0.0.0/24
    # Interface CIDRS: 10.0.1.0/24, 10.0.2.0/24, etc.
    router_id = uuidutils.generate_uuid()
    port_gw_cidr = netaddr.IPNetwork('10.0.0.0/24')
    rports = []
    if gw_port:
        # Gateway port takes the first host address of the GW CIDR.
        port_gw = models_v2.Port(
            id=uuidutils.generate_uuid(),
            fixed_ips=[
                models_v2.IPAllocation(
                    ip_address=str(port_gw_cidr.ip + 1))
            ])
        rports.append(
            l3_models.RouterPort(router_id=router_id, port=port_gw))
    else:
        port_gw = None
    port_cidrs = []
    port_subnets = []
    for idx in range(num_ports):
        # Each interface port sits on the next /24 after the GW CIDR.
        cidr = port_gw_cidr.cidr.next(idx + 1)
        port = models_v2.Port(
            id=uuidutils.generate_uuid(),
            fixed_ips=[
                models_v2.IPAllocation(ip_address=str(cidr.ip + 1))
            ])
        port_cidrs.append(cidr)
        rports.append(
            l3_models.RouterPort(router_id=router_id, port=port))
        port_subnets.append({'cidr': str(cidr)})
    routes = []
    if create_routes:
        # One static route per CIDR (interfaces plus the GW CIDR).
        for cidr in [*port_cidrs, port_gw_cidr]:
            routes.append(
                l3_models.RouterRoute(destination=str(cidr.next(100)),
                                      nexthop=str(cidr.ip + 10)))
    return (l3_models.Router(
                id=router_id, attached_ports=rports, route_list=routes,
                gw_port_id=port_gw.id if port_gw else None),
            port_subnets)
def write_neutron_db(Session, port):
    """Persist *port* (a dict-like with fixed IPs) into the neutron DB.

    NOTE(review): only the first entry of ``port['fixed_ips']`` is
    written; any additional fixed IPs are silently dropped — confirm
    callers never pass more than one.
    """
    session = Session()
    port_ref = models_v2.Port()
    values = dict(port)
    port_ref.fixed_ips = [models_v2.IPAllocation()]
    fixed_ip_dict = dict(values.pop('fixed_ips')[0])
    # ip_address/mac_address may be rich objects; stringify for the DB.
    fixed_ip_dict['ip_address'] = str(fixed_ip_dict['ip_address'])
    port_ref.fixed_ips[0].update(fixed_ip_dict)
    port_ref.update(values)
    port_ref.mac_address = str(port_ref.mac_address)
    session.add(port_ref)
    session.commit()
def _setup_neutron_port(self, network_id, port_id):
    """Create one DOWN port with a generated MAC and return it."""
    mac = db_base_plugin_v2.NeutronDbPluginV2._generate_mac()
    with self.ctx.session.begin(subtransactions=True):
        new_port = models_v2.Port(id=port_id,
                                  network_id=network_id,
                                  mac_address=mac,
                                  admin_state_up=True,
                                  status='DOWN',
                                  device_id='',
                                  device_owner='')
        self.ctx.session.add(new_port)
    return new_port
def test_notify_port_status_all_values(self):
    """Run the status-change helper for every (previous, current) pair."""
    states = [constants.PORT_STATUS_ACTIVE,
              constants.PORT_STATUS_DOWN,
              constants.PORT_STATUS_ERROR,
              constants.PORT_STATUS_BUILD,
              sql_attr.NO_VALUE]
    # Exercise the full cross product of status transitions.
    for prev_status in states:
        for curr_status in states:
            port = models_v2.Port(id='port-uuid',
                                  device_id='device-uuid',
                                  device_owner="compute:",
                                  status=curr_status)
            self._record_port_status_changed_helper(curr_status,
                                                    prev_status,
                                                    port)
def _make_port(self, port, network_id, device_id=None, device_owner=None):
    """Create, flush and return a fake-tenant port.

    *port* is a suffix used to build a unique MAC address.
    """
    session = self.context.session
    new_port = models_v2.Port(
        tenant_id='fake-tenant-id',
        name='',
        network_id=network_id,
        mac_address='aa:bb:cc:dd:ee:{0}'.format(port),
        admin_state_up=True,
        status='ACTIVE',
        device_id=device_id or '',
        device_owner=device_owner or '')
    session.add(new_port)
    session.flush()
    return new_port
def test_notify_port_status_all_values(self):
    """Run the status-change helper for every (previous, current) pair."""
    states = [n_const.PORT_STATUS_ACTIVE,
              n_const.PORT_STATUS_DOWN,
              n_const.PORT_STATUS_ERROR,
              n_const.PORT_STATUS_BUILD,
              sql_attr.NO_VALUE]
    instance_uuid = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
    # Exercise the full cross product of status transitions.
    for prev_status in states:
        for curr_status in states:
            port = models_v2.Port(id='port-uuid',
                                  device_id=instance_uuid,
                                  device_owner=DEVICE_OWNER_COMPUTE,
                                  status=curr_status)
            self._record_port_status_changed_helper(curr_status,
                                                    prev_status,
                                                    port)
def create_port_from_dict(self, record):
    """Persist a new Port built from *record* and return it."""
    with self._session.begin(subtransactions=True):
        new_port = nmodels.Port(
            id=uuidutils.generate_uuid(),
            tenant_id=record['tenant_id'],
            name=record["name"],
            network_id=record["network_id"],
            mac_address=record["mac_address"],
            admin_state_up=record["admin_state_up"],
            # Status comes straight from the record (ACTIVE by default).
            status=record["status"],
            device_id=record["device_id"],
            device_owner=record["device_owner"])
        self._session.add(new_port)
        return new_port
def test_update_db_subnet_new_pools_exception(self, pool_mock):
    """Original DBDeadlock must survive failing IPAM rollback actions.

    ``_update_db_port`` raises DBDeadlock; both rollback helpers
    (``_ipam_deallocate_ips`` / ``_ipam_allocate_ips``) are rigged to
    raise ValueError, and the test asserts the caller still sees the
    original DBDeadlock rather than the rollback error.
    """
    context = mock.Mock()
    mocks = self._prepare_mocks_with_pool_mock(pool_mock)
    mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
    new_port = {'fixed_ips': [{'ip_address': '192.168.1.20',
                               'subnet_id': uuidutils.generate_uuid()},
                              {'ip_address': '192.168.1.50',
                               'subnet_id': uuidutils.generate_uuid()}]}
    db_port = models_v2.Port(id=uuidutils.generate_uuid(),
                             network_id=uuidutils.generate_uuid())
    old_port = {'fixed_ips': [{'ip_address': '192.168.1.10',
                               'subnet_id': uuidutils.generate_uuid()},
                              {'ip_address': '192.168.1.50',
                               'subnet_id': uuidutils.generate_uuid()}]}
    # .20 added, .10 removed, .50 kept.
    changes = mocks['ipam'].Changes(
        add=[{'ip_address': '192.168.1.20',
              'subnet_id': uuidutils.generate_uuid()}],
        original=[{'ip_address': '192.168.1.50',
                   'subnet_id': uuidutils.generate_uuid()}],
        remove=[{'ip_address': '192.168.1.10',
                 'subnet_id': uuidutils.generate_uuid()}])
    mocks['ipam']._delete_ip_allocation = mock.Mock()
    mocks['ipam']._make_port_dict = mock.Mock(return_value=old_port)
    mocks['ipam']._update_ips_for_port = mock.Mock(return_value=changes)
    # The DB update fails, triggering the rollback path.
    mocks['ipam']._update_db_port = mock.Mock(
        side_effect=db_exc.DBDeadlock)
    # emulate raising exception on rollback actions
    mocks['ipam']._ipam_deallocate_ips = mock.Mock(side_effect=ValueError)
    mocks['ipam']._ipam_allocate_ips = mock.Mock(side_effect=ValueError)
    # Validate original exception (DBDeadlock) is not overridden by
    # exception raised on rollback (ValueError)
    with mock.patch.object(port_obj.IPAllocation, 'create'):
        self.assertRaises(db_exc.DBDeadlock,
                          mocks['ipam'].update_port_with_ips,
                          context, None, db_port, new_port, mock.Mock())
        mocks['ipam']._ipam_deallocate_ips.assert_called_once_with(
            context, mocks['driver'], db_port, changes.add,
            revert_on_fail=False)
        mocks['ipam']._ipam_allocate_ips.assert_called_once_with(
            context, mocks['driver'], db_port, changes.remove,
            revert_on_fail=False)
def _setup_neutron_network(self, network_id, port_ids):
    """Create a network plus one ACTIVE port per id; return the ports."""
    created = []
    with self.ctx.session.begin(subtransactions=True):
        self.ctx.session.add(models_v2.Network(id=network_id))
        for port_id in port_ids:
            mac_address = (db_base_plugin_v2.NeutronDbPluginV2.
                           _generate_mac())
            new_port = models_v2.Port(id=port_id,
                                      network_id=network_id,
                                      mac_address=mac_address,
                                      admin_state_up=True,
                                      status='ACTIVE',
                                      device_id='',
                                      device_owner='')
            self.ctx.session.add(new_port)
            created.append(new_port)
    return created
def test_notify_port_active_direct(self):
    """notify_port_active_direct must queue one completed VIF_PLUGGED event."""
    instance_uuid = '32102d7b-1cf4-404d-b50a-97aae1f55f87'
    port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222'
    port = models_v2.Port(id=port_id,
                          device_id=instance_uuid,
                          device_owner=DEVICE_OWNER_COMPUTE)
    expected_event = {'server_uuid': instance_uuid,
                      'name': nova.VIF_PLUGGED,
                      'status': 'completed',
                      'tag': port_id}
    self.nova_notifier.notify_port_active_direct(port)
    pending = self.nova_notifier.batch_notifier.pending_events
    self.assertEqual(1, len(pending))
    self.assertEqual(expected_event, pending[0])
def setUp(self):
    """Build the sample topology used by the L3 GW-mode tests.

    Creates: an external network (marked external), an internal
    network/subnet, a router with a gateway port on the external net,
    a router interface port owning the internal gateway IP, a compute
    port on the internal net, and an unassociated floating IP whose
    external leg is already allocated.
    """
    super(TestL3GwModeMixin, self).setUp()
    plugin = __name__ + '.' + TestDbIntPlugin.__name__
    self.setup_coreplugin(plugin)
    self.target_object = TestDbIntPlugin()
    # Patch the context
    ctx_patcher = mock.patch('neutron.context', autospec=True)
    mock_context = ctx_patcher.start()
    self.addCleanup(db_api.clear_db)
    self.context = mock_context.get_admin_context()
    # This ensure also calls to elevated work in unit tests
    self.context.elevated.return_value = self.context
    self.context.session = db_api.get_session()
    # Create sample data for tests
    self.ext_net_id = _uuid()
    self.int_net_id = _uuid()
    self.int_sub_id = _uuid()
    self.tenant_id = 'the_tenant'
    self.network = models_v2.Network(
        id=self.ext_net_id,
        tenant_id=self.tenant_id,
        admin_state_up=True,
        status=constants.NET_STATUS_ACTIVE)
    self.net_ext = external_net_db.ExternalNetwork(
        network_id=self.ext_net_id)
    self.context.session.add(self.network)
    # The following is to avoid complains from sqlite on
    # foreign key violations
    self.context.session.flush()
    self.context.session.add(self.net_ext)
    self.router = l3_db.Router(
        id=_uuid(),
        name=None,
        tenant_id=self.tenant_id,
        admin_state_up=True,
        status=constants.NET_STATUS_ACTIVE,
        enable_snat=True,
        gw_port_id=None)
    self.context.session.add(self.router)
    self.context.session.flush()
    # Router gateway port on the external network.
    self.router_gw_port = models_v2.Port(
        id=FAKE_GW_PORT_ID,
        tenant_id=self.tenant_id,
        device_id=self.router.id,
        device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
        admin_state_up=True,
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=FAKE_GW_PORT_MAC,
        network_id=self.ext_net_id)
    self.router.gw_port_id = self.router_gw_port.id
    self.context.session.add(self.router)
    self.context.session.add(self.router_gw_port)
    self.context.session.flush()
    # External leg of the floating IP.
    self.fip_ext_port = models_v2.Port(
        id=FAKE_FIP_EXT_PORT_ID,
        tenant_id=self.tenant_id,
        admin_state_up=True,
        device_id=self.router.id,
        device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=FAKE_FIP_EXT_PORT_MAC,
        network_id=self.ext_net_id)
    self.context.session.add(self.fip_ext_port)
    self.context.session.flush()
    self.int_net = models_v2.Network(
        id=self.int_net_id,
        tenant_id=self.tenant_id,
        admin_state_up=True,
        status=constants.NET_STATUS_ACTIVE)
    self.int_sub = models_v2.Subnet(
        id=self.int_sub_id,
        tenant_id=self.tenant_id,
        ip_version=4,
        cidr='3.3.3.0/24',
        gateway_ip='3.3.3.1',
        network_id=self.int_net_id)
    # Router interface port; its IP allocation is the subnet gateway.
    self.router_port = models_v2.Port(
        id=FAKE_ROUTER_PORT_ID,
        tenant_id=self.tenant_id,
        admin_state_up=True,
        device_id=self.router.id,
        device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=FAKE_ROUTER_PORT_MAC,
        network_id=self.int_net_id)
    self.router_port_ip_info = models_v2.IPAllocation(
        port_id=self.router_port.id,
        network_id=self.int_net.id,
        subnet_id=self.int_sub_id,
        ip_address='3.3.3.1')
    self.context.session.add(self.int_net)
    self.context.session.add(self.int_sub)
    self.context.session.add(self.router_port)
    self.context.session.add(self.router_port_ip_info)
    self.context.session.flush()
    # Internal (compute) port the floating IP will be associated with.
    self.fip_int_port = models_v2.Port(
        id=FAKE_FIP_INT_PORT_ID,
        tenant_id=self.tenant_id,
        admin_state_up=True,
        device_id='something',
        device_owner='compute:nova',
        status=constants.PORT_STATUS_ACTIVE,
        mac_address=FAKE_FIP_INT_PORT_MAC,
        network_id=self.int_net_id)
    self.fip_int_ip_info = models_v2.IPAllocation(
        port_id=self.fip_int_port.id,
        network_id=self.int_net.id,
        subnet_id=self.int_sub_id,
        ip_address='3.3.3.3')
    # Floating IP starts unassociated (fixed_port_id=None).
    self.fip = l3_db.FloatingIP(
        id=_uuid(),
        floating_ip_address='1.1.1.2',
        floating_network_id=self.ext_net_id,
        floating_port_id=FAKE_FIP_EXT_PORT_ID,
        fixed_port_id=None,
        fixed_ip_address=None,
        router_id=None)
    self.context.session.add(self.fip_int_port)
    self.context.session.add(self.fip_int_ip_info)
    self.context.session.add(self.fip)
    self.context.session.flush()
    self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
                        'tenant_id': self.tenant_id}
def test_set_subport(self):
    """Trunk subport creation must plug the port into its network.

    Simulates the AFTER_CREATE SubPort callback and verifies the
    driver plugs the subport using the trunk parent's binding details
    (host, vnic_type, profile) and the subport's network segments.
    """
    tenant_id = 'ten-4'
    network_id = 'net4-id'
    vm_id = 'vm4'
    trunk_id = 111
    host_id = 'host1'
    port_id = 'p1'
    vnic_type = 'allowed'
    profile = []
    sg = ['security-groups']
    orig_sg = None
    # Parent (trunk) port whose binding the subport inherits.
    trunk_port = models_v2.Port(tenant_id=tenant_id,
                                network_id='net-trunk',
                                device_id=vm_id,
                                device_owner='compute:None')
    trunk_port.port_binding = port_models.PortBinding()
    trunk_port.port_binding.vnic_type = vnic_type
    trunk_port.port_binding.host = host_id
    trunk_port.port_binding.profile = profile
    mechanism_arista.db_lib.num_nets_provisioned.return_value = 0
    mechanism_arista.db_lib.num_vms_provisioned.return_value = 0
    mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \
        trunk_port
    mechanism_arista.db_lib.is_network_provisioned.return_value = True
    resource = 'SubPort'
    event = 'AFTER_CREATE'
    trigger = 'AristaDriver'
    # The subport as the driver will fetch it from the plugin.
    sp = dict(
        models_v2.Port(id=port_id,
                       device_owner='trunk:subport',
                       network_id=network_id,
                       name='subport'))
    sp['security_groups'] = ['security-groups']
    subport = trunk_models.SubPort()
    subport.port_id = port_id
    payload = callbacks.TrunkPayload(None, trunk_id, subports=[subport])
    segments = [{
        'segmentation_id': 12,
        'physical_network': 'default',
        'id': 'segment_id',
        'network_type': 'vlan'
    }]
    bindings = []
    self.drv.ndb.get_port.return_value = sp
    self.drv.ndb.get_network_id_from_port_id.return_value = network_id
    mechanism_arista.db_lib.get_network_segments_by_port_id.return_value = \
        segments
    self.drv.set_subport(resource, event, trigger, payload=payload)
    expected_calls = [
        mock.call.NeutronNets(),
        mock.call.get_trunk_port_by_trunk_id(trunk_id),
        mock.call.get_network_segments_by_port_id('p1'),
        mock.call.plug_port_into_network(vm_id, host_id, port_id,
                                         network_id, tenant_id,
                                         'subport', 'trunk:subport',
                                         sg, orig_sg, vnic_type,
                                         segments=segments,
                                         switch_bindings=bindings),
    ]
    mechanism_arista.db_lib.assert_has_calls(expected_calls)