def add_port_binding(session, port_id):
    """Create an unbound PortBinding row for *port_id* and return it."""
    binding = models.PortBinding(
        port_id=port_id,
        vif_type=portbindings.VIF_TYPE_UNBOUND)
    # Add inside a (sub)transaction so the insert is committed with any
    # enclosing unit of work.
    with session.begin(subtransactions=True):
        session.add(binding)
    return binding
def _setup_neutron_portbinding(self, port_id, vif_type, host,
                               status=constants.ACTIVE):
    """Insert a PortBinding row for *port_id* via the writer context."""
    binding = models.PortBinding(port_id=port_id,
                                 vif_type=vif_type,
                                 host=host,
                                 status=status)
    with db_api.CONTEXT_WRITER.using(self.ctx):
        self.ctx.session.add(binding)
def test_process_binding_port_host_id_changed(self):
    """Rebinding a port to a new host must reset its status to DOWN."""
    ctx = context.get_admin_context()
    plugin = directory.get_plugin()
    host_id = {portbindings.HOST_ID: 'host1'}
    with self.port(**host_id) as port:
        # Since the port is DOWN at first
        # It's necessary to make its status ACTIVE for this test
        plugin.update_port_status(ctx, port['port']['id'],
                                  const.PORT_STATUS_ACTIVE)
        attrs = port['port']
        attrs['status'] = const.PORT_STATUS_ACTIVE
        # Snapshot the port before changing the host, then simulate the
        # update that moves it from host1 to host2.
        original_port = attrs.copy()
        attrs['binding:host_id'] = 'host2'
        updated_port = attrs.copy()
        network = {'id': attrs['network_id']}
        # Binding reflects the ORIGINAL (host1) state; original_port was
        # copied before the host mutation above.
        binding = ml2_models.PortBinding(
            port_id=original_port['id'],
            host=original_port['binding:host_id'],
            vnic_type=original_port['binding:vnic_type'],
            profile=jsonutils.dumps(original_port['binding:profile']),
            vif_type=original_port['binding:vif_type'],
            vif_details=original_port['binding:vif_details'])
        levels = []
        mech_context = driver_context.PortContext(
            plugin, ctx, updated_port, network, binding, levels,
            original_port=original_port)
        plugin._process_port_binding(mech_context, port['port'])
        # A host change must knock the port back DOWN, both in the dict
        # passed through the mechanism context and in the database.
        self.assertEqual(const.PORT_STATUS_DOWN, updated_port['status'])
        port_dict = plugin.get_port(ctx, port['port']['id'])
        self.assertEqual(const.PORT_STATUS_DOWN, port_dict['status'])
def _process_portbindings_create_and_update(self, context, port_data,
                                            port):
    """Persist host binding info for *port* and extend the result dict.

    Strips PROFILE/VNIC_TYPE keys that are present but unset, then
    creates or updates the PortBinding row when the request carries a
    HOST_ID, and finally reports the effective host on the port dict.
    """
    # Drop a PROFILE key that is present but not actually set
    # (e.g. ATTR_NOT_SPECIFIED); an absent key (None) is left alone.
    binding_profile = port.get(portbindings.PROFILE)
    binding_profile_set = validators.is_attr_set(binding_profile)
    if not binding_profile_set and binding_profile is not None:
        del port[portbindings.PROFILE]
    # Same treatment for VNIC_TYPE.
    binding_vnic = port.get(portbindings.VNIC_TYPE)
    binding_vnic_set = validators.is_attr_set(binding_vnic)
    if not binding_vnic_set and binding_vnic is not None:
        del port[portbindings.VNIC_TYPE]
    host = port_data.get(portbindings.HOST_ID)
    host_set = validators.is_attr_set(host)
    with context.session.begin(subtransactions=True):
        bind_port = context.session.query(
            models.PortBinding).filter_by(port_id=port['id']).first()
        if host_set:
            if not bind_port:
                # First binding for this port: create the row with the
                # plugin's fixed vif_type.
                context.session.add(
                    models.PortBinding(port_id=port['id'],
                                       host=host,
                                       vif_type=self.vif_type))
            else:
                bind_port.host = host
        else:
            # No host in the request: fall back to the stored host,
            # if any, so the dict reflects current state.
            host = bind_port.host if bind_port else None
    self._extend_port_dict_binding_host(port, host)
def _setup_neutron_portbinding(self, port_id, vif_type, host,
                               status=constants.ACTIVE):
    """Insert a PortBinding row for *port_id* using the legacy writer."""
    binding = models.PortBinding(port_id=port_id,
                                 vif_type=vif_type,
                                 host=host,
                                 status=status)
    with db_api.context_manager.writer.using(self.ctx):
        self.ctx.session.add(binding)
def test_unset_subport(self):
    """Removing a subport must unplug it and reap the empty tenant."""
    tenant_id = 'ten-4'
    network_id = 'net4-id'
    vm_id = 'vm4'
    trunk_id = 111
    device_owner = 'trunk:subport'
    host_id = 'host1'
    port_id = 'p1'
    vnic_type = 'allowed'
    profile = []
    # Parent (trunk) port whose binding supplies host/vnic/profile info.
    trunk_port = models_v2.Port(tenant_id=tenant_id,
                                network_id=network_id,
                                device_id=vm_id,
                                device_owner='compute:None')
    trunk_port.port_binding = port_models.PortBinding()
    trunk_port.port_binding.vnic_type = vnic_type
    trunk_port.port_binding.host = host_id
    trunk_port.port_binding.profile = profile
    # Tenant has no remaining resources, so the driver should delete it.
    mechanism_arista.db_lib.tenant_provisioned.return_value = False
    mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \
        trunk_port
    resource = 'SubPort'
    event = 'AFTER_DELETE'
    trigger = 'AristaDriver'
    subport = trunk_models.SubPort()
    subport.port_id = port_id
    # NOTE(review): the payload carries a fresh SubPort(), not the
    # 'subport' built above — possibly unintended; confirm with the
    # driver's payload handling.
    payload = callbacks.TrunkPayload(None, trunk_id,
                                     subports=[trunk_models.SubPort()])
    sp = models_v2.Port(id=port_id,
                        device_owner='trunk:subport',
                        network_id=network_id)
    self.drv.ndb.get_port.return_value = sp
    self.drv.unset_subport(resource, event, trigger, payload=payload)
    # The driver must unplug the subport with the parent port's binding
    # info and then clean up the now-empty tenant, in this exact order.
    expected_calls = [
        mock.call.NeutronNets(),
        mock.call.get_trunk_port_by_trunk_id(trunk_id),
        mock.call.unplug_port_from_network(vm_id, device_owner, host_id,
                                           port_id, network_id, tenant_id,
                                           [], vnic_type,
                                           switch_bindings=profile,
                                           trunk_details=None),
        mock.call.remove_security_group([], profile),
        mock.call.tenant_provisioned(tenant_id),
        mock.call.delete_tenant(tenant_id),
    ]
    mechanism_arista.db_lib.assert_has_calls(expected_calls)
def add_binding_unbound(context, port_id):
    """ml2 add_port_binding equivalent: attach an unbound binding row."""
    unbound = ml2_models.PortBinding(port_id=port_id,
                                     vif_type='unbound',
                                     vnic_type='normal',
                                     profile='',
                                     vif_details='',
                                     status='ACTIVE')
    context.session.add(unbound)
def ensure_port_binding(session, port_id):
    """Return the PortBinding row for *port_id*, creating an unbound
    one if none exists yet.
    """
    with session.begin(subtransactions=True):
        query = session.query(models.PortBinding).filter_by(
            port_id=port_id)
        try:
            binding = query.one()
        except exc.NoResultFound:
            # No row yet: create a fresh unbound binding.
            binding = models.PortBinding(
                port_id=port_id,
                vif_type=portbindings.VIF_TYPE_UNBOUND)
            session.add(binding)
        return binding
def test_status(self):
    """PortContext.status must mirror the binding's status field."""
    plugin = mock.Mock()
    plugin_context = mock.Mock()
    network = mock.MagicMock()
    binding = models.PortBinding()
    binding.status = 'foostatus'
    port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE}
    with mock.patch.object(driver_context.segments_db,
                           'get_network_segments'):
        ctx = driver_context.PortContext(plugin, plugin_context, port,
                                         network, binding, None)
        self.assertEqual('foostatus', ctx.status)
def test_port_emtpy_tenant_id_work_around(self):
    """Validate the work around code of port creation"""
    plugin = mock.Mock()
    plugin_context = mock.Mock()
    network = self._get_mock_operation_context(
        odl_const.ODL_NETWORK).current
    port = self._get_mock_operation_context(odl_const.ODL_PORT).current
    # A port created with an empty tenant_id should inherit the
    # network's tenant_id via the create-attribute filter.
    expected_tenant = network['tenant_id']
    port['tenant_id'] = ''
    binding = models.PortBinding()
    with mock.patch.object(segments_db, 'get_network_segments'):
        context = driver_context.PortContext(
            plugin, plugin_context, port, network, binding, 0, None)
        self.mech.odl_drv.FILTER_MAP[
            odl_const.ODL_PORTS].filter_create_attributes(port, context)
        self.assertEqual(expected_tenant, port['tenant_id'])
def test_host_super(self):
    """The port dict's HOST_ID wins over the binding's host attribute."""
    plugin = mock.Mock()
    plugin_context = mock.Mock()
    network = mock.MagicMock()
    binding = models.PortBinding()
    binding.host = 'foohost'
    port = {
        'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX,
        portbindings.HOST_ID: 'host'
    }
    with mock.patch.object(driver_context.segments_db,
                           'get_network_segments'):
        ctx = driver_context.PortContext(plugin, plugin_context, port,
                                         network, binding, None)
        self.assertEqual('host', ctx.host)
def test_simple_sync_all_with_HTTPError_not_found(self):
    """Full sync must re-POST every resource when ODL GETs return 404."""
    self.given_back_end.out_of_sync = True
    ml2_plugin = plugin.Ml2Plugin()
    port_mock = mock.MagicMock(port_binding=models.PortBinding())
    response = mock.Mock(status_code=requests.codes.not_found)
    fake_exception = requests.exceptions.HTTPError('Test',
                                                   response=response)

    def side_eff(*args, **kwargs):
        # HTTP ERROR exception with 404 status code will be raised when use
        # sendjson to get the object in ODL DB
        if args[0] == 'get':
            raise fake_exception

    # Patch the REST client and every plugin getter so sync_full sees
    # exactly one of each resource type.
    with mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
                           side_effect=side_eff), \
            mock.patch.object(plugin.Ml2Plugin, 'get_networks',
                              return_value=[FAKE_NETWORK.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_network',
                              return_value=FAKE_NETWORK.copy()), \
            mock.patch.object(plugin.Ml2Plugin, 'get_subnets',
                              return_value=[FAKE_SUBNET.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, '_get_port',
                              return_value=port_mock), \
            mock.patch.object(plugin.Ml2Plugin, 'get_ports',
                              return_value=[FAKE_PORT.copy()]), \
            mock.patch.object(plugin.Ml2Plugin, 'get_security_groups',
                              return_value=[FAKE_SECURITY_GROUP.copy()]), \
            mock.patch.object(
                plugin.Ml2Plugin, 'get_security_group_rules',
                return_value=[FAKE_SECURITY_GROUP_RULE.copy()]):
        self.given_back_end.sync_full(ml2_plugin)
        # Expected re-creation order: security group + rule first, then
        # network, subnet, port.
        sync_id_list = [FAKE_SECURITY_GROUP['id'],
                        FAKE_SECURITY_GROUP_RULE['id'],
                        FAKE_NETWORK['id'],
                        FAKE_SUBNET['id'],
                        FAKE_PORT['id']]
        # Collect the ids actually POSTed, in call order; this must be
        # read while the sendjson patch is still active.
        act = []
        for args, kwargs in \
                client.OpenDaylightRestClient.sendjson.call_args_list:
            if args[0] == 'post':
                for key in args[2]:
                    act.append(args[2][key][0]['id'])
        self.assertEqual(act, sync_id_list)
def add_binding_bound(context, port_id, segment_id, host, interface_name):
    """Record a completed midonet binding: one level row plus the
    PortBinding row itself.
    """
    context.session.add(
        ml2_models.PortBindingLevel(port_id=port_id,
                                    host=host,
                                    level=0,
                                    driver='midonet',
                                    segment_id=segment_id))
    # The binding profile carries the interface name only when one was
    # supplied by the caller.
    profile = ({} if interface_name is None
               else {'interface_name': interface_name})
    context.session.add(
        ml2_models.PortBinding(
            port_id=port_id,
            host=host,
            vif_type='midonet',
            vnic_type='normal',
            profile=jsonutils.dumps(profile),
            vif_details=jsonutils.dumps({'port_filter': True}),
            status='ACTIVE'))
def _process_portbindings_create_and_update(
        self, context, port, port_res,
        vif_type=nsx_constants.VIF_TYPE_DVS):
    """Persist NSX-specific binding data for a port and extend the
    result dict with the binding attributes.

    Runs the generic mixin processing first, then stores vnic_type,
    vif_details (including the VLAN id for flat/vlan networks) and the
    binding profile on the PortBinding row.
    """
    super(NsxPortBindingMixin,
          self)._process_portbindings_create_and_update(
        context, port, port_res)
    port_id = port_res['id']
    # Fall back to the previously stored vnic_type when the request
    # does not carry one.
    org_vnic_type = nsxv_db.get_nsxv_ext_attr_port_vnic_type(
        context.session, port_id)
    vnic_type = port.get(pbin.VNIC_TYPE, org_vnic_type)
    # Port filtering is only supported for the 'normal' vnic type.
    cap_port_filter = (port.get(pbin.VNIC_TYPE,
                                org_vnic_type) == pbin.VNIC_NORMAL)
    vif_details = {pbin.CAP_PORT_FILTER: cap_port_filter}
    network = self.get_network(context, port_res['network_id'])
    # Expose the VLAN tag in vif_details: the fixed FLAT_VLAN for flat
    # networks, the network's segmentation id for vlan networks.
    if network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.FLAT:
        vif_details[pbin.VIF_DETAILS_VLAN] = FLAT_VLAN
    elif network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN:
        vif_details[pbin.VIF_DETAILS_VLAN] = network[pnet.SEGMENTATION_ID]
    with db_api.CONTEXT_WRITER.using(context):
        port_binding = context.session.query(
            pbin_model.PortBinding).filter_by(port_id=port_id).first()
        if not port_binding:
            port_binding = pbin_model.PortBinding(
                port_id=port_id, vif_type=vif_type)
            context.session.add(port_binding)
        port_binding.host = port_res[pbin.HOST_ID] or ''
        port_binding.vnic_type = vnic_type
        port_binding.vif_details = jsonutils.dumps(vif_details)
        nsxv_db.update_nsxv_port_ext_attributes(
            context.session, port_id, vnic_type)
        profile = port.get(pbin.PROFILE, constants.ATTR_NOT_SPECIFIED)
        # An explicit None profile clears the stored value; an unset
        # profile (ATTR_NOT_SPECIFIED) leaves it untouched.
        if validators.is_attr_set(profile) or profile is None:
            port_binding.profile = (jsonutils.dumps(profile)
                                    if profile else "")
        port_res[pbin.VNIC_TYPE] = vnic_type
    self.extend_port_portbinding(port_res, port_binding)
def test_update_port_filter(self):
    """Validate the filter code on update port operation"""
    items_to_filter = ['network_id', 'id', 'status', 'tenant_id']
    # BUG FIX: 'plugin' was referenced in the PortContext call below but
    # never defined in this test, so it resolved to the imported
    # 'plugin' module (or raised NameError). Sibling tests build it as
    # a Mock; do the same here.
    plugin = mock.Mock()
    plugin_context = mock.Mock()
    network = self._get_mock_operation_context(
        odl_const.ODL_NETWORK).current
    subnet = self._get_mock_operation_context(odl_const.ODL_SUBNET).current
    port = self._get_mock_operation_context(odl_const.ODL_PORT).current
    port['fixed_ips'] = [{'subnet_id': subnet['id'],
                          'ip_address': '10.0.0.10'}]
    port['mac_address'] = port['mac_address'].upper()
    orig_port = copy.deepcopy(port)
    binding = models.PortBinding()
    with mock.patch.object(segments_db, 'get_network_segments'):
        context = driver_context.PortContext(
            plugin, plugin_context, port, network, binding, 0, None)
        self.mech.odl_drv.FILTER_MAP[
            odl_const.ODL_PORTS].filter_update_attributes(port, context)
        # Everything except the filtered keys must survive unchanged.
        for key, value in port.items():
            if key not in items_to_filter:
                self.assertEqual(orig_port[key], value)
def test_trunk_port(self):
    """get_port must expose trunk subport attributes for a bound port."""
    with self.session.begin(subtransactions=True):
        # Make port 2 a VLAN subport (tag 11) of trunk 1...
        self.session.add(trunk_model.SubPort(
            port_id=self.port_id_2,
            trunk_id=self.trunk_id_1,
            segmentation_type="vlan",
            segmentation_id=11
        ))
        # ...bound on this test's host with an OVS vif...
        self.session.add(ml2_models.PortBinding(
            port_id=self.port_id_2,
            host=self.host,
            vif_type="ovs"
        ))
        # ...by the NSXv3 driver at binding level 1.
        self.session.add(ml2_models.PortBindingLevel(
            port_id=self.port_id_2,
            host=self.host,
            driver=nsxv3_constants.NSXV3,
            level=1
        ))
    port_2 = db.get_port(self.ctx, self.host, self.port_id_2)
    # parent_id must point at the trunk's parent port and traffic_tag
    # at the subport's segmentation id.
    self.assertDictSupersetOf(
        {
            "id": self.port_id_2,
            "parent_id": self.port_id_1,
            "traffic_tag": 11,
            "admin_state_up": True,
            "status": "ACTIVE",
            "qos_policy_id": "",
            "security_groups": [],
            "address_bindings": [],
            "revision_number": 0,
            "binding:host_id": "test",
            "vif_details": "",
            "binding:vnic_type": "normal",
            "binding:vif_type": "ovs"
        },
        port_2)
def _setup_neutron_portbinding(self, port_id, vif_type, host):
    """Insert a PortBinding row for the given port directly in the DB."""
    binding = models.PortBinding(port_id=port_id,
                                 vif_type=vif_type,
                                 host=host)
    with self.ctx.session.begin(subtransactions=True):
        self.ctx.session.add(binding)
def _populate_neutron_db(self):
    """Seed the test database: one network/subnetpool/subnet, two
    ports, plus binding, IP-allocation, QoS, trunk and security-group
    rows for port 1.
    """
    self.plugin.create_network(self.ctx, {"network": {
        "tenant_id": self.tenant_id,
        "id": self.net_id,
        "shared": False,
        "name": "test_net_1",
        "admin_state_up": True,
        "description": ""
    }})
    self.plugin.create_subnetpool(self.ctx, {"subnetpool": {
        "tenant_id": self.tenant_id,
        "id": self.ip_pool_id,
        "name": "default_test_pool",
        "prefixes": ["192.168.0.0",
                     "192.168.1.0",
                     "192.168.2.0"],
        # "min_prefix": 16,
        "min_prefixlen": 16,
        # "max_prefix": "",
        "max_prefixlen": 32,
        # "default_prefix": "",
        "default_prefixlen": 32,
        # "default_quota": "",
        # "address_scope_id": "",
        "is_default": True,
        "shared": True,
        "description": ""
    }})
    self.plugin.create_port(self.ctx, {"port": {
        "tenant_id": self.tenant_id,
        "name": "test_port_1",
        "id": self.port_id_1,
        "network_id": self.net_id,
        "fixed_ips": constants.ATTR_NOT_SPECIFIED,
        "admin_state_up": True,
        "device_id": "123",
        "device_owner": "admin",
        "description": ""
    }})
    self.plugin.create_port(self.ctx, {"port": {
        "tenant_id": self.tenant_id,
        "name": "test_port_2",
        "id": self.port_id_2,
        "network_id": self.net_id,
        "fixed_ips": constants.ATTR_NOT_SPECIFIED,
        "admin_state_up": True,
        "device_id": "1234",
        "device_owner": "admin",
        "description": ""
    }})
    subnet = self.plugin.create_subnet(self.ctx, {"subnet": {
        "tenant_id": self.tenant_id,
        "name": "subnet_192_168",
        "cidr": "192.168.0.0/32",
        "ip_version": 4,
        "network_id": self.net_id,
        "subnetpool_id": self.ip_pool_id,
        "allocation_pools": [],
        "enable_dhcp": True,
        "dns_nameservers": [],
        "host_routes": []
    }})
    # Rows added directly via the session (no plugin API involved):
    # port 1's binding and binding level, its fixed IP, a QoS policy,
    # a trunk parented on port 1, and a security group.
    neutron_db = [
        ml2_models.PortBinding(
            port_id=self.port_id_1,
            host=self.host,
            vif_type="ovs"
        ),
        ml2_models.PortBindingLevel(
            port_id=self.port_id_1,
            host=self.host,
            driver=nsxv3_constants.NSXV3,
            level=1
        ),
        models_v2.IPAllocation(
            port_id=self.port_id_1,
            ip_address="192.168.0.100",
            subnet_id=subnet.get("id"),
            network_id=self.net_id
        ),
        QosPolicy(
            id=self.qos_id_1,
            project_id=self.tenant_id,
            name="Test_QOS_1"
        ),
        trunk_model.Trunk(
            id=self.trunk_id_1,
            project_id=self.tenant_id,
            name="test_trunk_1",
            port_id=self.port_id_1
        ),
        sg_model.SecurityGroup(
            id=self.sg_id_1,
            project_id=self.tenant_id,
            name="test_sg_1",
        )
    ]
    with self.session.begin(subtransactions=True):
        for entry in neutron_db:
            self.session.add(entry)
def add_port_binding(context, port_id):
    """Create and return an unbound PortBinding row for *port_id*."""
    unbound = models.PortBinding(
        port_id=port_id,
        vif_type=portbindings.VIF_TYPE_UNBOUND)
    context.session.add(unbound)
    return unbound
def test_set_subport(self):
    """Adding a subport must plug it into the network with the parent
    trunk port's host/vnic info and the subport's segments.
    """
    tenant_id = 'ten-4'
    network_id = 'net4-id'
    vm_id = 'vm4'
    trunk_id = 111
    host_id = 'host1'
    port_id = 'p1'
    vnic_type = 'allowed'
    profile = []
    sg = ['security-groups']
    orig_sg = None
    # Parent (trunk) port whose binding supplies host/vnic/profile info.
    trunk_port = models_v2.Port(tenant_id=tenant_id,
                                network_id='net-trunk',
                                device_id=vm_id,
                                device_owner='compute:None')
    trunk_port.port_binding = port_models.PortBinding()
    trunk_port.port_binding.vnic_type = vnic_type
    trunk_port.port_binding.host = host_id
    trunk_port.port_binding.profile = profile
    mechanism_arista.db_lib.num_nets_provisioned.return_value = 0
    mechanism_arista.db_lib.num_vms_provisioned.return_value = 0
    mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \
        trunk_port
    mechanism_arista.db_lib.is_network_provisioned.return_value = True
    resource = 'SubPort'
    event = 'AFTER_CREATE'
    trigger = 'AristaDriver'
    sp = dict(
        models_v2.Port(id=port_id,
                       device_owner='trunk:subport',
                       network_id=network_id,
                       name='subport'))
    sp['security_groups'] = ['security-groups']
    subport = trunk_models.SubPort()
    subport.port_id = port_id
    payload = callbacks.TrunkPayload(None, trunk_id, subports=[subport])
    segments = [{
        'segmentation_id': 12,
        'physical_network': 'default',
        'id': 'segment_id',
        'network_type': 'vlan'
    }]
    bindings = []
    self.drv.ndb.get_port.return_value = sp
    self.drv.ndb.get_network_id_from_port_id.return_value = network_id
    mechanism_arista.db_lib.get_network_segments_by_port_id.return_value = \
        segments
    self.drv.set_subport(resource, event, trigger, payload=payload)
    # The driver must look up the trunk's parent port and the subport's
    # segments, then plug the subport using the parent's binding info.
    expected_calls = [
        mock.call.NeutronNets(),
        mock.call.get_trunk_port_by_trunk_id(trunk_id),
        mock.call.get_network_segments_by_port_id('p1'),
        mock.call.plug_port_into_network(vm_id, host_id, port_id,
                                         network_id, tenant_id,
                                         'subport', 'trunk:subport',
                                         sg, orig_sg, vnic_type,
                                         segments=segments,
                                         switch_bindings=bindings),
    ]
    mechanism_arista.db_lib.assert_has_calls(expected_calls)