def process_update_network(self, plugin_context, request_data, db_data):
    """Sync a network's DNS domain with the value in an update request.

    :param plugin_context: request context used for DB access
    :param request_data: the network update request body
    :param db_data: current network dict from the DB; mutated in place
        so the response reflects the new dns_domain value
    """
    new_value = request_data.get(dns.DNSDOMAIN)
    # An unset attribute means the request does not touch dns_domain.
    if not validators.is_attr_set(new_value):
        return
    current_dns_domain = db_data.get(dns.DNSDOMAIN)
    # No-op when the requested value matches what is already stored.
    if current_dns_domain == new_value:
        return
    net_id = db_data['id']
    if current_dns_domain:
        # A NetworkDNSDomain row already exists: update or delete it.
        net_dns_domain = obj_reg.load_class('NetworkDNSDomain').get_object(
            plugin_context, network_id=net_id)
        if new_value:
            net_dns_domain['dns_domain'] = new_value
            db_data[dns.DNSDOMAIN] = new_value
            net_dns_domain.update()
        else:
            # Empty new value removes the DNS domain entirely.
            net_dns_domain.delete()
            db_data[dns.DNSDOMAIN] = ''
    elif new_value:
        # No existing row and a non-empty value requested: create one.
        obj_reg.new_instance('NetworkDNSDomain', plugin_context,
                             network_id=net_id,
                             dns_domain=new_value).create()
        db_data[dns.DNSDOMAIN] = new_value
def process_create_network(self, plugin_context, request_data, db_data):
    """Persist a requested DNS domain for a newly created network.

    Does nothing when dns_domain is absent from the request or empty;
    otherwise stores a NetworkDNSDomain row and mirrors the value into
    db_data so the response includes it.
    """
    requested_domain = request_data.get(dns.DNSDOMAIN)
    # Bail out unless a non-empty dns_domain was actually supplied.
    if not validators.is_attr_set(requested_domain) or not requested_domain:
        return
    domain_obj = obj_reg.new_instance('NetworkDNSDomain', plugin_context,
                                      network_id=db_data['id'],
                                      dns_domain=requested_domain)
    domain_obj.create()
    db_data[dns.DNSDOMAIN] = requested_domain
def _update_dns_db(self, dns_name, dns_domain, db_data, plugin_context,
                   has_fixed_ips):
    """Create or update a port's PortDNS record after a port update.

    :param dns_name: requested dns_name, or None when the request does
        not change it
    :param dns_domain: dns_domain to pair with a non-empty dns_name
    :param db_data: port dict; only its 'id' is read here
    :param plugin_context: request context used for DB access
    :param has_fixed_ips: whether the update also changed fixed IPs,
        which forces the current name/domain to be rotated into the
        previous_* fields even when dns_name itself is unchanged
    :returns: the PortDNS object, or None when there is nothing to store
    """
    dns_data_db = obj_reg.load_class('PortDNS').get_object(
        plugin_context, port_id=db_data['id'])
    if dns_data_db:
        # None means "not supplied", so only a real, different value
        # counts as a name change.
        is_dns_name_changed = (dns_name is not None and
                               dns_data_db['current_dns_name'] != dns_name)
        if is_dns_name_changed or (has_fixed_ips and
                                   dns_data_db['current_dns_name']):
            # Remember the previous name/domain so stale external DNS
            # records can be cleaned up later.
            dns_data_db['previous_dns_name'] = (
                dns_data_db['current_dns_name'])
            dns_data_db['previous_dns_domain'] = (
                dns_data_db['current_dns_domain'])
            if is_dns_name_changed:
                dns_data_db[dns.DNSNAME] = dns_name
                dns_data_db['current_dns_name'] = dns_name
                # An empty dns_name clears the domain as well.
                if dns_name:
                    dns_data_db['current_dns_domain'] = dns_domain
                else:
                    dns_data_db['current_dns_domain'] = ''
        dns_data_db.update()
        return dns_data_db
    if dns_name:
        # First DNS data for this port: create a fresh record.
        dns_data_db = obj_reg.new_instance('PortDNS', plugin_context,
                                           port_id=db_data['id'],
                                           current_dns_name=dns_name,
                                           current_dns_domain=dns_domain,
                                           previous_dns_name='',
                                           previous_dns_domain='',
                                           dns_name=dns_name)
        dns_data_db.create()
    return dns_data_db
def _update_dns_db(self, dns_name, dns_domain, db_data, plugin_context,
                   has_fixed_ips):
    """Create or update a port's PortDNS record after a port update.

    NOTE(review): this definition appears to duplicate an identical
    _update_dns_db elsewhere in the file — consider consolidating.

    :param dns_name: requested dns_name, or None when the request does
        not change it
    :param dns_domain: dns_domain to pair with a non-empty dns_name
    :param db_data: port dict; only its 'id' is read here
    :param plugin_context: request context used for DB access
    :param has_fixed_ips: whether the update also changed fixed IPs,
        which forces the current name/domain to be rotated into the
        previous_* fields even when dns_name itself is unchanged
    :returns: the PortDNS object, or None when there is nothing to store
    """
    dns_data_db = obj_reg.load_class('PortDNS').get_object(
        plugin_context, port_id=db_data['id'])
    if dns_data_db:
        # None means "not supplied", so only a real, different value
        # counts as a name change.
        is_dns_name_changed = (dns_name is not None and
                               dns_data_db['current_dns_name'] != dns_name)
        if is_dns_name_changed or (has_fixed_ips and
                                   dns_data_db['current_dns_name']):
            # Remember the previous name/domain so stale external DNS
            # records can be cleaned up later.
            dns_data_db['previous_dns_name'] = (
                dns_data_db['current_dns_name'])
            dns_data_db['previous_dns_domain'] = (
                dns_data_db['current_dns_domain'])
            if is_dns_name_changed:
                dns_data_db[dns.DNSNAME] = dns_name
                dns_data_db['current_dns_name'] = dns_name
                # An empty dns_name clears the domain as well.
                if dns_name:
                    dns_data_db['current_dns_domain'] = dns_domain
                else:
                    dns_data_db['current_dns_domain'] = ''
        dns_data_db.update()
        return dns_data_db
    if dns_name:
        # First DNS data for this port: create a fresh record.
        dns_data_db = obj_reg.new_instance(
            'PortDNS', plugin_context,
            port_id=db_data['id'],
            current_dns_name=dns_name,
            current_dns_domain=dns_domain,
            previous_dns_name='',
            previous_dns_domain='',
            dns_name=dns_name)
        dns_data_db.create()
    return dns_data_db
def test_bw_rule_create_profile_maximal_val(self, *mocks):
    """Driver precommit rejects a burst value above the allowed maximum
    when updating a bandwidth-limit rule."""
    over_limit_burst = qos_utils.MAX_BURST_MAX_VALUE + 1
    payload = {
        'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
                                 'max_kbps': 1025,
                                 'max_burst_kbps': over_limit_burst}}
    bad_rule = obj_reg.new_instance('QosBandwidthLimitRule', self.ctxt,
                                    **payload['bandwidth_limit_rule'])
    policy_obj = QoSPolicyObject(self.ctxt, **self.policy_data['policy'])
    # attach the offending rule to the policy
    policy_obj.rules = [bad_rule]
    with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
                    return_value=policy_obj),\
        mock.patch('neutron.objects.db.api.update_object',
                   return_value=payload):
        self.assertRaises(
            exceptions.DriverCallError,
            self.qos_plugin.update_policy_bandwidth_limit_rule,
            self.ctxt, bad_rule.id, policy_obj.id, payload)
def _get_ports_with_policy(self, context, policy):
    """Return top-level Port objects affected by a QoS policy.

    Collects ports on the policy's bound networks (skipping ports that
    carry their own policy override) plus ports bound to the policy
    directly, then attaches a PortBinding to each, mirroring the bottom
    (region) port's binding details when a top->bottom mapping exists.
    """
    networks_ids = policy.get_bound_networks()
    ports_with_net_policy = obj_reg.load_class('Port').get_objects(
        context, network_id=networks_ids)
    # Filter only these ports which don't have overwritten policy
    ports_with_net_policy = [
        port for port in ports_with_net_policy
        if port.qos_policy_id is None
    ]
    ports_ids = policy.get_bound_ports()
    ports_with_policy = obj_reg.load_class('Port').get_objects(
        context, id=ports_ids)
    # De-duplicate: a port can appear in both collections.
    t_ports = list(set(ports_with_policy + ports_with_net_policy))
    t_ctx = t_context.get_context_from_neutron_context(context)
    for t_port in t_ports:
        mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_port.id, t_constants.RT_PORT)
        if mappings:
            # Fetch the bottom port from its region and copy its
            # binding information onto the top port.
            b_pod, b_port_id = mappings[0]
            b_region_name = b_pod['region_name']
            b_client = self._get_client(region_name=b_region_name)
            b_port = b_client.get_ports(t_ctx, b_port_id)
            new_binding = obj_reg.new_instance(
                'PortBinding',
                port_id=t_port.id,
                vif_type=b_port.get('binding:vif_type',
                                    portbindings.VIF_TYPE_UNBOUND),
                vnic_type=b_port.get('binding:vnic_type',
                                     portbindings.VNIC_NORMAL)
            )
            t_port.binding = new_binding
        else:
            # No bottom mapping yet: default to an unbound binding.
            new_binding = obj_reg.new_instance(
                'PortBinding',
                port_id=t_port.id,
                vif_type=portbindings.VIF_TYPE_UNBOUND,
                vnic_type=portbindings.VNIC_NORMAL
            )
            t_port.binding = new_binding
    return t_ports
def _make_subnet(self, network_id):
    """Create and persist an IPv4 test Subnet on *network_id*.

    Uses the module-level CIDR and GW_IP fixtures; returns the created
    Subnet object.
    """
    subnet_obj = obj_reg.new_instance(
        'Subnet',
        self.context,
        network_id=network_id,
        ip_version=4,
        cidr=netaddr.IPNetwork(CIDR),
        gateway_ip=GW_IP)
    subnet_obj.create()
    return subnet_obj
def _get_ports_with_policy(self, context, policy):
    """Return top-level Port objects affected by a QoS policy.

    NOTE(review): this definition appears to duplicate an identical
    _get_ports_with_policy elsewhere in the file — consider
    consolidating.

    Collects ports on the policy's bound networks (skipping ports that
    carry their own policy override) plus ports bound to the policy
    directly, then attaches a PortBinding to each, mirroring the bottom
    (region) port's binding details when a top->bottom mapping exists.
    """
    networks_ids = policy.get_bound_networks()
    ports_with_net_policy = obj_reg.load_class('Port').get_objects(
        context, network_id=networks_ids)
    # Filter only these ports which don't have overwritten policy
    ports_with_net_policy = [
        port for port in ports_with_net_policy
        if port.qos_policy_id is None
    ]
    ports_ids = policy.get_bound_ports()
    ports_with_policy = obj_reg.load_class('Port').get_objects(
        context, id=ports_ids)
    # De-duplicate: a port can appear in both collections.
    t_ports = list(set(ports_with_policy + ports_with_net_policy))
    t_ctx = t_context.get_context_from_neutron_context(context)
    for t_port in t_ports:
        mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_port.id, t_constants.RT_PORT)
        if mappings:
            # Fetch the bottom port from its region and copy its
            # binding information onto the top port.
            b_pod, b_port_id = mappings[0]
            b_region_name = b_pod['region_name']
            b_client = self._get_client(region_name=b_region_name)
            b_port = b_client.get_ports(t_ctx, b_port_id)
            new_binding = obj_reg.new_instance(
                'PortBinding',
                port_id=t_port.id,
                vif_type=b_port.get('binding:vif_type',
                                    portbindings.VIF_TYPE_UNBOUND),
                vnic_type=b_port.get('binding:vnic_type',
                                     portbindings.VNIC_NORMAL))
            t_port.binding = new_binding
        else:
            # No bottom mapping yet: default to an unbound binding.
            new_binding = obj_reg.new_instance(
                'PortBinding',
                port_id=t_port.id,
                vif_type=portbindings.VIF_TYPE_UNBOUND,
                vnic_type=portbindings.VNIC_NORMAL)
            t_port.binding = new_binding
    return t_ports
def test_get_objects_from_network_id(self):
    """get_objects(network_id=...) returns only the association of the
    router that the network is actually plugged behind."""
    router = obj_reg.new_instance('Router', self.context)
    router.create()
    self.project = uuidutils.generate_uuid()
    # put a network behind a router
    network = obj_reg.new_instance('Network', self.context)
    network.create()
    subnet = self._make_subnet(network.id)
    self._connect_router_network(router.id, network.id)
    bgpvpn = self._create_test_bgpvpn()
    assoc = bgpvpn_obj.BGPVPNRouterAssociation(
        self.context,
        project_id=self.project,
        router_id=router.id,
        bgpvpn_id=bgpvpn.id)
    assoc.create()
    # an unrelated router/BGPVPN pair that must not show up below
    other_router = obj_reg.new_instance('Router', self.context)
    other_router.create()
    other_assoc = bgpvpn_obj.BGPVPNRouterAssociation(
        self.context,
        project_id=self.project,
        router_id=other_router.id,
        bgpvpn_id=self._create_test_bgpvpn_id())
    other_assoc.create()
    # only the first association is visible through the network filter
    found = bgpvpn_obj.BGPVPNRouterAssociation.get_objects(
        self.context, network_id=network.id)
    self.assertEqual(1, len(found))
    self.assertEqual(found[0].bgpvpn.id, bgpvpn.id)
    self.assertIn(
        subnet.id,
        [s['id'] for s in found[0].all_subnets(network.id)])
def setUp(self):
    """Build the ingress/egress network+port pairs and the chain-hop
    fixtures shared by the port-hops tests."""
    super(BaGPipePortHopsObjectTestCase, self).setUp()
    self.context = context.get_admin_context()

    def _network_with_port(mac):
        # One fresh network plus a compute port on it with *mac*.
        net = obj_reg.new_instance('Network', self.context)
        net.create()
        port = obj_reg.new_instance(
            'Port', self.context,
            network_id=net.id,
            mac_address=netaddr.EUI(mac,
                                    dialect=netaddr.mac_unix_expanded),
            device_id='test_device_id',
            device_owner='compute:None',
            status="DUMMY_STATUS",
            admin_state_up=True)
        port.create()
        return net, port

    self.ingress_network, self.ingress_port = _network_with_port(
        INGRESS_MAC)
    self.egress_network, self.egress_port = _network_with_port(
        EGRESS_MAC)
    self.port_chain1 = self._create_test_port_chain()
    # Hops deliberately cross the two networks.
    self.chain_hop1 = self._create_test_chain_hop(
        RT1, ingress_network=self.egress_network.id)
    self.chain_hop2 = self._create_test_chain_hop(
        RT2, egress_network=self.ingress_network.id)
def create_security_group_without_rules(self, context, security_group,
                                        default_sg, is_provider):
    """Create a neutron security group, without any default rules.

    This method creates a security group that does not by default
    enable egress traffic which normal neutron security groups do.

    :param context: neutron request context
    :param security_group: request body wrapping a 'security_group'
        dict
    :param default_sg: whether this is the tenant's default group
    :param is_provider: marks the result as a provider security group
        in the returned dict
    :returns: the security group dict extended with policy/provider
        keys
    """
    s = security_group['security_group']
    kwargs = {
        'context': context,
        'security_group': s,
        'is_default': default_sg,
    }
    # Gives registered callbacks a chance to veto the creation.
    self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                          exc_cls=ext_sg.SecurityGroupConflict,
                          payload=events.DBEventPayload(
                              context,
                              metadata={'is_default': default_sg},
                              request_body=security_group,
                              desired_state=s))
    tenant_id = s['tenant_id']
    # A non-default group still requires the tenant's default group to
    # exist first.
    if not default_sg:
        self._ensure_default_security_group(context, tenant_id)
    with db_api.CONTEXT_WRITER.using(context):
        sg = obj_reg.new_instance(
            'SecurityGroup', context,
            id=s.get('id') or uuidutils.generate_uuid(),
            description=s.get('description', ''),
            project_id=tenant_id,
            name=s.get('name', ''),
            is_default=default_sg)
        # Note(asarfaty): for unknown reason, removing the 'is_default'
        # here allows the loading of the ext_properties of the security
        # group. If not - we will get DetachedInstanceError
        if 'is_default' in sg.fields_no_update:
            sg.fields_no_update.remove('is_default')
        sg.create()
        secgroup_dict = self._make_security_group_dict(sg)
        secgroup_dict[sg_policy.POLICY] = s.get(sg_policy.POLICY)
        secgroup_dict[provider_sg.PROVIDER] = is_provider
        kwargs['security_group'] = secgroup_dict
    registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                    **kwargs)
    return secgroup_dict
def _connect_router_network(self, router_id, network_id, subnet_id=None,
                            gw_network=False):
    """Wire a test port on *network_id* into router *router_id*.

    Optionally allocates GW_IP on *subnet_id*; when *gw_network* is
    set the port is marked as the router's gateway port instead of a
    plain router interface.
    """
    port = obj_reg.new_instance(
        'Port', self.context,
        network_id=network_id,
        mac_address=netaddr.EUI(GW_MAC,
                                dialect=netaddr.mac_unix_expanded),
        device_id='test_device_id',
        device_owner=constants.DEVICE_OWNER_ROUTER_INTF,
        status="DUMMY_STATUS",
        admin_state_up=True)
    if gw_network:
        port.device_owner = constants.DEVICE_OWNER_ROUTER_GW
    port.create()
    if subnet_id:
        ip_alloc = obj_reg.new_instance(
            'IPAllocation', self.context,
            port_id=port.id,
            subnet_id=subnet_id,
            network_id=network_id,
            ip_address=netaddr.IPNetwork(GW_IP))
        ip_alloc.create()
        port.fixed_ips = [ip_alloc]
        port.update()
    # Finally register the port as a router interface.
    obj_reg.new_instance('RouterPort', self.context,
                         router_id=router_id,
                         port_id=port.id).create()
def add_segment(context, network_id, network_type):
    """Create a static NetworkSegment for *network_id* and return its id.

    NOTE(yamamoto): The code fragment is a modified copy of
    segments_db.py; it intentionally skips callback notifications.
    """
    segment_id = uuidutils.generate_uuid()
    obj_reg.new_instance(
        'NetworkSegment', context,
        id=segment_id,
        network_id=network_id,
        network_type=network_type,
        physical_network=None,
        segmentation_id=None,
        segment_index=0,
        is_dynamic=False).create()
    return segment_id
def _process_only_dns_name_update(self, plugin_context, db_data, dns_name):
    """Store a dns_name change that touches no other DNS attribute.

    Updates the existing PortDNS record when one exists; otherwise
    creates a fresh record (only if dns_name is non-empty). Returns
    the PortDNS object, or None when there was nothing to store.
    """
    port_dns = obj_reg.load_class('PortDNS').get_object(
        plugin_context, port_id=db_data['id'])
    if port_dns:
        port_dns['dns_name'] = dns_name
        port_dns.update()
        return port_dns
    # No existing record and nothing to record: bail out.
    if not dns_name:
        return port_dns
    port_dns = obj_reg.new_instance(
        'PortDNS', plugin_context,
        port_id=db_data['id'],
        current_dns_name='',
        current_dns_domain='',
        previous_dns_name='',
        previous_dns_domain='',
        dns_name=dns_name)
    port_dns.create()
    return port_dns
def create_security_group_without_rules(self, context, security_group,
                                        default_sg, is_provider):
    """Create a neutron security group, without any default rules.

    This method creates a security group that does not by default
    enable egress traffic which normal neutron security groups do.

    NOTE(review): this definition appears to duplicate an identical
    create_security_group_without_rules elsewhere in the file —
    consider consolidating.

    :param context: neutron request context
    :param security_group: request body wrapping a 'security_group'
        dict
    :param default_sg: whether this is the tenant's default group
    :param is_provider: marks the result as a provider security group
        in the returned dict
    :returns: the security group dict extended with policy/provider
        keys
    """
    s = security_group['security_group']
    kwargs = {
        'context': context,
        'security_group': s,
        'is_default': default_sg,
    }
    # Gives registered callbacks a chance to veto the creation.
    self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                          exc_cls=ext_sg.SecurityGroupConflict,
                          payload=events.DBEventPayload(
                              context,
                              metadata={'is_default': default_sg},
                              request_body=security_group,
                              desired_state=s))
    tenant_id = s['tenant_id']
    # A non-default group still requires the tenant's default group to
    # exist first.
    if not default_sg:
        self._ensure_default_security_group(context, tenant_id)
    with db_api.CONTEXT_WRITER.using(context):
        sg = obj_reg.new_instance(
            'SecurityGroup', context,
            id=s.get('id') or uuidutils.generate_uuid(),
            description=s.get('description', ''),
            project_id=tenant_id,
            name=s.get('name', ''),
            is_default=default_sg)
        # Note(asarfaty): for unknown reason, removing the 'is_default'
        # here allows the loading of the ext_properties of the security
        # group. If not - we will get DetachedInstanceError
        if 'is_default' in sg.fields_no_update:
            sg.fields_no_update.remove('is_default')
        sg.create()
        secgroup_dict = self._make_security_group_dict(sg)
        secgroup_dict[sg_policy.POLICY] = s.get(sg_policy.POLICY)
        secgroup_dict[provider_sg.PROVIDER] = is_provider
        kwargs['security_group'] = secgroup_dict
    registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                    **kwargs)
    return secgroup_dict
def test_subnets(self):
    """Associations expose their network's subnets; after a router is
    plugged in, a refreshed association reflects the gateway info."""
    for assoc in self.objs:
        assoc.create()
        self.assertItemsEqual(assoc.subnets, [_subnet_dict()])
    # plug a router
    router = obj_reg.new_instance('Router', self.context)
    router.create()
    self._connect_router_network(router.id, self.network_id,
                                 self.subnet.id)
    # check .subnets in associations, after refreshing
    for assoc in self.objs:
        refreshed = bgpvpn_obj.BGPVPNNetAssociation.get_object(
            self.context, id=assoc.id)
        self.assertItemsEqual(refreshed.subnets, [_subnet_dict(GW_MAC)])
def _create_port_dns_record(self, plugin_context, request_data, db_data,
                            network, dns_name):
    """Build and persist the initial PortDNS record for a new port."""
    # The port-level dns_domain, when given, wins over the network's.
    external_dns_domain = (request_data.get(dns.DNSDOMAIN) or
                           network.get(dns.DNSDOMAIN))
    external_dns_skipped = self.external_dns_not_needed(
        plugin_context, network)
    current_dns_name, current_dns_domain = (
        self._calculate_current_dns_name_and_domain(
            dns_name, external_dns_domain, external_dns_skipped))
    dns_record = obj_reg.new_instance(
        'PortDNS', plugin_context,
        port_id=db_data['id'],
        current_dns_name=current_dns_name,
        current_dns_domain=current_dns_domain,
        previous_dns_name='',
        previous_dns_domain='',
        dns_name=dns_name,
        dns_domain=request_data.get(dns.DNSDOMAIN, ''))
    dns_record.create()
    return dns_record
def _create_floatingip(self, context, floatingip,
                       initial_status=n_const.FLOATINGIP_STATUS_ACTIVE):
    """Create a floating IP, its internal external-network port, and
    the FloatingIP DB object, firing the create notifications.

    :param context: neutron request context
    :param floatingip: request body wrapping a 'floatingip' dict
    :param initial_status: status stored on the new floating IP
    :returns: the floating IP dict after resource extension
    :raises BadRequest: when the target network is not external
    :raises ExternalIpAddressExhausted: when no external IP could be
        allocated for the internal port
    """
    fip = floatingip['floatingip']
    fip_id = uuidutils.generate_uuid()
    f_net_id = fip['floating_network_id']
    if not self._core_plugin._network_is_external(context, f_net_id):
        msg = _("Network %s is not a valid external network") % f_net_id
        raise n_exc.BadRequest(resource='floatingip', msg=msg)
    self._validate_network_for_floatingip(context, f_net_id)
    # This external port is never exposed to the tenant.
    # it is used purely for internal system and admin use when
    # managing floating IPs.
    port = {'tenant_id': '',  # tenant intentionally not set
            'network_id': f_net_id,
            'admin_state_up': True,
            'device_id': 'PENDING',
            'device_owner': DEVICE_OWNER_FLOATINGIP,
            'status': n_const.PORT_STATUS_NOTAPPLICABLE,
            'name': ''}
    # Both subnet_id and floating_ip_address are accepted, if
    # floating_ip_address is not in the subnet,
    # InvalidIpForSubnet exception will be raised.
    fixed_ip = {}
    if fip['subnet_id']:
        fixed_ip['subnet_id'] = fip['subnet_id']
    if fip['floating_ip_address']:
        fixed_ip['ip_address'] = fip['floating_ip_address']
    if fixed_ip:
        port['fixed_ips'] = [fixed_ip]
    # 'status' in port dict could not be updated by default, use
    # check_allow_post to stop the verification of system
    # TODO(boden): rehome create_port into neutron-lib
    external_port = plugin_utils.create_port(self._core_plugin,
                                             context.elevated(),
                                             {'port': port},
                                             check_allow_post=False)
    # Roll the port back if anything below fails before commit.
    with plugin_utils.delete_port_on_error(
            self._core_plugin, context.elevated(),
            external_port['id']),\
            context.session.begin(subtransactions=True):
        external_ips = self._port_fixed_ips_for_floatingip(external_port)
        if not external_ips:
            raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id)
        floating_fixed_ip = external_ips[0]
        floating_ip_address = floating_fixed_ip['ip_address']
        floatingip_obj = obj_reg.new_instance(
            'FloatingIP', context,
            id=fip_id,
            project_id=fip['tenant_id'],
            status=initial_status,
            floating_network_id=fip['floating_network_id'],
            floating_ip_address=floating_ip_address,
            floating_port_id=external_port['id'],
            description=fip.get('description'))
        # Update association with internal port
        # and define external IP address
        assoc_result = self._update_fip_assoc(context, fip,
                                              floatingip_obj)
        floatingip_obj.create()
        floatingip_dict = self._make_floatingip_dict(
            floatingip_obj, process_extensions=False)
        if self._is_dns_integration_supported:
            dns_data = self._process_dns_floatingip_create_precommit(
                context, floatingip_dict, fip)
        # NOTE(yamamoto): MidoNet doesn't have Floating IP QoS
        # if self._is_fip_qos_supported:
        #     self._process_extra_fip_qos_create(context, fip_id, fip)
        # Re-read the object so PRECOMMIT subscribers see the DB row.
        floatingip_obj = obj_reg.load_class('FloatingIP').get_object(
            context, id=floatingip_obj.id)
        floatingip_db = floatingip_obj.db_obj
        registry.notify(resources.FLOATING_IP, events.PRECOMMIT_CREATE,
                        self, context=context, floatingip=fip,
                        floatingip_id=fip_id,
                        floatingip_db=floatingip_db)
    # Bind the internal port to the floating IP now that it exists.
    self._core_plugin.update_port(context.elevated(),
                                  external_port['id'],
                                  {'port': {'device_id': fip_id}})
    registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE,
                    self._update_fip_assoc, **assoc_result)
    if self._is_dns_integration_supported:
        self._process_dns_floatingip_create_postcommit(
            context, floatingip_dict, dns_data)
    # TODO(lujinluo): Change floatingip_db to floatingip_obj once all
    # codes are migrated to use Floating IP OVO object.
    resource_extend.apply_funcs(l3_apidef.FLOATINGIPS, floatingip_dict,
                                floatingip_db)
    return floatingip_dict
def setUp(self):
    """Prepare a QoSPlugin with mocked session/DB access plus canned
    policy and rule fixtures for the NSXv3 QoS notification tests."""
    # Reset the drive to re-create it
    qos_driver.DRIVER = None
    super(TestQosNsxV3Notification, self).setUp()
    self.setup_coreplugin(PLUGIN_NAME)
    self.qos_plugin = qos_plugin.QoSPlugin()
    self.ctxt = context.Context('fake_user', 'fake_tenant')
    # Session refresh/expunge would hit a real DB; stub them out.
    mock.patch.object(self.ctxt.session, 'refresh').start()
    mock.patch.object(self.ctxt.session, 'expunge').start()
    self.policy_data = {
        'policy': {'id': uuidutils.generate_uuid(),
                   'project_id': uuidutils.generate_uuid(),
                   'name': 'test-policy',
                   'description': 'Test policy description',
                   'shared': True}}
    self.rule_data = {
        'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
                                 'max_kbps': 2000,
                                 'max_burst_kbps': 150}}
    self.ingress_rule_data = {
        'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
                                 'max_kbps': 3000,
                                 'max_burst_kbps': 350,
                                 'direction': 'ingress'}}
    self.dscp_rule_data = {
        'dscp_marking_rule': {'id': uuidutils.generate_uuid(),
                              'dscp_mark': 22}}
    self.policy = QoSPolicyObject(self.ctxt,
                                  **self.policy_data['policy'])
    # egress BW limit rule
    self.rule = obj_reg.new_instance(
        'QosBandwidthLimitRule', self.ctxt,
        **self.rule_data['bandwidth_limit_rule'])
    # ingress bw limit rule
    self.ingress_rule = obj_reg.new_instance(
        'QosBandwidthLimitRule', self.ctxt,
        **self.ingress_rule_data['bandwidth_limit_rule'])
    self.dscp_rule = obj_reg.new_instance(
        'QosDscpMarkingRule', self.ctxt,
        **self.dscp_rule_data['dscp_marking_rule'])
    self.fake_profile_id = 'fake_profile'
    self.fake_profile = {'id': self.fake_profile_id}
    # Prevent object CRUD from touching a real DB.
    mock.patch('neutron.objects.db.api.create_object').start()
    mock.patch('neutron.objects.db.api.update_object').start()
    mock.patch('neutron.objects.db.api.delete_object').start()
    mock.patch.object(nsx_db, 'get_switch_profile_by_qos_policy',
                      return_value=self.fake_profile_id).start()
    self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier
    self.nsxlib = v3_utils.get_nsxlib_wrapper()