def _determine_port_security_and_has_ip(self, context, port):
    """Returns a tuple of booleans (port_security_enabled, has_ip).

    Port_security is the value associated with the port if one is present
    otherwise the value associated with the network is returned. has_ip is
    if the port is associated with an ip or not.
    """
    has_ip = self._ip_on_port(port)
    # we don't apply security groups for dhcp, router
    if port.get('device_owner') and net.is_port_trusted(port):
        return (False, has_ip)

    if validators.is_attr_set(port.get(psec.PORTSECURITY)):
        # Explicit per-port value always wins.
        port_security_enabled = port[psec.PORTSECURITY]

    # If port has an ip and security_groups are passed in
    # conveniently set port_security_enabled to true this way
    # user doesn't also have to pass in port_security_enabled=True
    # when creating ports.
    elif has_ip and validators.is_attr_set(port.get('security_groups')):
        port_security_enabled = True
    else:
        # Nothing supplied on the port: inherit the network-level
        # port-security binding.
        port_security_enabled = self._get_network_security_binding(
            context, port['network_id'])

    return (port_security_enabled, has_ip)
def _fill_provider_info(self, from_net, to_net):
    """Copy set provider-network attributes and AZ hints between dicts.

    Only attributes that are actually set on *from_net* are copied onto
    *to_net*; unset attributes are left untouched.
    """
    for attr in provider_net.ATTRIBUTES:
        value = from_net.get(attr)
        if validators.is_attr_set(value):
            to_net[attr] = value
    hints = from_net.get(az_def.AZ_HINTS)
    if validators.is_attr_set(hints):
        to_net[az_def.AZ_HINTS] = hints
def _check_invalid_security_groups_specified(self, context, port,
                                             only_warn=False):
    """Check if the lists of security groups are valid

    When only_warn is True we do not raise an exception here, because
    this may fail nova boot. Instead we will later remove provider
    security groups from the regular security groups list of the port.
    Since all the provider security groups of the tenant will be on this
    list anyway, the result will be the same.
    """
    if validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
        # A provider security group must never appear in the regular
        # security groups list.
        for sg in port.get(ext_sg.SECURITYGROUPS, []):
            if not self._is_provider_security_group(context, sg):
                continue
            if only_warn:
                LOG.warning(
                    "Ignored provider security group %(sg)s in "
                    "security groups list for port %(id)s",
                    {'sg': sg, 'id': port['id']})
            else:
                raise provider_sg.SecurityGroupIsProvider(id=sg)

    if validators.is_attr_set(
            port.get(provider_sg.PROVIDER_SECURITYGROUPS)):
        # Conversely, everything in the provider list must really be a
        # provider security group.
        for sg in port.get(provider_sg.PROVIDER_SECURITYGROUPS, []):
            self._check_provider_security_group_exists(context, sg)
def _save_subnet(self, context,
                 network,
                 subnet_args,
                 dns_nameservers,
                 host_routes,
                 subnet_request):
    """Persist a subnet together with its DNS servers, routes and pools.

    Validates the CIDR against the network and the network/subnetpool
    affinity, then creates the Subnet row and its child rows inside the
    caller's session.
    """
    self._validate_subnet_cidr(context, network, subnet_args['cidr'])
    self._validate_network_subnetpools(network,
                                       subnet_args['subnetpool_id'],
                                       subnet_args['ip_version'])

    subnet = models_v2.Subnet(**subnet_args)
    context.session.add(subnet)
    # NOTE(changzhi) Store DNS nameservers with order into DB one
    # by one when create subnet with DNS nameservers
    if validators.is_attr_set(dns_nameservers):
        for order, server in enumerate(dns_nameservers):
            dns = models_v2.DNSNameServer(
                address=server,
                order=order,
                subnet_id=subnet.id)
            context.session.add(dns)

    if validators.is_attr_set(host_routes):
        for rt in host_routes:
            route = models_v2.SubnetRoute(
                subnet_id=subnet.id,
                destination=rt['destination'],
                nexthop=rt['nexthop'])
            context.session.add(route)

    self.save_allocation_pools(context, subnet,
                               subnet_request.allocation_pools)

    return subnet
def process_create_policy_target_group(self, session, data, result):
    """Wire up proxy-group bookkeeping for a newly created PTG.

    When 'proxied_group_id' is set, record (or update) the
    GroupProxyMapping linking the proxied PTG to this proxy PTG, default
    the proxy type, and force service-chain enforcement off for proxies.
    Setting 'proxy_type' without a proxied group is rejected.
    """
    data = data['policy_target_group']
    proxied = data.get('proxied_group_id')
    if validators.is_attr_set(proxied):
        # Set value for proxied group
        record = (session.query(db.GroupProxyMapping).filter_by(
            policy_target_group_id=proxied).first())
        if record:
            if record.proxy_group_id:
                # A PTG may only be proxied by one group at a time.
                raise driver_proxy_group.InvalidProxiedGroup(
                    group_id=proxied)
            record.proxy_group_id = result['id']
        else:
            # Record may not exist for that PTG yet
            record = db.GroupProxyMapping(
                policy_target_group_id=proxied,
                proxy_group_id=result['id'],
                proxied_group_id=None)
            session.add(record)
        if not validators.is_attr_set(data.get('proxy_type')):
            data['proxy_type'] = driver_proxy_group.DEFAULT_PROXY_TYPE
        # Persist the proxy type on this (new) PTG's own mapping row.
        record = (session.query(db.GroupProxyMapping).filter_by(
            policy_target_group_id=result['id']).one())
        record.proxy_type = data['proxy_type']
        result['proxy_type'] = data['proxy_type']
        # Proxy PTGs can't have chains enforced
        data['enforce_service_chains'] = False
        record = (session.query(db.GroupProxyMapping).filter_by(
            policy_target_group_id=result['id']).one())
        record.enforce_service_chains = data['enforce_service_chains']
        result['enforce_service_chains'] = data['enforce_service_chains']
    elif validators.is_attr_set(data.get('proxy_type')):
        # proxy_type makes no sense without a proxied group.
        raise driver_proxy_group.ProxyTypeSetWithoutProxiedPTG()
def _process_portbindings_create_and_update(self, context, port_data,
                                            port):
    """Persist host binding info for a port create/update.

    Strips binding attributes that were present but unset from the port
    dict, stores the requested binding host in PortBindingPort, and
    extends the port dict with the effective binding host.
    """
    binding_profile = port.get(portbindings.PROFILE)
    binding_profile_set = validators.is_attr_set(binding_profile)
    # Drop present-but-unset attributes so they don't leak into the
    # API response.
    if not binding_profile_set and binding_profile is not None:
        del port[portbindings.PROFILE]

    binding_vnic = port.get(portbindings.VNIC_TYPE)
    binding_vnic_set = validators.is_attr_set(binding_vnic)
    if not binding_vnic_set and binding_vnic is not None:
        del port[portbindings.VNIC_TYPE]
    # REVISIT(irenab) Add support for vnic_type for plugins that
    # can handle more than one type.
    # Currently implemented for ML2 plugin that does not use
    # PortBindingMixin.

    host = port_data.get(portbindings.HOST_ID)
    host_set = validators.is_attr_set(host)
    with context.session.begin(subtransactions=True):
        bind_port = context.session.query(
            PortBindingPort).filter_by(port_id=port['id']).first()
        if host_set:
            if not bind_port:
                context.session.add(PortBindingPort(port_id=port['id'],
                                                    host=host))
            else:
                bind_port.host = host
        else:
            # No host requested: report whatever is already bound.
            host = bind_port.host if bind_port else None
    self._extend_port_dict_binding_host(port, host)
def _l7_params_conflict(fc1, fc2):
    """Return True when two flow classifiers' L7 parameters collide.

    A collision happens when both classifiers have L7 parameters set and
    the values are equal, or when neither has them set at all.
    """
    p1 = fc1['l7_parameters']
    p2 = fc2['l7_parameters']
    p1_set = validators.is_attr_set(p1)
    p2_set = validators.is_attr_set(p2)
    if p1_set and p2_set:
        return p1 == p2
    # Exactly one set -> no conflict; neither set -> conflict.
    return not p1_set and not p2_set
def _validate_flow_classifier(self, context): fc = context.current # Verify L7 params are set l7_p = fc['l7_parameters'] if any(x for x in sfc_cts.AIM_FLC_L7_PARAMS.keys() if not validators.is_attr_set(l7_p.get(x))): raise sfc_exc.BadFlowClassifier( params=sfc_cts.AIM_FLC_L7_PARAMS.keys()) # Verify standard params are set # TODO(ivar): src and dst prefix are needed only for SVI networks if any(x for x in sfc_cts.AIM_FLC_PARAMS if not validators.is_attr_set(fc.get(x))): raise sfc_exc.BadFlowClassifier(params=sfc_cts.AIM_FLC_PARAMS) # Verify networks exist src_net = self.plugin.get_network( context._plugin_context, l7_p[sfc_cts.LOGICAL_SRC_NET]) if l7_p[sfc_cts.LOGICAL_SRC_NET] != l7_p[sfc_cts.LOGICAL_DST_NET]: # Verify dst existence self.plugin.get_network(context._plugin_context, l7_p[sfc_cts.LOGICAL_DST_NET]) elif src_net.get('apic:svi') is False: # Same network, not SVI raise sfc_exc.FlowClassifierSameSrcDstNetworks() if validators.is_attr_set(fc.get('source_ip_prefix')) and ( fc.get('source_ip_prefix') == fc.get('destination_ip_prefix')): # Same subnet for source and dst is not allowed. For overlapping # (but not same) subnets LPM will be applied. raise sfc_exc.FlowClassifierSameSubnet()
def _save_subnet(self, context,
                 network,
                 subnet_args,
                 dns_nameservers,
                 host_routes,
                 subnet_request):
    """Persist a subnet with segment, DNS, routes, service types, pools.

    OVO-based variant: creates the Subnet object and each child object
    (DNSNameServer, Route, SubnetServiceType) individually, then returns
    a freshly loaded Subnet object.
    """
    self._validate_subnet_cidr(context, network, subnet_args['cidr'])
    self._validate_network_subnetpools(network,
                                       subnet_args['subnetpool_id'],
                                       subnet_args['ip_version'])

    # service_types is stored in its own table, not on the Subnet row.
    service_types = subnet_args.pop('service_types', [])

    segment_id = subnet_args.get('segment_id')
    if segment_id:
        # TODO(slaweq): integrate check if segment exists in
        # self._validate_segment() method
        segment = network_obj.NetworkSegment.get_object(context,
                                                        id=segment_id)
        if not segment:
            raise segment_exc.SegmentNotFound(segment_id=segment_id)

    subnet = subnet_obj.Subnet(context, **subnet_args)
    subnet.create()
    # TODO(slaweq): when check is segment exists will be integrated in
    # self._validate_segment() method, it should be moved to be done before
    # subnet object is created
    self._validate_segment(context, network['id'], segment_id)

    # NOTE(changzhi) Store DNS nameservers with order into DB one
    # by one when create subnet with DNS nameservers
    if validators.is_attr_set(dns_nameservers):
        for order, server in enumerate(dns_nameservers):
            dns = subnet_obj.DNSNameServer(context,
                                           address=server,
                                           order=order,
                                           subnet_id=subnet.id)
            dns.create()

    if validators.is_attr_set(host_routes):
        for rt in host_routes:
            route = subnet_obj.Route(
                context,
                subnet_id=subnet.id,
                destination=common_utils.AuthenticIPNetwork(
                    rt['destination']),
                nexthop=netaddr.IPAddress(rt['nexthop']))
            route.create()

    if validators.is_attr_set(service_types):
        for service_type in service_types:
            service_type_obj = subnet_obj.SubnetServiceType(
                context, subnet_id=subnet.id, service_type=service_type)
            service_type_obj.create()

    self.save_allocation_pools(context, subnet,
                               subnet_request.allocation_pools)

    # Reload so all server-side defaults / children are present.
    return subnet_obj.Subnet.get_object(context, id=subnet.id)
def _make_subnet_args(self, detail, subnet, subnetpool_id):
    """Build subnet-creation args, forwarding segment and service types.

    Extends the parent implementation's args with segment_id and
    service_types when those attributes were supplied.
    """
    args = super(IpamBackendMixin, self)._make_subnet_args(
        detail, subnet, subnetpool_id)
    seg = subnet.get(segment.SEGMENT_ID)
    if validators.is_attr_set(seg):
        args['segment_id'] = seg
    svc_types = subnet.get('service_types')
    if validators.is_attr_set(svc_types):
        args['service_types'] = svc_types
    return args
def test_is_attr_set(self):
    """is_attr_set() is False for the sentinel and None, True otherwise."""
    for value, expected in ((constants.ATTR_NOT_SPECIFIED, False),
                            (None, False),
                            ("I'm set", True)):
        self.assertIs(validators.is_attr_set(value), expected)
def network_matches_filters(self, network, filters):
    """Return True when the network's segments satisfy the filters.

    Networks without any provider/multiprovider info trivially match.
    """
    if not filters:
        return True
    has_provider_attrs = any(
        validators.is_attr_set(network.get(attr))
        for attr in provider.ATTRIBUTES)
    if has_provider_attrs:
        segments = [self._get_provider_segment(network)]
    elif validators.is_attr_set(network.get(mpnet.SEGMENTS)):
        segments = self._get_attribute(network, mpnet.SEGMENTS)
    else:
        # No segment info at all: nothing to filter on.
        return True
    return any(self._match_segment(s, filters) for s in segments)
def validate_pools_with_subnetpool(self, subnet):
    """Verifies that allocation pools are set correctly

    Allocation pools can be set for specific subnet request only
    """
    pools_given = validators.is_attr_set(subnet['allocation_pools'])
    cidr_given = validators.is_attr_set(subnet['cidr'])
    # An "any" subnetpool request (no cidr) cannot carry explicit pools.
    if pools_given and not cidr_given:
        reason = _("allocation_pools allowed only "
                   "for specific subnet requests.")
        raise exc.BadRequest(resource='subnets', msg=reason)
def update_port(self, context, old_port_db, old_port, new_port):
    """Update the port IPs

    Updates the port's IPs based on any new fixed_ips passed in or if
    deferred IP allocation is in effect because allocation requires host
    binding information that wasn't provided until port update.

    :param old_port_db: The port database record
    :param old_port: A port dict created by calling _make_port_dict. This
                     must be called before calling this method in order to
                     load data from extensions, specifically host binding.
    :param new_port: The new port data passed through the API.
    """
    old_host = old_port.get(portbindings.HOST_ID)
    new_host = new_port.get(portbindings.HOST_ID)
    # Prefer the newly requested host; otherwise keep the old one.
    host = new_host if validators.is_attr_set(new_host) else old_host

    changes = self.update_port_with_ips(context, host,
                                        old_port_db, new_port,
                                        new_port.get('mac_address'))

    fixed_ips_requested = validators.is_attr_set(
        new_port.get('fixed_ips'))
    old_ips = old_port.get('fixed_ips')
    # Deferred allocation only triggers when the port had no host, no
    # IPs, and the update supplies a host without explicit fixed_ips.
    deferred_ip_allocation = (
        old_port.get('ip_allocation') ==
        ipalloc_apidef.IP_ALLOCATION_DEFERRED and
        host and not old_host and not old_ips and
        not fixed_ips_requested)
    if not deferred_ip_allocation:
        # Check that any existing IPs are valid on the new segment
        new_host_requested = host and host != old_host
        if old_ips and new_host_requested and not fixed_ips_requested:
            valid_subnets = self._ipam_get_subnets(
                context, old_port['network_id'], host,
                service_type=old_port.get('device_owner'))
            valid_subnet_ids = {s['id'] for s in valid_subnets}
            for fixed_ip in old_ips:
                if fixed_ip['subnet_id'] not in valid_subnet_ids:
                    raise segment_exc.HostNotCompatibleWithFixedIps(
                        host=host, port_id=old_port['id'])
        return changes

    # Allocate as if this were the port create.
    port_copy = copy.deepcopy(old_port)
    # Clear stale fixed_ips so allocation treats them as unspecified.
    port_copy['fixed_ips'] = const.ATTR_NOT_SPECIFIED
    port_copy.update(new_port)
    context.session.expire(old_port_db, ['fixed_ips'])
    ips = self.allocate_ips_for_port_and_store(
        context, {'port': port_copy}, port_copy['id'])
    getattr(old_port_db, 'fixed_ips')  # refresh relationship before return
    return self.Changes(add=ips, original=[], remove=[])
def _save_subnet(self, context, network, subnet_args, dns_nameservers,
                 host_routes, subnet_request):
    """Persist a subnet with segment, DNS, routes, service types, pools.

    Hybrid variant: the Subnet itself is a models_v2 row, while DNS
    servers and routes are created through OVOs. A DBReferenceError on
    flush is mapped to SegmentNotFound (dangling segment_id FK).
    """
    self._validate_subnet_cidr(context, network, subnet_args['cidr'])
    self._validate_network_subnetpools(network,
                                       subnet_args['subnetpool_id'],
                                       subnet_args['ip_version'])

    # service_types is stored in its own table, not on the Subnet row.
    service_types = subnet_args.pop('service_types', [])
    subnet = models_v2.Subnet(**subnet_args)
    segment_id = subnet_args.get('segment_id')
    try:
        context.session.add(subnet)
        context.session.flush()
    except db_exc.DBReferenceError:
        # FK violation on segment_id: the referenced segment is gone.
        raise segment_exc.SegmentNotFound(segment_id=segment_id)
    self._validate_segment(context, network['id'], segment_id)

    # NOTE(changzhi) Store DNS nameservers with order into DB one
    # by one when create subnet with DNS nameservers
    if validators.is_attr_set(dns_nameservers):
        for order, server in enumerate(dns_nameservers):
            dns = subnet_obj.DNSNameServer(context,
                                           address=server,
                                           order=order,
                                           subnet_id=subnet.id)
            dns.create()

    if validators.is_attr_set(host_routes):
        for rt in host_routes:
            route = subnet_obj.Route(
                context,
                subnet_id=subnet.id,
                destination=common_utils.AuthenticIPNetwork(
                    rt['destination']),
                nexthop=netaddr.IPAddress(rt['nexthop']))
            route.create()

    if validators.is_attr_set(service_types):
        for service_type in service_types:
            service_type_entry = sst_model.SubnetServiceType(
                subnet_id=subnet.id,
                service_type=service_type)
            context.session.add(service_type_entry)

    self.save_allocation_pools(context, subnet,
                               subnet_request.allocation_pools)

    return subnet
def _process_provider_create(self, network):
    """Translate provider/multiprovider attributes into segment list.

    Returns a list of processed segments, or None when neither form of
    provider information was supplied on the network.
    """
    provider_set = any(validators.is_attr_set(network.get(attr))
                       for attr in provider.ATTRIBUTES)
    multi_set = validators.is_attr_set(network.get(mpnet.SEGMENTS))
    if provider_set:
        # Verify that multiprovider and provider attributes are not set
        # at the same time.
        if multi_set:
            raise mpnet.SegmentsSetInConjunctionWithProviders()
        segment = self._get_provider_segment(network)
        return [self._process_provider_segment(segment)]
    if multi_set:
        segments = [self._process_provider_segment(s)
                    for s in network[mpnet.SEGMENTS]]
        mpnet.check_duplicate_segments(segments, self.is_partial_segment)
        return segments
def _is_ext_or_provider_net(self, subnet_request):
    """Return True if the network of the request is external or provider
    network
    """
    network_id = subnet_request.network_id
    if not network_id:
        return False
    network = self._fetch_network(self._context, network_id)
    if network.get(extnet_apidef.EXTERNAL):
        # external network
        return True
    # provider network: either multiprovider segments or a plain
    # provider network_type is set.
    is_provider = (
        validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)) or
        validators.is_attr_set(network.get(pnet.NETWORK_TYPE)))
    return bool(is_provider)
def _is_external_network(network):
    """Return True only when the external flag is both set and truthy."""
    external = network.get(external_net.EXTERNAL)
    return bool(validators.is_attr_set(external) and external)
def _get_security_groups_on_port(self, context, port):
    """Check that all security groups on port belong to tenant.

    :returns: all security groups IDs on port belonging to tenant.
    """
    port = port['port']
    if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
        return
    # Trusted (dhcp/router) ports don't get security groups.
    if port.get('device_owner') and net.is_port_trusted(port):
        return

    port_sg = port.get(ext_sg.SECURITYGROUPS, [])
    tenant_id = port.get('tenant_id')

    sg_objs = sg_obj.SecurityGroup.get_objects(context, id=port_sg)

    # A group is usable when it belongs to the tenant or is shared with
    # it; no tenant on the port means any found group is accepted.
    valid_groups = set(
        g.id for g in sg_objs
        if (not tenant_id or g.tenant_id == tenant_id or
            sg_obj.SecurityGroup.is_shared_with_tenant(
                context, g.id, tenant_id))
    )

    requested_groups = set(port_sg)
    port_sg_missing = requested_groups - valid_groups
    if port_sg_missing:
        raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing))

    return list(requested_groups)
def _get_security_groups_on_port(self, context, port):
    """Check that all security groups on port belong to tenant.

    :returns: all security groups IDs on port belonging to tenant.
    """
    port = port['port']
    if not validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)):
        return
    # Trusted (dhcp/router) ports don't get security groups.
    if port.get('device_owner') and utils.is_port_trusted(port):
        return

    port_sg = port.get(ext_sg.SECURITYGROUPS, [])
    filters = {'id': port_sg}
    tenant_id = port.get('tenant_id')
    if tenant_id:
        # Scope the lookup to the port's tenant when known.
        filters['tenant_id'] = [tenant_id]
    valid_groups = set(g['id'] for g in
                       self.get_security_groups(context, fields=['id'],
                                                filters=filters))

    requested_groups = set(port_sg)
    port_sg_missing = requested_groups - valid_groups
    if port_sg_missing:
        raise ext_sg.SecurityGroupNotFound(id=', '.join(port_sg_missing))

    return requested_groups
def process_update_network(self, plugin_context, request_data, db_data):
    """Apply a dns_domain change on network update.

    Creates, updates or deletes the NetworkDNSDomain object depending on
    the current and requested values; no-op when the attribute was not
    supplied or is unchanged.
    """
    new_value = request_data.get(dns.DNSDOMAIN)
    if not validators.is_attr_set(new_value):
        return
    current_dns_domain = db_data.get(dns.DNSDOMAIN)
    if current_dns_domain == new_value:
        return

    net_id = db_data['id']
    if current_dns_domain:
        net_dns_domain = net_obj.NetworkDNSDomain.get_object(
            plugin_context,
            network_id=net_id)
        if new_value:
            net_dns_domain['dns_domain'] = new_value
            db_data[dns.DNSDOMAIN] = new_value
            net_dns_domain.update()
        else:
            # Empty value requested: remove the existing record.
            net_dns_domain.delete()
            db_data[dns.DNSDOMAIN] = ''
    elif new_value:
        net_obj.NetworkDNSDomain(plugin_context,
                                 network_id=net_id,
                                 dns_domain=new_value).create()
        db_data[dns.DNSDOMAIN] = new_value
def check_and_truncate(display_name):
    """Clamp a display name to the NSX maximum length.

    Returns '' for unset or empty names; logs when truncation occurs.
    """
    if not validators.is_attr_set(display_name):
        return display_name or ''
    if len(display_name) > MAX_DISPLAY_NAME_LEN:
        LOG.debug("Specified name:'%s' exceeds maximum length. "
                  "It will be truncated on NSX", display_name)
        return display_name[:MAX_DISPLAY_NAME_LEN]
    return display_name or ''
def _process_dns_floatingip_create_precommit(self, context,
                                             floatingip_data, req_data):
    """Record the DNS name/domain for a new floating IP (precommit).

    Returns DNSActionsData describing what the external DNS service
    should publish, or None when no dns_domain was requested or no
    driver is configured.
    """
    # expects to be called within a plugin's session
    dns_domain = req_data.get(dns.DNSDOMAIN)
    if not validators.is_attr_set(dns_domain):
        return
    if not self.dns_driver:
        return

    dns_name = req_data[dns.DNSNAME]
    self._validate_floatingip_dns(dns_name, dns_domain)

    current_dns_name, current_dns_domain = (
        self._get_requested_state_for_external_dns_service_create(
            context, floatingip_data, req_data))
    dns_actions_data = None
    if current_dns_name and current_dns_domain:
        context.session.add(FloatingIPDNS(
            floatingip_id=floatingip_data['id'],
            dns_name=req_data[dns.DNSNAME],
            dns_domain=req_data[dns.DNSDOMAIN],
            published_dns_name=current_dns_name,
            published_dns_domain=current_dns_domain))
        dns_actions_data = DNSActionsData(
            current_dns_name=current_dns_name,
            current_dns_domain=current_dns_domain)
    floatingip_data['dns_name'] = dns_name
    floatingip_data['dns_domain'] = dns_domain
    return dns_actions_data
def _process_l3_update(self, context, net_data, req_data, allow_all=True):
    """Handle a change of router:external on network update.

    Creates/deletes the ExternalNetwork marker (and, with allow_all, the
    wildcard access_as_external RBAC entry). Downgrading to non-external
    is rejected while router gateway ports exist on the network.
    """
    new_value = req_data.get(extnet_apidef.EXTERNAL)
    net_id = net_data['id']
    if not validators.is_attr_set(new_value):
        return
    if net_data.get(extnet_apidef.EXTERNAL) == new_value:
        return

    if new_value:
        net_obj.ExternalNetwork(
            context, network_id=net_id).create()
        net_data[extnet_apidef.EXTERNAL] = True
        if allow_all:
            context.session.add(rbac_db.NetworkRBAC(
                object_id=net_id, action='access_as_external',
                target_tenant='*', tenant_id=net_data['tenant_id']))
    else:
        # must make sure we do not have any external gateway ports
        # (and thus, possible floating IPs) on this network before
        # allow it to be update to external=False
        port = context.session.query(models_v2.Port).filter_by(
            device_owner=DEVICE_OWNER_ROUTER_GW,
            network_id=net_data['id']).first()
        if port:
            raise extnet_exc.ExternalNetworkInUse(net_id=net_id)

        net_obj.ExternalNetwork.delete_objects(
            context, network_id=net_id)
        # Also drop every wildcard/per-tenant external-access grant.
        for rbdb in (context.session.query(rbac_db.NetworkRBAC).filter_by(
                     object_id=net_id, action='access_as_external')):
            context.session.delete(rbdb)
        net_data[extnet_apidef.EXTERNAL] = False
def _process_l3_update(self, context, net_data, req_data, allow_all=True):
    """Handle a change of router:external on network update (OVO RBAC).

    Same contract as the legacy variant, but RBAC entries are managed
    through the NetworkRBAC versioned object instead of raw DB rows.
    """
    new_value = req_data.get(extnet_apidef.EXTERNAL)
    net_id = net_data['id']
    if not validators.is_attr_set(new_value):
        return
    if net_data.get(extnet_apidef.EXTERNAL) == new_value:
        return

    if new_value:
        net_obj.ExternalNetwork(
            context, network_id=net_id).create()
        net_data[extnet_apidef.EXTERNAL] = True
        if allow_all:
            net_rbac_args = {'project_id': net_data['tenant_id'],
                             'object_id': net_id,
                             'action': 'access_as_external',
                             'target_tenant': '*'}
            net_obj.NetworkRBAC(context, **net_rbac_args).create()
    else:
        # must make sure we do not have any external gateway ports
        # (and thus, possible floating IPs) on this network before
        # allow it to be update to external=False
        if context.session.query(models_v2.Port.id).filter_by(
                device_owner=constants.DEVICE_OWNER_ROUTER_GW,
                network_id=net_data['id']).first():
            raise extnet_exc.ExternalNetworkInUse(net_id=net_id)

        net_obj.ExternalNetwork.delete_objects(
            context, network_id=net_id)
        net_obj.NetworkRBAC.delete_objects(
            context, object_id=net_id, action='access_as_external')
        net_data[extnet_apidef.EXTERNAL] = False
def _process_port_create_security_group(self, context, port,
                                        security_group_ids):
    """Bind the given security groups to a newly created port.

    :param port: the port dict being built; its SECURITYGROUPS entry is
        always set to a plain list (sets are converted so the value
        stays serializable).
    :param security_group_ids: iterable of security group IDs, or an
        unset attribute value (then no bindings are created).
    """
    if validators.is_attr_set(security_group_ids):
        for security_group_id in security_group_ids:
            self._create_port_security_group_binding(context, port["id"],
                                                     security_group_id)
    # Convert to list as a set might be passed here and
    # this has to be serialized.
    # Fixed: replaced the fragile legacy "x and y or z" short-circuit
    # idiom with an explicit conditional expression (same behavior).
    port[ext_sg.SECURITYGROUPS] = (
        list(security_group_ids) if security_group_ids else [])
def _process_l3_create(self, context, net_data, req_data):
    """Handle router:external on network create.

    When external is requested, fires BEFORE/AFTER_CREATE callbacks,
    records the ExternalNetwork marker and grants wildcard
    access_as_external RBAC. Always mirrors the flag into net_data.
    """
    external = req_data.get(external_net.EXTERNAL)
    external_set = validators.is_attr_set(external)

    if not external_set:
        return

    # TODO(armax): these notifications should switch to *_COMMIT
    # when the event becomes available, as this block is expected
    # to be called within a plugin's session
    if external:
        try:
            registry.notify(
                resources.EXTERNAL_NETWORK, events.BEFORE_CREATE,
                self, context=context,
                request=req_data, network=net_data)
        except c_exc.CallbackFailure as e:
            # raise the underlying exception
            raise e.errors[0].error
        context.session.add(
            ext_net_models.ExternalNetwork(network_id=net_data['id']))
        context.session.add(rbac_db.NetworkRBAC(
            object_id=net_data['id'], action='access_as_external',
            target_tenant='*', tenant_id=net_data['tenant_id']))
        registry.notify(
            resources.EXTERNAL_NETWORK, events.AFTER_CREATE,
            self, context=context,
            request=req_data, network=net_data)
    net_data[external_net.EXTERNAL] = external
def create_port(self, context, port):
    """Test-plugin port create enforcing port-security semantics.

    Resolves security groups, determines the effective port-security
    flag, and rejects security groups on ports lacking an IP or with
    port security disabled.
    """
    p = port['port']
    with context.session.begin(subtransactions=True):
        p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port(
            context, port)
        neutron_db = super(PortSecurityTestPlugin, self).create_port(
            context, port)
        p.update(neutron_db)

        (port_security, has_ip) = self._determine_port_security_and_has_ip(
            context, p)
        p[psec.PORTSECURITY] = port_security
        self._process_port_port_security_create(context, p, neutron_db)

        if (validators.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and
                not (port_security and has_ip)):
            # Security groups demand both an IP and enabled port security.
            raise psec.PortSecurityAndIPRequiredForSecurityGroups()

        # Port requires ip and port_security enabled for security group
        if has_ip and port_security:
            self._ensure_default_security_group_on_port(context, port)

        if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]):
            self._process_port_create_security_group(
                context, p, p[ext_sg.SECURITYGROUPS])
    return port['port']
def _process_create_allowed_address_pairs(self, context, port,
                                          allowed_address_pairs):
    """Persist a port's allowed-address-pairs.

    Pairs with no mac_address inherit the port's MAC. Returns the input
    list unchanged (or [] when the attribute was not set).

    :raises DuplicateAddressPairInRequest: on a duplicate pair.
    """
    if not validators.is_attr_set(allowed_address_pairs):
        return []
    try:
        with db_api.CONTEXT_WRITER.using(context):
            for address_pair in allowed_address_pairs:
                # use port.mac_address if no mac address in address pair
                if 'mac_address' not in address_pair:
                    address_pair['mac_address'] = port['mac_address']
                # retain string format as passed through API
                mac_address = utils.AuthenticEUI(
                    address_pair['mac_address'])
                ip_address = utils.AuthenticIPNetwork(
                    address_pair['ip_address'])
                pair_obj = obj_addr_pair.AllowedAddressPair(
                    context, port_id=port['id'],
                    mac_address=mac_address,
                    ip_address=ip_address)
                pair_obj.create()
    except exceptions.NeutronDbObjectDuplicateEntry:
        # address_pair here is the one that triggered the duplicate.
        raise addr_exc.DuplicateAddressPairInRequest(
            mac_address=address_pair['mac_address'],
            ip_address=address_pair['ip_address'])

    return allowed_address_pairs
def _get_security_groups_on_port(self, context, port):
    """Check that all security groups on port belong to tenant.

    :returns: all security groups IDs on port belonging to tenant.
    """
    p = port['port']
    if not validators.is_attr_set(
            p.get(securitygroups_db.ext_sg.SECURITYGROUPS)):
        return
    # network:* owned ports (dhcp, router, ...) get no security groups.
    if p.get('device_owner') and p['device_owner'].startswith('network:'):
        return

    port_sg = p.get(securitygroups_db.ext_sg.SECURITYGROUPS, [])
    filters = {'id': port_sg}
    valid_groups = set(g['id'] for g in
                       self.get_security_groups(context, fields=['id'],
                                                filters=filters))

    requested_groups = set(port_sg)
    port_sg_missing = requested_groups - valid_groups
    if port_sg_missing:
        raise securitygroups_db.ext_sg.SecurityGroupNotFound(
            id=', '.join(port_sg_missing))

    return requested_groups
def _process_l3_update(self, context, net_data, req_data, allow_all=True):
    """Handle router:external on network update, with callbacks.

    Fires BEFORE_UPDATE first, then creates/removes the ExternalNetwork
    marker and the wildcard access_as_external RBAC entries. Downgrading
    to non-external is rejected while gateway ports exist.
    """
    try:
        registry.notify(resources.EXTERNAL_NETWORK, events.BEFORE_UPDATE,
                        self, context=context,
                        request=req_data, network=net_data)
    except c_exc.CallbackFailure as e:
        # raise the underlying exception
        raise e.errors[0].error

    new_value = req_data.get(external_net.EXTERNAL)
    net_id = net_data['id']
    if not validators.is_attr_set(new_value):
        return
    if net_data.get(external_net.EXTERNAL) == new_value:
        return

    if new_value:
        net_obj.ExternalNetwork(context, network_id=net_id).create()
        net_data[external_net.EXTERNAL] = True
        if allow_all:
            context.session.add(
                rbac_db.NetworkRBAC(object_id=net_id,
                                    action='access_as_external',
                                    target_tenant='*',
                                    tenant_id=net_data['tenant_id']))
    else:
        # must make sure we do not have any external gateway ports
        # (and thus, possible floating IPs) on this network before
        # allow it to be update to external=False
        port = context.session.query(
            models_v2.Port).filter_by(device_owner=DEVICE_OWNER_ROUTER_GW,
                                      network_id=net_data['id']).first()
        if port:
            raise external_net.ExternalNetworkInUse(net_id=net_id)

        net_obj.ExternalNetwork.delete_objects(context,
                                               network_id=net_id)
        # Also remove every external-access RBAC grant on the network.
        for rbdb in (context.session.query(rbac_db.NetworkRBAC).filter_by(
                     object_id=net_id, action='access_as_external')):
            context.session.delete(rbdb)
        net_data[external_net.EXTERNAL] = False
def _validate_port_redirect_target(self, context, port, rtargets):
    """Validate the redirect targets requested for a port.

    Accepts at most one target, resolves names to VSD IDs, and verifies
    the target exists on VSD and lives in the same subnet as the port.

    :returns: list of resolved redirect target IDs (or None if unset).
    :raises NuageBadRequest: on any validation failure.
    """
    if not is_attr_set(rtargets):
        return
    if len(rtargets) > 1:
        msg = (_("Multiple redirect targets on a port not supported "))
        raise nuage_exc.NuageBadRequest(msg=msg)

    subnet_mapping = nuagedb.get_subnet_l2dom_by_id(
        context.session,
        port['fixed_ips'][0]['subnet_id'])
    nuage_rtargets_ids = []
    for rtarget in rtargets:
        uuid_match = re.match(lib_constants.UUID_PATTERN, rtarget)
        if not uuid_match:
            # Not a UUID: treat it as a name and resolve it.
            nuage_rtarget = self._resource_finder(
                context, 'port', 'nuage_redirect_target', rtarget)
            nuage_rtarget_id = nuage_rtarget['id']
            nuage_rtargets_ids.append(nuage_rtarget_id)
        else:
            nuage_rtarget_id = rtarget
            nuage_rtargets_ids.append(rtarget)
        # validate rtarget is in the same subnet as port
        rtarget_resp = self.vsdclient.get_nuage_redirect_target(
            nuage_rtarget_id)
        if not rtarget_resp:
            msg = (_("Redirect target %s does not exist on VSD ") %
                   nuage_rtarget_id)
            raise nuage_exc.NuageBadRequest(msg=msg)
        parent_type = rtarget_resp['parentType']
        parent = rtarget_resp['parentID']
        validate_params = {
            'parent': parent,
            'parent_type': parent_type,
            'nuage_subnet_id': subnet_mapping['nuage_subnet_id']
        }
        if subnet_mapping and (
                not self.vsdclient.validate_port_create_redirect_target(
                    validate_params)):
            msg = ("Redirect Target belongs to subnet %s that is "
                   "different from port subnet %s" %
                   (subnet_mapping['subnet_id'],
                    port['fixed_ips'][0]['subnet_id']))
            raise nuage_exc.NuageBadRequest(msg=msg)

    return nuage_rtargets_ids
def _process_portbindings_create_and_update(
        self, context, port, port_res,
        vif_type=nsx_constants.VIF_TYPE_DVS):
    """Persist NSX-specific binding details for a port create/update.

    Computes vif_details (port filtering, VLAN for flat/vlan networks),
    upserts the PortBinding row, mirrors the vnic type to the NSX-V
    extended attributes, and extends the resulting port dict.
    """
    super(NsxPortBindingMixin,
          self)._process_portbindings_create_and_update(
        context, port, port_res)

    port_id = port_res['id']
    org_vnic_type = nsxv_db.get_nsxv_ext_attr_port_vnic_type(
        context.session, port_id)
    vnic_type = port.get(pbin.VNIC_TYPE, org_vnic_type)
    # Port filtering is only advertised for normal vnics.
    cap_port_filter = (port.get(pbin.VNIC_TYPE, org_vnic_type) ==
                       pbin.VNIC_NORMAL)
    vif_details = {pbin.CAP_PORT_FILTER: cap_port_filter}
    network = self.get_network(context, port_res['network_id'])
    if network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.FLAT:
        vif_details[pbin.VIF_DETAILS_VLAN] = FLAT_VLAN
    elif network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN:
        vif_details[pbin.VIF_DETAILS_VLAN] = network[pnet.SEGMENTATION_ID]

    with db_api.CONTEXT_WRITER.using(context):
        port_binding = context.session.query(
            pbin_model.PortBinding).filter_by(port_id=port_id).first()

        if not port_binding:
            port_binding = pbin_model.PortBinding(
                port_id=port_id,
                vif_type=vif_type)
            context.session.add(port_binding)

        port_binding.host = port_res[pbin.HOST_ID] or ''
        port_binding.vnic_type = vnic_type
        port_binding.vif_details = jsonutils.dumps(vif_details)
        nsxv_db.update_nsxv_port_ext_attributes(
            context.session, port_id, vnic_type)

        profile = port.get(pbin.PROFILE, constants.ATTR_NOT_SPECIFIED)
        if validators.is_attr_set(profile) or profile is None:
            # None explicitly clears the stored profile.
            port_binding.profile = (jsonutils.dumps(profile)
                                    if profile else "")

    port_res[pbin.VNIC_TYPE] = vnic_type
    self.extend_port_portbinding(port_res, port_binding)
def update_network(self, context, id, network):
    """Sync the QoS mapping for a network on update (test plugin).

    Creates, deletes or repoints the network<->QoS mapping depending on
    whether a QoS id was supplied and whether a mapping already exists.
    """
    # NOTE: despite the name, this initially holds a boolean ("was a
    # QoS id supplied?"); it is rebound to the actual id below.
    qos_id = validators.is_attr_set(network['network'].get(ext_qos.QOS))
    mapping = self.get_mapping_for_network(context, id)
    if qos_id and not mapping:
        self.create_qos_for_network(context,
                                    network['network'][ext_qos.QOS],
                                    id)
    elif not qos_id and mapping:
        self.delete_qos_for_network(context, id)
    else:
        # NOTE(review): this branch also runs when neither a QoS id nor
        # a mapping exists, in which case mapping[0] would fail —
        # presumably callers never hit that path. TODO confirm.
        qos_id = network['network'][ext_qos.QOS]
        mapping = mapping[0]
        mapping.qos_id = qos_id
        self.update_mapping_for_network(context, mapping)
    network = super(QoSTestPlugin, self).update_network(context, id,
                                                        network)
    mapping = self.get_mapping_for_network(context, id)
    if mapping:
        network[ext_qos.QOS] = mapping[0].qos_id
    return network
def reserve_network_segment(self, context, segment_data): """Call type drivers to reserve a network segment.""" # Validate the data of segment if not validators.is_attr_set(segment_data[ml2_api.NETWORK_TYPE]): msg = _("network_type required") raise exc.InvalidInput(error_message=msg) net_type = self._get_attribute(segment_data, ml2_api.NETWORK_TYPE) phys_net = self._get_attribute(segment_data, ml2_api.PHYSICAL_NETWORK) seg_id = self._get_attribute(segment_data, ml2_api.SEGMENTATION_ID) segment = {ml2_api.NETWORK_TYPE: net_type, ml2_api.PHYSICAL_NETWORK: phys_net, ml2_api.SEGMENTATION_ID: seg_id} self.validate_provider_segment(segment) # Reserve segment in type driver with db_api.context_manager.writer.using(context): return self.reserve_provider_segment(context, segment)
def _create_local_ip_port_association(self, context, local_ip_id,
                                      port_association):
    """Associate a Local IP with a fixed port.

    Resolves which fixed IP to use (the explicitly requested one, or the
    port's single fixed IP) and persists the LocalIPAssociation.

    :returns: the created LocalIPAssociation, or None on duplicate.
    :raises BadRequest: when the fixed port does not exist.
    :raises LocalIPRequestedIPNotFound: requested IP not on the port.
    :raises LocalIPNoIP: the port has no fixed IPs at all.
    :raises LocalIPNoRequestedIP: port has multiple IPs and none was
        requested, making the choice ambiguous.
    """
    fields = port_association['port_association']
    fixed_port = port_obj.Port.get_object(context,
                                          id=fields['fixed_port_id'])
    if not fixed_port:
        # Bug fix: the original read fixed_port.id here, which raised
        # AttributeError on None instead of the intended BadRequest.
        # Use the requested id from the API payload.
        msg = _("Port %s not found") % fields['fixed_port_id']
        raise lib_exc.BadRequest(resource='local_ip_port_association',
                                 msg=msg)

    requested_ip = fields['fixed_ip']
    if validators.is_attr_set(requested_ip):
        for ip in fixed_port.fixed_ips:
            if str(ip.ip_address) == requested_ip:
                break
        else:
            raise lip_exc.LocalIPRequestedIPNotFound(
                port_id=fixed_port.id, ip=requested_ip)
    else:
        if not fixed_port.fixed_ips:
            raise lip_exc.LocalIPNoIP(port_id=fixed_port.id)
        if len(fixed_port.fixed_ips) > 1:
            raise lip_exc.LocalIPNoRequestedIP(port_id=fixed_port.id)
        # Exactly one fixed IP: use it implicitly.
        requested_ip = fixed_port.fixed_ips[0]['ip_address']

    args = {'local_ip_id': local_ip_id,
            'fixed_port_id': fixed_port.id,
            'fixed_ip': requested_ip}
    lip_assoc = lip_obj.LocalIPAssociation(context, **args)
    try:
        lip_assoc.create()
    except obj_exc.NeutronDbObjectDuplicateEntry:
        LOG.error("Local IP %(lip)s association to port "
                  "%(port)s already exists.",
                  {'lip': local_ip_id,
                   'port': fixed_port.id})
        return

    return lip_assoc
def _get_provider_security_groups_on_port(self, context, port):
    """Resolve the provider security groups to apply to a port.

    Trusted (dhcp/router) ports get none; an explicit list in the request
    is used as-is; ATTR_NOT_SPECIFIED falls back to the tenant defaults;
    an explicit None means "no provider security groups".
    """
    port_data = port['port']
    tenant_id = port_data['tenant_id']
    requested = port_data.get(provider_sg.PROVIDER_SECURITYGROUPS,
                              n_constants.ATTR_NOT_SPECIFIED)
    # Provider security groups are not applied to trusted ports.
    if port_data.get('device_owner') and n_utils.is_port_trusted(port_data):
        return
    self._check_invalid_security_groups_specified(context, port_data)
    if validators.is_attr_set(requested):
        return requested
    if requested is n_constants.ATTR_NOT_SPECIFIED:
        return self._get_tenant_provider_security_groups(context, tenant_id)
    # Accept None as indication that this port should not be
    # associated with any provider security-group.
    return []
def _create_port_dhcp_opts(self, resource, event, trigger, **kwargs):
    """Create extra DHCP options on the Nuage vport for a new port.

    No-op when the request carries no extra_dhcp_opts; fails when the
    port's subnet has no Nuage L2Domain/L3 Subnet mapping.
    """
    request_port = kwargs.get('request_port')
    port = kwargs.get('port')
    vport = kwargs.get('vport')
    context = kwargs.get('context')
    opts_requested = (
        request_port and
        lib_validators.is_attr_set(request_port.get('extra_dhcp_opts')))
    if not opts_requested:
        return
    try:
        nuagedb.get_subnet_l2dom_by_port_id(context.session, port['id'])
    except SubnetMappingNotFound:
        msg = ("Cannot create a port with DHCP options on a subnet that "
               "does not have mapping to a L2Domain (OR) "
               "a L3 Subnet on Nuage.")
        raise nuage_exc.NuageBadRequest(msg=msg)
    # Translate a copy so the original request data stays untouched.
    dhcp_options = copy.deepcopy(request_port['extra_dhcp_opts'])
    for option in dhcp_options:
        self._translate_dhcp_option(option)
    self._create_update_extra_dhcp_options(dhcp_options, vport, port['id'])
def process_update_subnet(self, plugin_context, request_data, db_data):
    """Apply a dns_publish_fixed_ip change from a subnet update request.

    Creates or deletes the SubnetDNSPublishFixedIP record when the
    requested value differs from the stored one, and mirrors the new
    value into db_data.
    """
    requested = request_data.get(sn_dns.DNS_PUBLISH_FIXED_IP)
    if not validators.is_attr_set(requested):
        return
    if db_data.get(sn_dns.DNS_PUBLISH_FIXED_IP) == requested:
        return  # nothing to change
    subnet_id = db_data['id']
    if requested:
        subnet_obj.SubnetDNSPublishFixedIP(
            plugin_context,
            subnet_id=subnet_id,
            dns_publish_fixed_ip=requested).create()
    else:
        record = subnet_obj.SubnetDNSPublishFixedIP.get_object(
            plugin_context, subnet_id=subnet_id)
        record.delete()
    db_data[sn_dns.DNS_PUBLISH_FIXED_IP] = requested
def convert_to_nsx_transport_zones(default_tz_uuid, network=None,
                                   bindings=None,
                                   default_transport_type=None):
    """Build NSX transport zone configurations from provider data.

    A provider request without multiprovider segments maps to the default
    transport zone; existing db bindings are converted directly; otherwise
    the network's multiprovider segments are converted.
    """
    # Provider request without multiprovider segments: use the defaults.
    if network and not validators.is_attr_set(network.get(mpnet.SEGMENTS)):
        return [{"zone_uuid": default_tz_uuid,
                 "transport_type": default_transport_type}]
    # Db bindings take precedence when present.
    if bindings:
        return _convert_bindings_to_nsx_transport_zones(bindings)
    # Remaining case: convert multiprovider segments.
    return _convert_segments_to_nsx_transport_zones(
        network.get(mpnet.SEGMENTS), default_tz_uuid)
def put_port_hostid(context, port_id, host):
    """Record (merge) the host a port is bound to, for location tracking.

    Silently skips (with a log message) when host or port_id is unset or
    empty.
    """
    # REVISIT(kevinbenton): this is a workaround to avoid portbindings_db
    # relational table generation until one of the functions is called.
    from neutron.db.models import portbinding
    if not validators.is_attr_set(host):
        LOG.warning("No host_id in port request to track port location.")
        return
    if port_id == '':
        LOG.warning("Received an empty port ID for host_id '%s'", host)
        return
    if host == '':
        LOG.debug("Received an empty host_id for port '%s'", port_id)
        return
    LOG.debug("Logging port %(port)s on host_id %(host)s",
              {'port': port_id, 'host': host})
    with context.session.begin(subtransactions=True):
        context.session.merge(
            portbinding.PortBindingPort(port_id=port_id, host=host))
def _process_mido_portbindings_create_and_update(self, context, port_data, port): port_id = port['id'] # Set profile to {} if the binding:profile key exists but set to None. # This is for the special handling in the case the user wants to remove # the binding. profile = None if portbindings.PROFILE in port_data: profile = port_data.get(portbindings.PROFILE) or {} profile_set = validators.is_attr_set(profile) if_name = profile.get('interface_name') if profile_set else None if profile_set and profile: # Update or create, so validate the inputs if not if_name: msg = 'The interface name was not provided or empty' raise n_exc.BadRequest(resource='port', msg=msg) if self.get_port_host(context, port_id) is None: msg = 'Cannot set binding because the host is not bound' raise n_exc.BadRequest(resource='port', msg=msg) with context.session.begin(subtransactions=True): bind_port = context.session.query(PortBindingInfo).filter_by( port_id=port_id).first() if profile_set: if bind_port: if if_name: bind_port.interface_name = if_name else: context.session.delete(bind_port) elif if_name: context.session.add( PortBindingInfo(port_id=port_id, interface_name=if_name)) else: if_name = bind_port.interface_name if bind_port else None self._extend_mido_portbinding(port, if_name)
def reserve_network_segment(self, context, segment_data):
    """Call type drivers to reserve a network segment."""
    # network_type is the only mandatory attribute.
    if not validators.is_attr_set(segment_data[api.NETWORK_TYPE]):
        msg = _("network_type required")
        raise exc.InvalidInput(error_message=msg)
    # Collect the segment attributes into a single description dict.
    segment = {key: self._get_attribute(segment_data, key)
               for key in (api.NETWORK_TYPE,
                           api.PHYSICAL_NETWORK,
                           api.SEGMENTATION_ID)}
    self.validate_provider_segment(segment)
    # Hand the validated segment to the type driver inside a transaction.
    with context.session.begin(subtransactions=True):
        return self.reserve_provider_segment(context, segment)
def _process_create_allowed_address_pairs(self, context, port,
                                          allowed_address_pairs):
    """Persist the allowed address pairs of a newly created port.

    Pairs without a mac_address inherit the port's own MAC.

    :returns: the (possibly mac-filled) list of pairs, or [] when unset.
    :raises DuplicateAddressPairInRequest: on a duplicate pair.
    """
    if not validators.is_attr_set(allowed_address_pairs):
        return []
    try:
        with context.session.begin(subtransactions=True):
            for pair in allowed_address_pairs:
                # use port.mac_address if no mac address in address pair
                if 'mac_address' not in pair:
                    pair['mac_address'] = port['mac_address']
                context.session.add(
                    AllowedAddressPair(port_id=port['id'],
                                       mac_address=pair['mac_address'],
                                       ip_address=pair['ip_address']))
    except db_exc.DBDuplicateEntry:
        # 'pair' still references the offending entry from the loop.
        raise addr_pair.DuplicateAddressPairInRequest(
            mac_address=pair['mac_address'],
            ip_address=pair['ip_address'])
    return allowed_address_pairs
def _validate_ip_prefix_allowed_address_pairs(allowed_address_pairs):
    """Validate that allowed address pairs contain only host IPs.

    Dragonflow only supports host IPs in allowed address pairs. This
    method validates that no network IPs (prefix IPs) are given in the
    allowed address pairs.

    :returns: a list copy of the pairs, or [] when the attribute is unset.
    :raises Exception: when any pair's ip_address contains a prefix.
    """
    if not validators.is_attr_set(allowed_address_pairs):
        return []
    # Not support IP address prefix yet
    for pair in allowed_address_pairs:
        # A '/' marks CIDR/prefix notation, which DF cannot handle yet.
        if '/' in pair["ip_address"]:
            # BUG FIX: the original passed the substitution mapping as a
            # second Exception argument (so it was never interpolated),
            # used placeholders without the 's' conversion, and joined
            # adjacent literals without spaces. Format the message here.
            raise Exception(
                _("DF don't support IP prefix in allowed "
                  "address pairs yet. The allowed address "
                  "pair {ip_address = %(ip_address)s, "
                  "mac_address = %(mac_address)s} "
                  "caused this exception.") %
                {'ip_address': pair["ip_address"],
                 'mac_address': pair["mac_address"]})
    supported_allowed_address_pairs = list(allowed_address_pairs)
    return supported_allowed_address_pairs
def process_update_network(self, plugin_context, request_data, db_data):
    """Apply a dns_domain change from a network update request.

    Creates, updates or deletes the NetworkDNSDomain record as needed and
    mirrors the effective value into db_data.
    """
    new_domain = request_data.get(dns.DNSDOMAIN)
    if not validators.is_attr_set(new_domain):
        return
    old_domain = db_data.get(dns.DNSDOMAIN)
    if old_domain == new_domain:
        return  # no change requested
    net_id = db_data['id']
    if not old_domain:
        # No record yet; create one only for a non-empty new value.
        if new_domain:
            plugin_context.session.add(dns_db.NetworkDNSDomain(
                network_id=net_id, dns_domain=new_domain))
            db_data[dns.DNSDOMAIN] = new_domain
        return
    record = plugin_context.session.query(
        dns_db.NetworkDNSDomain).filter_by(network_id=net_id).one()
    if new_domain:
        record['dns_domain'] = new_domain
        db_data[dns.DNSDOMAIN] = new_domain
    else:
        # An empty value removes the record.
        plugin_context.session.delete(record)
        db_data[dns.DNSDOMAIN] = ''
def _get_security_groups_on_port(self, context, port):
    """Check that all security groups on port belong to tenant.

    :returns: all security groups IDs on port belonging to tenant.
    """
    port_data = port['port']
    if not validators.is_attr_set(
            port_data.get(securitygroups_db.ext_sg.SECURITYGROUPS)):
        return
    # network-owned ports (dhcp, router, ...) are skipped entirely.
    device_owner = port_data.get('device_owner')
    if device_owner and device_owner.startswith('network:'):
        return
    port_sg = port_data.get(securitygroups_db.ext_sg.SECURITYGROUPS, [])
    known = {sg['id'] for sg in self.get_security_groups(
        context, fields=['id'], filters={'id': port_sg})}
    requested = set(port_sg)
    missing = requested - known
    if missing:
        raise securitygroups_db.ext_sg.SecurityGroupNotFound(
            id=', '.join(missing))
    return requested
def is_host_set(cls, host):
    """Utility to tell if the host is set in the port binding"""
    # A host is unset when it is None, '', or ATTR_NOT_SPECIFIED; the
    # extra is_attr_set check covers the sentinel, because host binding
    # implementations differ in which "unset" value they store.
    if not host:
        return host
    return validators.is_attr_set(host)
def _has_address_pairs(self, port):
    """Return True when the port carries a non-empty address-pairs list."""
    pairs = port['port'][addr_apidef.ADDRESS_PAIRS]
    return validators.is_attr_set(pairs) and pairs != []
def post(self, **kw):
    """Create a core_router resource (admin-only REST handler).

    Reads the core_router payload from the request context, persists a
    CoreRouter row (plus its routes when provided) in one DB transaction,
    and returns a success/failure message dict.
    """
    context = t_context.extract_context_from_environ()
    # Admin-only API: reject unauthorized callers with 401.
    if not policy.enforce(context, policy.ADMIN_API_PODS_CREATE):
        pecan.abort(401, _('Unauthorized to create core_router'))
        return
    core_router = request.context['request_data']['core_router']
    _uuid = uuidutils.generate_uuid()
    dc = core_router.get('dc')
    project_id = core_router.get('project_id', '').strip()
    core_router_name = core_router.get('core_router_name', '').strip()
    admin_state_up = core_router.get('admin_state_up', True)
    status = core_router.get('status', "DOWN")
    description = core_router.get('description', '').strip()
    routes = core_router.get('routes')
    try:
        with context.session.begin():
            new_core_router = core.create_resource(
                context, models.CoreRouter,
                {'id': _uuid,
                 'dc': dc,
                 'project_id': project_id,
                 'core_router_name': core_router_name,
                 'admin_state_up': admin_state_up,
                 'status': status,
                 'description': description})
            # Routes are optional; persist them in the same transaction.
            if validators.is_attr_set(routes):
                self._create_routerroutes(context, _uuid, routes)
            # Re-read so the response reflects the stored representation.
            new_core_router = core.get_resource(context, models.CoreRouter,
                                                _uuid)
            return_object = m.SuccessMessage(
                result={'core_router': new_core_router})
            return return_object.to_dict()
    except db_exc.DBDuplicateEntry as e1:
        # One core_router per dc: duplicates map to a dedicated message.
        LOG.exception('Record core_router already exists for '
                      'dc %(dc)s: '
                      '%(exception)s',
                      {'dc': dc, 'exception': e1})
        return_object = m.CoreRouterForDCExists(dc=dc)
        return return_object.to_dict()
    except Exception as e2:
        # Any other failure rolls back the transaction and reports a
        # generic failure message.
        LOG.exception('Failed to create core_router : '
                      'dc: %(dc)s,'
                      'project_id %(project_id)s: '
                      'core_router_name: %(core_router_name)s,'
                      '%(exception)s ',
                      {'dc': dc,
                       'project_id': project_id,
                       'core_router_name': core_router_name,
                       'exception': e2})
        return_object = m.FailureMessage()
        return return_object.to_dict()
def _validate_no_routes(self, router):
    """Reject a router request that carries any static routes."""
    routes = router.get('routes')
    if validators.is_attr_set(routes) and len(routes) > 0:
        raise n_exc.InvalidInput(
            error_message=_("Cannot configure static routes on a shared "
                            "router"))
def _update_implicit_subnetpool(self, context, request, result):
    """Apply a requested is_implicit change to the subnetpool result.

    When the request explicitly sets is_implicit, the value is first
    copied into the result (so update_implicit_subnetpool sees it) and
    then overwritten with whatever update_implicit_subnetpool returns
    as the effective value.
    """
    if validators.is_attr_set(request['subnetpool'].get('is_implicit')):
        result['is_implicit'] = request['subnetpool']['is_implicit']
        # The double assignment is deliberate: the helper reads the
        # requested value from result and returns the persisted one.
        result['is_implicit'] = (self.update_implicit_subnetpool(
            context, result))
def _make_subnet_args(self, detail, subnet, subnetpool_id):
    """Extend the base subnet args with segment_id when one was given."""
    args = super(IpamBackendMixin, self)._make_subnet_args(
        detail, subnet, subnetpool_id)
    segment_id = subnet.get(segment.SEGMENT_ID)
    if validators.is_attr_set(segment_id):
        args['segment_id'] = segment_id
    return args
def _ipam_get_subnets(self, context, network_id, host):
    """Return the subnet dicts of a network that are usable from a host.

    Without a host, only non-segmented subnets are returned.  With a
    host, subnets are matched through the SegmentHostMapping table so
    that on a routed network only subnets whose segment is mapped to the
    host are returned.

    :raises HostNotConnectedToAnySegment: routed network, host unmapped.
    :raises HostConnectedToMultipleSegments: host maps to >1 segment of
        this network.
    """
    Subnet = models_v2.Subnet
    SegmentHostMapping = segment_svc_db.SegmentHostMapping
    query = self._get_collection_query(context, Subnet)
    query = query.filter(Subnet.network_id == network_id)
    # Note: This seems redundant, but its not. It has to cover cases
    # where host is None, ATTR_NOT_SPECIFIED, or '' due to differences in
    # host binding implementations.
    if not validators.is_attr_set(host) or not host:
        query = query.filter(Subnet.segment_id.is_(None))
        return [self._make_subnet_dict(c, context=context) for c in query]
    # A host has been provided. Consider these two scenarios
    # 1. Not a routed network: subnets are not on segments
    # 2. Is a routed network: only subnets on segments mapped to host
    # The following join query returns results for either. The two are
    # guaranteed to be mutually exclusive when subnets are created.
    query = query.add_entity(SegmentHostMapping)
    query = query.outerjoin(
        SegmentHostMapping,
        and_(Subnet.segment_id == SegmentHostMapping.segment_id,
             SegmentHostMapping.host == host))
    # Essentially "segment_id IS NULL XNOR host IS NULL"
    query = query.filter(or_(and_(Subnet.segment_id.isnot(None),
                                  SegmentHostMapping.host.isnot(None)),
                             and_(Subnet.segment_id.is_(None),
                                  SegmentHostMapping.host.is_(None))))
    results = query.all()
    # See if results are empty because the host isn't mapped to a segment
    if not results:
        # Check if it's a routed network (i.e subnets on segments)
        query = self._get_collection_query(context, Subnet)
        query = query.filter(Subnet.network_id == network_id)
        query = query.filter(Subnet.segment_id.isnot(None))
        if query.count() == 0:
            # Plain network with no subnets at all: nothing to return.
            return []
        # It is a routed network but no subnets found for host
        raise segment_exc.HostNotConnectedToAnySegment(
            host=host, network_id=network_id)
    # For now, we're using a simplifying assumption that a host will only
    # touch one segment in a given routed network. Raise exception
    # otherwise. This restriction may be relaxed as use cases for multiple
    # mappings are understood.
    segment_ids = {subnet.segment_id
                   for subnet, mapping in results
                   if mapping}
    if 1 < len(segment_ids):
        raise segment_exc.HostConnectedToMultipleSegments(
            host=host, network_id=network_id)
    return [self._make_subnet_dict(subnet, context=context)
            for subnet, _mapping in results]
def _save_subnet(self, context, network, subnet_args, dns_nameservers,
                 host_routes, subnet_request):
    """Validate and persist a subnet plus its related records.

    Validates CIDR/subnetpool/segment constraints, creates the Subnet
    object, then stores DNS nameservers (ordered), host routes, service
    types and allocation pools.

    :returns: the freshly re-read Subnet object.
    """
    network_scope = addr_scope_obj.AddressScope.get_network_address_scope(
        context, network.id, subnet_args['ip_version'])
    # 'subnetpool' is not necessarily an object
    subnetpool = subnet_args.get('subnetpool_id')
    if subnetpool and subnetpool != const.IPV6_PD_POOL_ID:
        subnetpool = self._get_subnetpool(context, subnetpool)
    self._validate_subnet_cidr(context, network, subnet_args['cidr'])
    self._validate_network_subnetpools(network,
                                       subnet_args['ip_version'],
                                       subnetpool, network_scope)
    # service_types is stored in its own table, not on the subnet row.
    service_types = subnet_args.pop('service_types', [])
    segment_id = subnet_args.get('segment_id')
    if segment_id:
        # TODO(slaweq): integrate check if segment exists in
        # self._validate_segment() method
        segment = network_obj.NetworkSegment.get_object(context,
                                                        id=segment_id)
        if not segment:
            raise segment_exc.SegmentNotFound(segment_id=segment_id)
    subnet = subnet_obj.Subnet(context, **subnet_args)
    subnet.create()
    # TODO(slaweq): when check is segment exists will be integrated in
    # self._validate_segment() method, it should be moved to be done before
    # subnet object is created
    self._validate_segment(context, network['id'], segment_id)
    # NOTE(changzhi) Store DNS nameservers with order into DB one
    # by one when create subnet with DNS nameservers
    if validators.is_attr_set(dns_nameservers):
        for order, server in enumerate(dns_nameservers):
            dns = subnet_obj.DNSNameServer(context,
                                           address=server,
                                           order=order,
                                           subnet_id=subnet.id)
            dns.create()
    if validators.is_attr_set(host_routes):
        for rt in host_routes:
            route = subnet_obj.Route(
                context,
                subnet_id=subnet.id,
                destination=net_utils.AuthenticIPNetwork(
                    rt['destination']),
                nexthop=netaddr.IPAddress(rt['nexthop']))
            route.create()
    if validators.is_attr_set(service_types):
        for service_type in service_types:
            service_type_obj = subnet_obj.SubnetServiceType(
                context, subnet_id=subnet.id, service_type=service_type)
            service_type_obj.create()
    self.save_allocation_pools(context, subnet,
                               subnet_request.allocation_pools)
    # Re-read so the returned object includes everything just persisted.
    return subnet_obj.Subnet.get_object(context, id=subnet.id)
def create_port(self, context, port):
    """Create a port in Neutron DB and apply NSX/DVS-specific processing.

    Persists the port, applies network-level port security, validates
    security groups and allowed address pairs against that setting, then
    runs the DVS-side handling (DHCP access) outside the DB transaction.
    """
    # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
    # then we pass the port to the policy engine. The reason why we don't
    # pass the value to the policy engine when the port is
    # ATTR_NOT_SPECIFIED is for the case where a port is created on a
    # shared network that is not owned by the tenant.
    port_data = port['port']
    with db_api.CONTEXT_WRITER.using(context):
        # First we allocate port in neutron database
        neutron_db = super(NsxDvsV2, self).create_port(context, port)
        self._extension_manager.process_create_port(
            context, port_data, neutron_db)
        # Port security is inherited from the network binding.
        port_security = self._get_network_security_binding(
            context, neutron_db['network_id'])
        port_data[psec.PORTSECURITY] = port_security
        self._process_port_port_security_create(
            context, port_data, neutron_db)
        # Update fields obtained from neutron db (eg: MAC address)
        port["port"].update(neutron_db)
        has_ip = self._ip_on_port(neutron_db)
        # security group extension checks
        if has_ip:
            self._ensure_default_security_group_on_port(context, port)
        elif validators.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
            # Security groups require an IP on the port.
            raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
        port_data[ext_sg.SECURITYGROUPS] = (
            self._get_security_groups_on_port(context, port))
        self._process_port_create_security_group(
            context, port_data, port_data[ext_sg.SECURITYGROUPS])
        self._process_portbindings_create_and_update(
            context, port['port'], port_data)
        # allowed address pair checks
        if validators.is_attr_set(port_data.get(
                addr_apidef.ADDRESS_PAIRS)):
            # Address pairs are only allowed with port security enabled.
            if not port_security:
                raise addr_exc.AddressPairAndPortSecurityRequired()
            else:
                self._process_create_allowed_address_pairs(
                    context, neutron_db,
                    port_data[addr_apidef.ADDRESS_PAIRS])
        else:
            # remove ATTR_NOT_SPECIFIED
            port_data[addr_apidef.ADDRESS_PAIRS] = []
        self._process_portbindings_create_and_update(
            context, port['port'], port_data)
        self._process_vnic_type(context, port_data, neutron_db['id'])
        LOG.debug("create_port completed on NSX for tenant "
                  "%(tenant_id)s: (%(id)s)", port_data)
    # DB Operation is complete, perform DVS operation
    port_data = port['port']
    # this extra lookup is necessary to get the
    # latest db model for the extension functions
    port_model = self._get_port(context, port_data['id'])
    resource_extend.apply_funcs('ports', port_data, port_model)
    self._extend_port_dict_binding(port_data, port_model)
    self.handle_port_dhcp_access(context, port_data, action='create_port')
    return port_data
def _get_value(self, data, key):
    """Return data[key], normalizing an unset attribute to ''."""
    value = data[key]
    return value if validators.is_attr_set(value) else ''
def _is_ha(cls, router):
    """Return the router's ha flag, falling back to the l3_ha config."""
    ha = router.get('ha')
    if validators.is_attr_set(ha):
        return ha
    return cfg.CONF.l3_ha
def validate_and_get_data_from_binding_profile(port):
    """Validate a port's OVN binding:profile and return its parameters.

    Matches the profile against the known parameter sets (filtered by
    vnic_type and capabilities), checks completeness, value types, and
    the special 'parent_name'/'tag' parameters.

    :returns: the validated parameter dict ({} when no profile applies).
    :raises InvalidInput: on any malformed profile content.
    """
    if (constants.OVN_PORT_BINDING_PROFILE not in port or
            not validators.is_attr_set(
                port[constants.OVN_PORT_BINDING_PROFILE])):
        return {}
    param_set = {}
    param_dict = {}
    vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL)
    # A port's capabilities is listed as part of the binding profile, but we
    # treat it separately and do not want it to be included in the generic
    # validation.
    binding_profile = copy.deepcopy(
        port[constants.OVN_PORT_BINDING_PROFILE])
    capabilities = binding_profile.pop(constants.PORT_CAP_PARAM, [])
    if not isinstance(capabilities, list):
        msg = _('Invalid binding:profile. %s must be of type list.'
                ) % constants.PORT_CAP_PARAM
        raise n_exc.InvalidInput(error_message=msg)
    # Find the first parameter set that matches this port's vnic_type /
    # capabilities and whose keys are all present in the profile.
    for pbp_param_set in constants.OVN_PORT_BINDING_PROFILE_PARAMS:
        if pbp_param_set.vnic_type:
            if pbp_param_set.vnic_type != vnic_type:
                continue
            if capabilities and pbp_param_set.capability not in capabilities:
                continue
        param_set = pbp_param_set.param_set
        param_keys = param_set.keys()
        for param_key in param_keys:
            try:
                param_dict[param_key] = binding_profile[param_key]
            except KeyError:
                pass
        if len(param_dict) == 0:
            # No overlap with this set: try the next one.
            continue
        if len(param_dict) != len(param_keys):
            # Partial overlap: all keys of the set are required.
            msg = _('Invalid binding:profile. %s are all '
                    'required.') % param_keys
            raise n_exc.InvalidInput(error_message=msg)
        if (len(binding_profile) != len(param_keys)):
            msg = _('Invalid binding:profile. too many parameters')
            raise n_exc.InvalidInput(error_message=msg)
        break
    if not param_dict:
        return {}
    # With this example param_set:
    #
    # param_set = {
    #     'do_not_check_this_key': None,
    #     'pci_slot': [str],
    #     'physical_network': [str, type(None)]
    # }
    #
    # We confirm that each binding_profile key is of one of the listed
    # types, allowing validation of polymorphic entries.
    #
    # 'physical_network' is polymorphic because: When a VNIC_REMOTE_MANAGED
    # or VNIC_DIRECT with PORT_CAP_SWITCHDEV capability port is attached to
    # a project network backed by an overlay (tunneled) network the value
    # will be 'None'. For the case of ports attached to a project network
    # backed by VLAN the value will be of type ``str``. This comes from
    # Nova and is provided in the ``physical_network`` tag in the Nova PCI
    # Passthrough configuration.
    #
    # In the above example the type of the value behind
    # 'do_not_check_this_key' will not be checked, 'pci_slot' must be
    # ``str``, 'physical_network must be either ``str`` or ``NoneType``.
    for param_key, param_types in param_set.items():
        if param_types is None:
            # None means "do not type-check this key".
            continue
        param_value = param_dict[param_key]
        for param_type in param_types:
            if isinstance(param_value, param_type):
                break
        else:
            msg = _('Invalid binding:profile. %(key)s %(value)s '
                    'value invalid type') % {'key': param_key,
                                             'value': param_value}
            raise n_exc.InvalidInput(error_message=msg)
    # Make sure we can successfully look up the port indicated by
    # parent_name. Just let it raise the right exception if there is a
    # problem.
    if 'parent_name' in param_set:
        plugin = directory.get_plugin()
        plugin.get_port(n_context.get_admin_context(),
                        param_dict['parent_name'])
    if 'tag' in param_set:
        # VLAN tag must be a valid 802.1Q VID.
        tag = int(param_dict['tag'])
        if tag < 0 or tag > 4095:
            msg = _('Invalid binding:profile. tag "%s" must be '
                    'an integer between 0 and 4095, inclusive') % tag
            raise n_exc.InvalidInput(error_message=msg)
    return param_dict
def get_vlan_transparent(network):
    """Return the network's vlan_transparent value, or False when unset."""
    if 'vlan_transparent' not in network:
        return False
    value = network['vlan_transparent']
    return value if validators.is_attr_set(value) else False