def _make_port_dict(self, port, fields=None, process_extensions=True):
    res = {"id": port["id"],
           'name': port['name'],
           "network_id": port["network_id"],
           'tenant_id': port['tenant_id'],
           "mac_address": port["mac_address"],
           "admin_state_up": port["admin_state_up"],
           "status": port["status"],
           "fixed_ips": [{'subnet_id': ip["subnet_id"],
                          'ip_address': ip["ip_address"]}
                         for ip in port["fixed_ips"]],
           "device_id": port["device_id"],
           "device_owner": port["device_owner"]}
    # Call auxiliary extend functions, if any
    if process_extensions:
        resource_extend.apply_funcs(
            port_def.COLLECTION_NAME, res, port)
    return db_utils.resource_fields(res, fields)

def _make_port_dict(self, port, fields=None, process_extensions=True,
                    with_fixed_ips=True):
    mac = port["mac_address"]
    if isinstance(mac, netaddr.EUI):
        mac.dialect = netaddr.mac_unix_expanded
    res = {"id": port["id"],
           'name': port['name'],
           "network_id": port["network_id"],
           'tenant_id': port['tenant_id'],
           "mac_address": str(mac),
           "admin_state_up": port["admin_state_up"],
           "status": port["status"],
           "device_id": port["device_id"],
           "device_owner": port["device_owner"]}
    if with_fixed_ips:
        res["fixed_ips"] = [
            {'subnet_id': ip["subnet_id"],
             'ip_address': str(ip["ip_address"])}
            for ip in port["fixed_ips"]]
    # Call auxiliary extend functions, if any
    if process_extensions:
        port_data = port
        if isinstance(port, port_obj.Port):
            port_data = port.db_obj
        resource_extend.apply_funcs(
            port_def.COLLECTION_NAME, res, port_data)
    return db_utils.resource_fields(res, fields)

def _make_port_dict(self, port, fields=None, process_extensions=True):
    mac = port["mac_address"]
    if isinstance(mac, netaddr.EUI):
        mac.dialect = netaddr.mac_unix_expanded
    res = {"id": port["id"],
           'name': port['name'],
           "network_id": port["network_id"],
           'tenant_id': port['tenant_id'],
           "mac_address": str(mac),
           "admin_state_up": port["admin_state_up"],
           "status": port["status"],
           "fixed_ips": [{'subnet_id': ip["subnet_id"],
                          'ip_address': ip["ip_address"]}
                         for ip in port["fixed_ips"]],
           "device_id": port["device_id"],
           "device_owner": port["device_owner"]}
    # Call auxiliary extend functions, if any
    if process_extensions:
        port_data = port
        if isinstance(port, port_obj.Port):
            port_data = port.db_obj
        resource_extend.apply_funcs(
            port_def.COLLECTION_NAME, res, port_data)
    return db_utils.resource_fields(res, fields)

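# Illustration only (not one of the original snippets): a minimal sketch of
# the field trimming done by the trailing db_utils.resource_fields(res, fields)
# call in the _make_*_dict() helpers above. It assumes db_utils is
# neutron_lib.db.utils; the example_port dict below is made up.
from neutron_lib.db import utils as db_utils

example_port = {'id': 'p1', 'name': 'demo',
                'mac_address': 'fa:16:3e:00:00:01'}
full = db_utils.resource_fields(example_port, None)
# -> expected to keep all keys when fields is None or empty
trimmed = db_utils.resource_fields(example_port, ['id', 'mac_address'])
# -> expected to keep only the requested 'id' and 'mac_address' keys
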
def _make_security_group_rule_dict(self, security_group_rule, fields=None):
    # TODO(slaweq): switch this to use OVO instead of db object
    res = {'id': security_group_rule['id'],
           'tenant_id': security_group_rule['tenant_id'],
           'security_group_id': security_group_rule['security_group_id'],
           'ethertype': security_group_rule['ethertype'],
           'direction': security_group_rule['direction'],
           'protocol': security_group_rule['protocol'],
           'port_range_min': security_group_rule['port_range_min'],
           'port_range_max': security_group_rule['port_range_max'],
           'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
           'remote_address_group_id':
               security_group_rule['remote_address_group_id'],
           'normalized_cidr':
               self._get_normalized_cidr_from_rule(security_group_rule),
           'remote_group_id': security_group_rule['remote_group_id'],
           'standard_attr_id': security_group_rule.standard_attr.id,
           }
    resource_extend.apply_funcs(ext_sg.SECURITYGROUPRULES, res,
                                security_group_rule)
    return db_utils.resource_fields(res, fields)

def _make_subnet_dict(self, subnet, fields=None, context=None):
    res = {'id': subnet['id'],
           'name': subnet['name'],
           'tenant_id': subnet['tenant_id'],
           'network_id': subnet['network_id'],
           'ip_version': subnet['ip_version'],
           'subnetpool_id': subnet['subnetpool_id'],
           'enable_dhcp': subnet['enable_dhcp'],
           'ipv6_ra_mode': subnet['ipv6_ra_mode'],
           'ipv6_address_mode': subnet['ipv6_address_mode'],
           }
    res['gateway_ip'] = str(
        subnet['gateway_ip']) if subnet['gateway_ip'] else None
    # TODO(korzen) this method can get the subnet as a DB object or as a
    # Subnet OVO, so as a temporary workaround the fields are filled in
    # separately for each case. After all code is converted to use the
    # Subnet OVO, the 'else' branch can be deleted
    if isinstance(subnet, subnet_obj.Subnet):
        res['cidr'] = str(subnet.cidr)
        res['allocation_pools'] = [{'start': str(pool.start),
                                    'end': str(pool.end)}
                                   for pool in subnet.allocation_pools]
        res['host_routes'] = [{'destination': str(route.destination),
                               'nexthop': str(route.nexthop)}
                              for route in subnet.host_routes]
        res['dns_nameservers'] = [str(dns.address)
                                  for dns in subnet.dns_nameservers]
        res['shared'] = subnet.shared
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME,
                                    res, subnet.db_obj)
    else:
        res['cidr'] = subnet['cidr']
        res['allocation_pools'] = [{'start': pool['first_ip'],
                                    'end': pool['last_ip']}
                                   for pool in subnet['allocation_pools']]
        res['host_routes'] = [{'destination': route['destination'],
                               'nexthop': route['nexthop']}
                              for route in subnet['routes']]
        res['dns_nameservers'] = [dns['address']
                                  for dns in subnet['dns_nameservers']]
        # The shared attribute for a subnet is the same
        # as for its parent network
        res['shared'] = self._is_network_shared(context,
                                                subnet.rbac_entries)
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res, subnet)
    return db_utils.resource_fields(res, fields)

def get_port(self, context, id, fields=None):
    port = super(NsxDvsV2, self).get_port(context, id, fields=None)
    if 'id' in port:
        port_model = self._get_port(context, port['id'])
        resource_extend.apply_funcs('ports', port, port_model)
        self._extend_port_dict_binding(port, port_model)
    else:
        port[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS
    return db_utils.resource_fields(port, fields)

def _make_segment_dict(segment_obj, fields=None):
    res = {'id': segment_obj['id'],
           'network_id': segment_obj['network_id'],
           'name': segment_obj['name'],
           'description': segment_obj['description'],
           db.PHYSICAL_NETWORK: segment_obj[db.PHYSICAL_NETWORK],
           db.NETWORK_TYPE: segment_obj[db.NETWORK_TYPE],
           db.SEGMENTATION_ID: segment_obj[db.SEGMENTATION_ID],
           'hosts': segment_obj['hosts'],
           'segment_index': segment_obj['segment_index']}
    resource_extend.apply_funcs('segments', res, segment_obj.db_obj)
    return db_utils.resource_fields(res, fields)

def to_dict(self, fields=None):
    _dict = super(NetworkSegmentRange, self).to_dict()
    # extend the network segment range dict with `available` and `used`
    # fields
    _dict.update({'available': self._get_available_allocation()})
    _dict.update({'used': self._get_used_allocation_mapping()})
    _dict = db_utils.resource_fields(_dict, fields)
    # TODO(ralonsoh): remove once bp/keystone-v3 migration finishes.
    _dict.pop('tenant_id', None)
    resource_extend.apply_funcs(nsr_def.COLLECTION_NAME, _dict, self.db_obj)
    return _dict

def to_dict(self, fields=None):
    _dict = super(NetworkSegmentRange, self).to_dict()
    # extend the network segment range dict with `available` and `used`
    # fields
    _dict.update({'available': self._get_available_allocation()})
    _dict.update({'used': self._get_used_allocation_mapping()})
    # NOTE(ralonsoh): this workaround should be removed once the migration
    # from "tenant_id" to "project_id" is finished.
    _dict = db_utils.resource_fields(_dict, fields)
    _dict.pop('tenant_id', None)
    resource_extend.apply_funcs(nsr_def.COLLECTION_NAME, _dict, self.db_obj)
    return _dict

def _make_subnet_dict(self, subnet, fields=None, context=None):
    res = {'id': subnet['id'],
           'name': subnet['name'],
           'tenant_id': subnet['tenant_id'],
           'network_id': subnet['network_id'],
           'ip_version': subnet['ip_version'],
           'subnetpool_id': subnet['subnetpool_id'],
           'enable_dhcp': subnet['enable_dhcp'],
           'ipv6_ra_mode': subnet['ipv6_ra_mode'],
           'ipv6_address_mode': subnet['ipv6_address_mode'],
           }
    res['gateway_ip'] = str(
        subnet['gateway_ip']) if subnet['gateway_ip'] else None
    # TODO(korzen) this method can get the subnet as a DB object or as a
    # Subnet OVO, so as a temporary workaround the fields are filled in
    # separately for each case. After all code is converted to use the
    # Subnet OVO, the 'else' branch can be deleted
    if isinstance(subnet, subnet_obj.Subnet):
        res['cidr'] = str(subnet.cidr)
        res['allocation_pools'] = [{'start': str(pool.start),
                                    'end': str(pool.end)}
                                   for pool in subnet.allocation_pools]
        res['host_routes'] = [{'destination': str(route.destination),
                               'nexthop': str(route.nexthop)}
                              for route in subnet.host_routes]
        res['dns_nameservers'] = [str(dns.address)
                                  for dns in subnet.dns_nameservers]
        res['shared'] = subnet.shared
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME,
                                    res, subnet.db_obj)
    else:
        res['cidr'] = subnet['cidr']
        res['allocation_pools'] = [{'start': pool['first_ip'],
                                    'end': pool['last_ip']}
                                   for pool in subnet['allocation_pools']]
        res['host_routes'] = [{'destination': route['destination'],
                               'nexthop': route['nexthop']}
                              for route in subnet['routes']]
        res['dns_nameservers'] = [dns['address']
                                  for dns in subnet['dns_nameservers']]
        # The shared attribute for a subnet is the same
        # as for its parent network
        res['shared'] = self._is_network_shared(context,
                                                subnet.rbac_entries)
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res, subnet)
    return db_utils.resource_fields(res, fields)

def test_apply_funcs(self):
    resources = ['A', 'B', 'C']
    callbacks = []

    def _cb(resp, db_obj):
        callbacks.append(resp)

    for r in resources:
        resource_extend.register_funcs(r, (_cb, ))
    for r in resources:
        resource_extend.apply_funcs(r, None, None)
    self.assertEqual(3, len(callbacks))

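# Illustration only (not one of the original snippets): a minimal sketch of
# how extend functions are typically registered so that the
# resource_extend.apply_funcs() calls in the snippets above pick them up.
# It assumes neutron_lib's has_resource_extenders/extends decorators; the
# mixin class, the _extend_port_with_flavor name and the 'flavor_id' key
# are made up for the example.
from neutron_lib.api.definitions import port as port_def
from neutron_lib.db import resource_extend


@resource_extend.has_resource_extenders
class FlavorExtendMixin(object):

    @staticmethod
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _extend_port_with_flavor(port_res, port_db):
        # Invoked by resource_extend.apply_funcs('ports', res, port_db);
        # mutates the API response dict in place.
        port_res['flavor_id'] = getattr(port_db, 'flavor_id', None)
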
def _make_security_group_dict(self, security_group, fields=None):
    res = {'id': security_group['id'],
           'name': security_group['name'],
           'tenant_id': security_group['tenant_id'],
           'description': security_group['description']}
    res['security_group_rules'] = [
        self._make_security_group_rule_dict(r.db_obj)
        for r in security_group.rules]
    resource_extend.apply_funcs(ext_sg.SECURITYGROUPS, res,
                                security_group.db_obj)
    return db_utils.resource_fields(res, fields)

def _make_segment_dict(segment_obj, fields=None):
    res = {'id': segment_obj['id'],
           'network_id': segment_obj['network_id'],
           'name': segment_obj['name'],
           'description': segment_obj['description'],
           db.PHYSICAL_NETWORK: segment_obj[db.PHYSICAL_NETWORK],
           db.NETWORK_TYPE: segment_obj[db.NETWORK_TYPE],
           db.SEGMENTATION_ID: segment_obj[db.SEGMENTATION_ID],
           'hosts': segment_obj['hosts'],
           'segment_index': segment_obj['segment_index']}
    resource_extend.apply_funcs('segments', res, segment_obj.db_obj)
    return db_utils.resource_fields(res, fields)

def _make_security_group_rule_dict(self, security_group_rule, fields=None):
    res = {'id': security_group_rule['id'],
           'tenant_id': security_group_rule['tenant_id'],
           'security_group_id': security_group_rule['security_group_id'],
           'ethertype': security_group_rule['ethertype'],
           'direction': security_group_rule['direction'],
           'protocol': security_group_rule['protocol'],
           'port_range_min': security_group_rule['port_range_min'],
           'port_range_max': security_group_rule['port_range_max'],
           'remote_ip_prefix': security_group_rule['remote_ip_prefix'],
           'remote_group_id': security_group_rule['remote_group_id']}
    resource_extend.apply_funcs(ext_sg.SECURITYGROUPRULES, res,
                                security_group_rule)
    return db_utils.resource_fields(res, fields)

def _make_security_group_dict(self, security_group, fields=None):
    res = {'id': security_group['id'],
           'name': security_group['name'],
           'tenant_id': security_group['tenant_id'],
           'description': security_group['description']}
    if security_group.rules:
        res['security_group_rules'] = [
            self._make_security_group_rule_dict(r.db_obj)
            for r in security_group.rules]
    else:
        res['security_group_rules'] = []
    resource_extend.apply_funcs(ext_sg.SECURITYGROUPS, res,
                                security_group.db_obj)
    return db_utils.resource_fields(res, fields)

def _make_network_dict(self, network, fields=None,
                       process_extensions=True, context=None):
    res = {'id': network['id'],
           'name': network['name'],
           'tenant_id': network['tenant_id'],
           'admin_state_up': network['admin_state_up'],
           'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU),
           'status': network['status'],
           'subnets': [subnet['id'] for subnet in network['subnets']]}
    res['shared'] = self._is_network_shared(context, network.rbac_entries)
    # Call auxiliary extend functions, if any
    if process_extensions:
        resource_extend.apply_funcs(net_def.COLLECTION_NAME, res, network)
    return db_utils.resource_fields(res, fields)

def _make_network_dict(self, network, fields=None,
                       process_extensions=True, context=None):
    res = {'id': network['id'],
           'name': network['name'],
           'tenant_id': network['tenant_id'],
           'admin_state_up': network['admin_state_up'],
           'mtu': network.get('mtu', n_const.DEFAULT_NETWORK_MTU),
           'status': network['status'],
           'subnets': [subnet['id'] for subnet in network['subnets']]}
    res['shared'] = self._is_network_shared(context, network.rbac_entries)
    # Call auxiliary extend functions, if any
    if process_extensions:
        resource_extend.apply_funcs(net_def.COLLECTION_NAME, res, network)
    return db_utils.resource_fields(res, fields)

def get_ports(self, context, filters=None, fields=None,
              sorts=None, limit=None, marker=None,
              page_reverse=False):
    filters = filters or {}
    with db_api.CONTEXT_READER.using(context):
        ports = (
            super(NsxDvsV2, self).get_ports(
                context, filters, fields, sorts,
                limit, marker, page_reverse))
        # Add port extensions
        for port in ports:
            if 'id' in port:
                port_model = self._get_port(context, port['id'])
                resource_extend.apply_funcs('ports', port, port_model)
                self._extend_port_dict_binding(port, port_model)
    return (ports if not fields else
            [db_utils.resource_fields(port, fields) for port in ports])

def _make_port_dict(self, port, fields=None, process_extensions=True): res = {"id": port["id"], 'name': port['name'], "network_id": port["network_id"], 'tenant_id': port['tenant_id'], "mac_address": port["mac_address"], "admin_state_up": port["admin_state_up"], "status": port["status"], "fixed_ips": [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in port["fixed_ips"]], "device_id": port["device_id"], "device_owner": port["device_owner"]} # Call auxiliary extend functions, if any if process_extensions: resource_extend.apply_funcs(port_def.COLLECTION_NAME, res, port) return db_utils.resource_fields(res, fields)
def _make_security_group_dict(self, security_group, fields=None):
    res = {'id': security_group['id'],
           'name': security_group['name'],
           'stateful': security_group['stateful'],
           'tenant_id': security_group['tenant_id'],
           'description': security_group['description'],
           'standard_attr_id': security_group.db_obj.standard_attr.id,
           'shared': security_group['shared'],
           }
    if security_group.rules:
        res['security_group_rules'] = [
            self._make_security_group_rule_dict(r.db_obj)
            for r in security_group.rules]
    else:
        res['security_group_rules'] = []
    resource_extend.apply_funcs(ext_sg.SECURITYGROUPS, res,
                                security_group.db_obj)
    return db_utils.resource_fields(res, fields)

def _make_subnetpool_dict(self, subnetpool, fields=None):
    default_prefixlen = str(subnetpool['default_prefixlen'])
    min_prefixlen = str(subnetpool['min_prefixlen'])
    max_prefixlen = str(subnetpool['max_prefixlen'])
    res = {'id': subnetpool['id'],
           'name': subnetpool['name'],
           'tenant_id': subnetpool['tenant_id'],
           'default_prefixlen': default_prefixlen,
           'min_prefixlen': min_prefixlen,
           'max_prefixlen': max_prefixlen,
           'is_default': subnetpool['is_default'],
           'shared': subnetpool['shared'],
           'prefixes': [prefix.cidr for prefix in subnetpool['prefixes']],
           'ip_version': subnetpool['ip_version'],
           'default_quota': subnetpool['default_quota'],
           'address_scope_id': subnetpool['address_scope_id']}
    resource_extend.apply_funcs(
        subnetpool_def.COLLECTION_NAME, res, subnetpool)
    return db_utils.resource_fields(res, fields)

def get_ports(self, context, filters=None, fields=None,
              sorts=None, limit=None, marker=None,
              page_reverse=False):
    filters = filters or {}
    with db_api.CONTEXT_READER.using(context):
        ports = (
            super(NsxDvsV2, self).get_ports(
                context, filters, fields, sorts,
                limit, marker, page_reverse))
        # Add port extensions
        for port in ports:
            if 'id' in port:
                port_model = self._get_port(context, port['id'])
                resource_extend.apply_funcs('ports', port, port_model)
                self._extend_port_dict_binding(port, port_model)
    return (ports if not fields else
            [db_utils.resource_fields(port, fields) for port in ports])

def get_ports(self, context, filters=None, fields=None,
              sorts=None, limit=None, marker=None,
              page_reverse=False):
    # Read project plugin to filter relevant projects according to
    # plugin
    req_p = self._get_plugin_for_request(
        context, filters, keys=['device_id', 'network_id', 'fixed_ips'])
    filters = filters or {}
    with db_api.CONTEXT_READER.using(context):
        ports = (
            super(NsxTVDPlugin, self).get_ports(
                context, filters, fields, sorts,
                limit, marker, page_reverse))
        # Add port extensions
        for port in ports[:]:
            port_model = None
            if 'id' in port:
                port_model = self._get_port(context, port['id'])
                resource_extend.apply_funcs('ports', port, port_model)
            p = self._get_plugin_from_net_id(context, port['network_id'])
            if p == req_p or req_p is None:
                if hasattr(p, '_extend_get_port_dict_qos_and_binding'):
                    p._extend_get_port_dict_qos_and_binding(context, port)
                else:
                    if not port_model:
                        port_model = port
                    p._extend_port_dict_binding(port, port_model)
                if hasattr(p,
                           '_remove_provider_security_groups_from_list'):
                    p._remove_provider_security_groups_from_list(port)
                self._cleanup_obj_fields(port, p.plugin_type(), 'port')
            else:
                ports.remove(port)
    return (ports if not fields else
            [db_utils.resource_fields(port, fields) for port in ports])

def get_ports(self, context, filters=None, fields=None,
              sorts=None, limit=None, marker=None,
              page_reverse=False):
    # Read project plugin to filter relevant projects according to
    # plugin
    req_p = self._get_plugin_for_request(
        context, filters, keys=['device_id', 'network_id', 'fixed_ips'])
    filters = filters or {}
    with db_api.CONTEXT_READER.using(context):
        ports = (
            super(NsxTVDPlugin, self).get_ports(
                context, filters, fields, sorts,
                limit, marker, page_reverse))
        # Add port extensions
        for port in ports[:]:
            port_model = None
            if 'id' in port:
                port_model = self._get_port(context, port['id'])
                resource_extend.apply_funcs('ports', port, port_model)
            p = self._get_plugin_from_net_id(context, port['network_id'])
            if p == req_p or req_p is None:
                if hasattr(p, '_extend_get_port_dict_qos_and_binding'):
                    p._extend_get_port_dict_qos_and_binding(context, port)
                else:
                    if not port_model:
                        port_model = port
                    p._extend_port_dict_binding(port, port_model)
                if hasattr(p,
                           '_remove_provider_security_groups_from_list'):
                    p._remove_provider_security_groups_from_list(port)
                self._cleanup_obj_fields(
                    port, p.plugin_type(), 'port')
            else:
                ports.remove(port)
    return (ports if not fields else
            [db_utils.resource_fields(port, fields) for port in ports])

def to_dict(self):
    _dict = super(Trunk, self).to_dict()
    resource_extend.apply_funcs(trunk_def.TRUNKS, _dict, self.db_obj)
    return _dict

def create_port(self, context, port):
    # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
    # then we pass the port to the policy engine. The reason why we don't
    # pass the value to the policy engine when the port is
    # ATTR_NOT_SPECIFIED is for the case where a port is created on a
    # shared network that is not owned by the tenant.
    port_data = port['port']
    network_type = self._dvs_get_network(
        context, port['port']['network_id'])['provider:network_type']
    with db_api.CONTEXT_WRITER.using(context):
        # First we allocate port in neutron database
        neutron_db = super(NsxDvsV2, self).create_port(context, port)
        self._extension_manager.process_create_port(
            context, port_data, neutron_db)
        if network_type and network_type == 'vlan':
            port_data[psec.PORTSECURITY] = False
        else:
            port_security = self._get_network_security_binding(
                context, neutron_db['network_id'])
            port_data[psec.PORTSECURITY] = port_security
        self._process_port_port_security_create(
            context, port_data, neutron_db)
        # Update fields obtained from neutron db (eg: MAC address)
        port["port"].update(neutron_db)
        has_ip = self._ip_on_port(neutron_db)

        # security group extension checks
        if network_type and network_type != 'vlan':
            if has_ip:
                self._ensure_default_security_group_on_port(context, port)
            elif validators.is_attr_set(port_data.get(
                    ext_sg.SECURITYGROUPS)):
                raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
        if network_type and network_type == 'vlan':
            port_data[ext_sg.SECURITYGROUPS] = []
        else:
            port_data[ext_sg.SECURITYGROUPS] = (
                self._get_security_groups_on_port(context, port))
        self._process_port_create_security_group(
            context, port_data, port_data[ext_sg.SECURITYGROUPS])
        self._process_portbindings_create_and_update(
            context, port['port'], port_data)

        # allowed address pair checks
        if validators.is_attr_set(port_data.get(
                addr_apidef.ADDRESS_PAIRS)):
            if not port_security:
                raise addr_exc.AddressPairAndPortSecurityRequired()
            else:
                self._process_create_allowed_address_pairs(
                    context, neutron_db,
                    port_data[addr_apidef.ADDRESS_PAIRS])
        else:
            # remove ATTR_NOT_SPECIFIED
            port_data[addr_apidef.ADDRESS_PAIRS] = []

        self._process_portbindings_create_and_update(
            context, port['port'], port_data)
        self._process_vnic_type(context, port_data, neutron_db['id'])
        LOG.debug("create_port completed on NSX for tenant "
                  "%(tenant_id)s: (%(id)s)", port_data)

    # DB Operation is complete, perform DVS operation
    port_data = port['port']

    # this extra lookup is necessary to get the
    # latest db model for the extension functions
    port_model = self._get_port(context, port_data['id'])
    resource_extend.apply_funcs('ports', port_data, port_model)
    self._extend_port_dict_binding(port_data, port_model)
    self.handle_port_dhcp_access(context, port_data, action='create_port')
    return port_data

def _dvs_create_network(self, context, network): net_data = network['network'] if net_data['admin_state_up'] is False: LOG.warning("Network with admin_state_up=False are not yet " "supported by this plugin. Ignoring setting for " "network %s", net_data.get('name', '<unknown>')) net_data['id'] = uuidutils.generate_uuid() vlan_tag = 0 if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.VLAN: vlan_tag = net_data.get(pnet.SEGMENTATION_ID, 0) trunk_mode = False # vlan transparent can be an object if not set. if net_data.get(vlan_apidef.VLANTRANSPARENT) is True: trunk_mode = True net_id = dvs_name = None if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.PORTGROUP: net_id = net_data.get(pnet.PHYSICAL_NETWORK) pg_info, dvpg_moref = self._get_portgroup_info(net_id) if pg_info.get('name') != net_data.get('name'): err_msg = (_("Portgroup name %(dvpg)s must match network " "name %(network)s") % {'dvpg': pg_info.get('name'), 'network': net_data.get('name')}) raise n_exc.InvalidInput(error_message=err_msg) dvs_id = dvpg_moref.value else: dvs_id = self._dvs_get_id(net_data) try: dvs_name = self._add_port_group(dvs_id, net_data, vlan_tag, trunk_mode=trunk_mode) except dvs_utils.DvsOperationBulkFault: LOG.warning('One or more hosts may not be configured') try: with db_api.CONTEXT_WRITER.using(context): new_net = super(NsxDvsV2, self).create_network(context, network) self._extension_manager.process_create_network( context, net_data, new_net) # Process port security extension self._process_network_port_security_create( context, net_data, new_net) # Process vlan transparent extension net_db = self._get_network(context, new_net['id']) net_db['vlan_transparent'] = trunk_mode net_data['vlan_transparent'] = trunk_mode resource_extend.apply_funcs('networks', net_data, net_db) nsx_db.add_network_binding( context.session, new_net['id'], net_data.get(pnet.NETWORK_TYPE), net_id or dvs_name, vlan_tag) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create network') if (net_data.get(pnet.NETWORK_TYPE) != c_utils.NetworkTypes.PORTGROUP): self._delete_port_group(dvs_id, dvs_name) new_net[pnet.NETWORK_TYPE] = net_data.get(pnet.NETWORK_TYPE) new_net[pnet.PHYSICAL_NETWORK] = net_id or dvs_name new_net[pnet.SEGMENTATION_ID] = vlan_tag # this extra lookup is necessary to get the # latest db model for the extension functions net_model = self._get_network(context, net_data['id']) resource_extend.apply_funcs('networks', new_net, net_model) self.handle_network_dhcp_access(context, new_net, action='create_network') return new_net
def _create_floatingip(self, context, floatingip,
                       initial_status=n_const.FLOATINGIP_STATUS_ACTIVE):
    fip = floatingip['floatingip']
    fip_id = uuidutils.generate_uuid()
    f_net_id = fip['floating_network_id']
    if not self._core_plugin._network_is_external(context, f_net_id):
        msg = _("Network %s is not a valid external network") % f_net_id
        raise n_exc.BadRequest(resource='floatingip', msg=msg)

    self._validate_network_for_floatingip(context, f_net_id)

    # This external port is never exposed to the tenant.
    # It is used purely for internal system and admin use when
    # managing floating IPs.
    port = {'tenant_id': '',  # tenant intentionally not set
            'network_id': f_net_id,
            'admin_state_up': True,
            'device_id': 'PENDING',
            'device_owner': DEVICE_OWNER_FLOATINGIP,
            'status': n_const.PORT_STATUS_NOTAPPLICABLE,
            'name': ''}

    # Both subnet_id and floating_ip_address are accepted. If
    # floating_ip_address is not in the subnet, an InvalidIpForSubnet
    # exception will be raised.
    fixed_ip = {}
    if fip['subnet_id']:
        fixed_ip['subnet_id'] = fip['subnet_id']
    if fip['floating_ip_address']:
        fixed_ip['ip_address'] = fip['floating_ip_address']
    if fixed_ip:
        port['fixed_ips'] = [fixed_ip]

    # The 'status' field in the port dict cannot be set on a regular
    # create request, so check_allow_post is used to skip that
    # verification.
    # TODO(boden): rehome create_port into neutron-lib
    external_port = plugin_utils.create_port(
        self._core_plugin, context.elevated(),
        {'port': port}, check_allow_post=False)

    with plugin_utils.delete_port_on_error(
            self._core_plugin, context.elevated(), external_port['id']),\
            context.session.begin(subtransactions=True):
        external_ips = self._port_fixed_ips_for_floatingip(external_port)
        if not external_ips:
            raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id)

        floating_fixed_ip = external_ips[0]
        floating_ip_address = floating_fixed_ip['ip_address']
        floatingip_obj = obj_reg.new_instance(
            'FloatingIP', context,
            id=fip_id,
            project_id=fip['tenant_id'],
            status=initial_status,
            floating_network_id=fip['floating_network_id'],
            floating_ip_address=floating_ip_address,
            floating_port_id=external_port['id'],
            description=fip.get('description'))
        # Update association with internal port
        # and define external IP address
        assoc_result = self._update_fip_assoc(context, fip, floatingip_obj)
        floatingip_obj.create()
        floatingip_dict = self._make_floatingip_dict(
            floatingip_obj, process_extensions=False)
        if self._is_dns_integration_supported:
            dns_data = self._process_dns_floatingip_create_precommit(
                context, floatingip_dict, fip)
        # NOTE(yamamoto): MidoNet doesn't have Floating IP QoS
        # if self._is_fip_qos_supported:
        #     self._process_extra_fip_qos_create(context, fip_id, fip)
        floatingip_obj = obj_reg.load_class('FloatingIP').get_object(
            context, id=floatingip_obj.id)
        floatingip_db = floatingip_obj.db_obj

        registry.notify(resources.FLOATING_IP, events.PRECOMMIT_CREATE,
                        self, context=context, floatingip=fip,
                        floatingip_id=fip_id,
                        floatingip_db=floatingip_db)

    self._core_plugin.update_port(
        context.elevated(), external_port['id'],
        {'port': {'device_id': fip_id}})
    registry.notify(resources.FLOATING_IP, events.AFTER_UPDATE,
                    self._update_fip_assoc, **assoc_result)

    if self._is_dns_integration_supported:
        self._process_dns_floatingip_create_postcommit(
            context, floatingip_dict, dns_data)

    # TODO(lujinluo): Change floatingip_db to floatingip_obj once all
    # codes are migrated to use Floating IP OVO object.
    resource_extend.apply_funcs(l3_apidef.FLOATINGIPS,
                                floatingip_dict, floatingip_db)
    return floatingip_dict

def _dvs_create_network(self, context, network):
    net_data = network['network']
    if net_data['admin_state_up'] is False:
        LOG.warning("Networks with admin_state_up=False are not yet "
                    "supported by this plugin. Ignoring setting for "
                    "network %s", net_data.get('name', '<unknown>'))
    net_data['id'] = uuidutils.generate_uuid()
    vlan_tag = 0
    if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.VLAN:
        vlan_tag = net_data.get(pnet.SEGMENTATION_ID, 0)

    trunk_mode = False
    # vlan transparent can be an object if not set.
    if net_data.get(vlan_apidef.VLANTRANSPARENT) is True:
        trunk_mode = True

    net_id = dvs_name = None
    if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.PORTGROUP:
        net_id = net_data.get(pnet.PHYSICAL_NETWORK)
        pg_info, dvpg_moref = self._get_portgroup_info(net_id)
        if pg_info.get('name') != net_data.get('name'):
            err_msg = (_("Portgroup name %(dvpg)s must match network "
                         "name %(network)s") %
                       {'dvpg': pg_info.get('name'),
                        'network': net_data.get('name')})
            raise n_exc.InvalidInput(error_message=err_msg)
        dvs_id = dvpg_moref.value
    else:
        dvs_id = self._dvs_get_id(net_data)
        try:
            dvs_name = self._add_port_group(dvs_id, net_data, vlan_tag,
                                            trunk_mode=trunk_mode)
        except dvs_utils.DvsOperationBulkFault:
            LOG.warning('One or more hosts may not be configured')

    try:
        with db_api.CONTEXT_WRITER.using(context):
            new_net = super(NsxDvsV2, self).create_network(context,
                                                           network)
            self._extension_manager.process_create_network(
                context, net_data, new_net)
            # Process port security extension
            self._process_network_port_security_create(
                context, net_data, new_net)
            # Process vlan transparent extension
            net_db = self._get_network(context, new_net['id'])
            net_db['vlan_transparent'] = trunk_mode
            net_data['vlan_transparent'] = trunk_mode
            resource_extend.apply_funcs('networks', net_data, net_db)

            nsx_db.add_network_binding(context.session, new_net['id'],
                                       net_data.get(pnet.NETWORK_TYPE),
                                       net_id or dvs_name, vlan_tag)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Failed to create network')
            if (net_data.get(pnet.NETWORK_TYPE) !=
                    c_utils.NetworkTypes.PORTGROUP):
                self._delete_port_group(dvs_id, dvs_name)

    new_net[pnet.NETWORK_TYPE] = net_data.get(pnet.NETWORK_TYPE)
    new_net[pnet.PHYSICAL_NETWORK] = net_id or dvs_name
    new_net[pnet.SEGMENTATION_ID] = vlan_tag

    # this extra lookup is necessary to get the
    # latest db model for the extension functions
    net_model = self._get_network(context, net_data['id'])
    resource_extend.apply_funcs('networks', new_net, net_model)

    self.handle_network_dhcp_access(context, new_net,
                                    action='create_network')
    return new_net

def create_port(self, context, port):
    # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
    # then we pass the port to the policy engine. The reason why we don't
    # pass the value to the policy engine when the port is
    # ATTR_NOT_SPECIFIED is for the case where a port is created on a
    # shared network that is not owned by the tenant.
    port_data = port['port']
    with db_api.CONTEXT_WRITER.using(context):
        # First we allocate port in neutron database
        neutron_db = super(NsxDvsV2, self).create_port(context, port)
        self._extension_manager.process_create_port(
            context, port_data, neutron_db)
        port_security = self._get_network_security_binding(
            context, neutron_db['network_id'])
        port_data[psec.PORTSECURITY] = port_security
        self._process_port_port_security_create(
            context, port_data, neutron_db)
        # Update fields obtained from neutron db (eg: MAC address)
        port["port"].update(neutron_db)
        has_ip = self._ip_on_port(neutron_db)

        # security group extension checks
        if has_ip:
            self._ensure_default_security_group_on_port(context, port)
        elif validators.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
            raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
        port_data[ext_sg.SECURITYGROUPS] = (
            self._get_security_groups_on_port(context, port))
        self._process_port_create_security_group(
            context, port_data, port_data[ext_sg.SECURITYGROUPS])
        self._process_portbindings_create_and_update(
            context, port['port'], port_data)

        # allowed address pair checks
        if validators.is_attr_set(port_data.get(
                addr_apidef.ADDRESS_PAIRS)):
            if not port_security:
                raise addr_exc.AddressPairAndPortSecurityRequired()
            else:
                self._process_create_allowed_address_pairs(
                    context, neutron_db,
                    port_data[addr_apidef.ADDRESS_PAIRS])
        else:
            # remove ATTR_NOT_SPECIFIED
            port_data[addr_apidef.ADDRESS_PAIRS] = []

        self._process_portbindings_create_and_update(
            context, port['port'], port_data)
        self._process_vnic_type(context, port_data, neutron_db['id'])
        LOG.debug("create_port completed on NSX for tenant "
                  "%(tenant_id)s: (%(id)s)", port_data)

    # DB Operation is complete, perform DVS operation
    port_data = port['port']

    # this extra lookup is necessary to get the
    # latest db model for the extension functions
    port_model = self._get_port(context, port_data['id'])
    resource_extend.apply_funcs('ports', port_data, port_model)
    self._extend_port_dict_binding(port_data, port_model)
    self.handle_port_dhcp_access(context, port_data, action='create_port')
    return port_data

def to_dict(self):
    _dict = super(QosPolicy, self).to_dict()
    resource_extend.apply_funcs(qos_def.POLICIES, _dict, self.db_obj)
    return _dict