def get_networks(self, context, filters=None, fields=None, sorts=None,
                 limit=None, marker=None, page_reverse=False):
    """List networks, lazily copying missing ones from central Neutron.

    If no 'id' filter is given, only the local Neutron DB is consulted.
    When ids are requested, any id not found locally is fetched from the
    central Neutron server and created locally (with its subnets) so that
    later operations can proceed against local data.
    """
    # if id is not specified in the filter, we just return network data in
    # local Neutron server, otherwise id is specified, we need to retrieve
    # network data from central Neutron server and create network which
    # doesn't exist in local Neutron server.
    if not filters or 'id' not in filters:
        return self.core_plugin.get_networks(
            context, filters, fields, sorts, limit, marker, page_reverse)
    # Fetch local networks unfiltered by `fields` so _ensure_subnet can see
    # the full body; field selection is applied per-result below.
    b_full_networks = self.core_plugin.get_networks(
        context, filters, None, sorts, limit, marker, page_reverse)
    b_networks = []
    for b_network in b_full_networks:
        subnet_ids = self._ensure_subnet(context, b_network, False)
        if subnet_ids:
            b_network['subnets'] = subnet_ids
        b_networks.append(db_utils.resource_fields(b_network, fields))
    # All requested ids already exist locally -- nothing to sync.
    if len(b_networks) == len(filters['id']):
        return b_networks
    t_ctx = t_context.get_context_from_neutron_context(context)
    if self._skip_non_api_query(t_ctx):
        return b_networks
    # Query central Neutron with an admin token for the missing networks.
    t_ctx.auth_token = client.Client.get_admin_token(context.project_id)
    raw_client = self.neutron_handle._get_client(t_ctx)
    params = self._construct_params(filters, sorts, limit, marker,
                                    page_reverse)
    t_networks = raw_client.list_networks(**params)['networks']
    t_id_set = set([network['id'] for network in t_networks])
    b_id_set = set([network['id'] for network in b_networks])
    missing_id_set = t_id_set - b_id_set
    if missing_id_set:
        missing_networks = [network for network in t_networks if (
            network['id'] in missing_id_set)]
        for network in missing_networks:
            region_name = self._get_neutron_region()
            located = self._is_network_located_in_region(network,
                                                         region_name)
            # Skip (and log) networks whose AZ hints place them elsewhere.
            if not located:
                LOG.error('network: %(net_id)s not located in current '
                          'region: %(region_name)s, '
                          'az_hints: %(az_hints)s',
                          {'net_id': network['id'],
                           'region_name': region_name,
                           'az_hints': network[az_def.AZ_HINTS]})
                continue
            self._adapt_network_body(network)
            # qos_policy_id refers to a central resource that may not exist
            # locally, so drop it before creating the local copy.
            network.pop('qos_policy_id', None)
            b_network = self.core_plugin.create_network(
                context, {'network': network})
            subnet_ids = self._ensure_subnet(context, network)
            if subnet_ids:
                b_network['subnets'] = subnet_ids
            b_networks.append(db_utils.resource_fields(b_network, fields))
    return b_networks
def test_resource_fields(self, mock_populate):
    """Only requested fields survive; the result is re-populated."""
    resource = {'id': '1', 'name': 'n', 'desc': None}
    utils.resource_fields(resource, ['name'])
    mock_populate.assert_called_once_with({'name': 'n'})
def inner(*args, **kwargs):
    """Run the wrapped callable, then apply any requested field filter."""
    requested = kwargs.get('fields')
    outcome = f(*args, **kwargs)
    if requested is None:
        return outcome
    if isinstance(outcome, list):
        return [db_utils.resource_fields(item, requested)
                for item in outcome]
    return db_utils.resource_fields(outcome, requested)
def inner(*args, **kwargs):
    """Invoke the decorated function and filter its result by 'fields'."""
    wanted = kwargs.get('fields')
    res = f(*args, **kwargs)
    if wanted is None:
        # No filtering requested; hand back the raw result.
        return res
    if isinstance(res, list):
        filtered = []
        for entry in res:
            filtered.append(db_utils.resource_fields(entry, wanted))
        return filtered
    return db_utils.resource_fields(res, wanted)
def get_subnets(self, context, filters=None, fields=None, sorts=None,
                limit=None, marker=None, page_reverse=False):
    """List subnets, creating local copies of centrally-defined ones.

    Without an 'id' filter this is a plain local query.  With ids, any
    subnet not found locally is fetched from central Neutron and created
    locally; DHCP ports are ensured for dhcp-enabled subnets.
    """
    # if id is not specified in the filter, we just return subnet data in
    # local Neutron server, otherwise id is specified, we need to retrieve
    # subnet data from central Neutron server and create subnet which
    # doesn't exist in local Neutron server.
    if not filters or 'id' not in filters:
        return self.core_plugin.get_subnets(context, filters, fields, sorts,
                                            limit, marker, page_reverse)
    t_ctx = t_context.get_context_from_neutron_context(context)
    # Query without `fields` so the dhcp-port check sees the full subnet;
    # field selection is applied to each result below.
    b_full_subnets = self.core_plugin.get_subnets(context, filters, None,
                                                  sorts, limit, marker,
                                                  page_reverse)
    b_subnets = []
    for b_subnet in b_full_subnets:
        if b_subnet['enable_dhcp']:
            self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
        b_subnets.append(db_utils.resource_fields(b_subnet, fields))
    # Every requested id already exists locally; no central query needed.
    if len(b_subnets) == len(filters['id']):
        return b_subnets
    if self._skip_non_api_query(t_ctx):
        return b_subnets
    raw_client = self.neutron_handle._get_client(t_ctx)
    params = self._construct_params(filters, sorts, limit, marker,
                                    page_reverse)
    t_subnets = raw_client.list_subnets(**params)['subnets']
    t_id_set = set([subnet['id'] for subnet in t_subnets])
    b_id_set = set([subnet['id'] for subnet in b_subnets])
    missing_id_set = t_id_set - b_id_set
    if missing_id_set:
        missing_subnets = [
            subnet for subnet in t_subnets
            if (subnet['id'] in missing_id_set)
        ]
        for subnet in missing_subnets:
            # Only create subnets whose network is valid in this region.
            valid = self._is_valid_network(context, subnet['network_id'])
            if not valid:
                continue
            b_subnet = self._create_bottom_subnet(t_ctx, context, subnet)
            if b_subnet['enable_dhcp']:
                self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
            b_subnets.append(db_utils.resource_fields(b_subnet, fields))
    return b_subnets
def _make_networks_dict(self, networks, context):
    """Serialize network DB objects to API dicts with bulk extensions.

    Bulk extend functions run once over all networks first; then the
    per-resource extend functions run, guarded by the BULK_EXTENDED
    marker so they can skip work already done in the bulk pass.
    """
    nets = []
    for network in networks:
        if network.mtu is None:
            # TODO(ivar): also refactor this to run for bulk networks
            network.mtu = self._get_network_mtu(network, validate=False)
        res = {'id': network['id'],
               'name': network['name'],
               'tenant_id': network['tenant_id'],
               'admin_state_up': network['admin_state_up'],
               'mtu': network.get('mtu', n_const.DEFAULT_NETWORK_MTU),
               'status': network['status'],
               'subnets': [subnet['id'] for subnet in network['subnets']]}
        res['shared'] = self._is_network_shared(context,
                                                network.rbac_entries)
        nets.append((res, network))
    # Bulk extend first
    resource_extend.apply_funcs(net_def.COLLECTION_NAME + '_BULK',
                                nets, None)
    result = []
    for res, network in nets:
        # Mark each dict so per-resource extenders know the bulk pass ran;
        # the marker is stripped again before the dict is returned.
        res[api_plus.BULK_EXTENDED] = True
        resource_extend.apply_funcs(net_def.COLLECTION_NAME, res, network)
        res.pop(api_plus.BULK_EXTENDED, None)
        result.append(db_utils.resource_fields(res, []))
    return result
def _make_pathport_assoc_dict(self, assoc, fields=None):
    """Serialize a path-node/port-pair association record."""
    keys = ('pathnode_id', 'portpair_id', 'weight')
    res = {key: assoc[key] for key in keys}
    return db_utils.resource_fields(res, fields)
def _make_port_detail_dict(self, port, fields=None):
    """Serialize a port-detail record, including its path-node links."""
    plain_keys = ('id', 'project_id', 'host_id', 'segment_id',
                  'local_endpoint', 'mac_address', 'in_mac_address',
                  'network_type', 'correlation')
    res = {key: port[key] for key in plain_keys}
    # ingress/egress may be absent on the record, so use .get().
    res['ingress'] = port.get('ingress', None)
    res['egress'] = port.get('egress', None)
    res['path_nodes'] = [
        {'pathnode_id': node['pathnode_id'], 'weight': node['weight']}
        for node in port['path_nodes']
    ]
    return db_utils.resource_fields(res, fields)
def _make_pathnode_dict(self, node, fields=None):
    """Serialize a path-node record and its port-pair detail ids."""
    keys = ('id', 'project_id', 'node_type', 'nsp', 'nsi',
            'next_group_id', 'next_hop', 'portchain_id', 'status',
            'fwd_path', 'ppg_n_tuple_mapping', 'tap_enabled',
            'previous_node_id')
    res = {key: node[key] for key in keys}
    res['portpair_details'] = [
        detail['portpair_id'] for detail in node['portpair_details']
    ]
    return db_utils.resource_fields(res, fields)
def _make_networktemplate_dict(self, template, fields=None):
    """Serialize a network template object."""
    res = {'id': template.id,
           'name': template.name,
           'body': template.body}
    return db_utils.resource_fields(res, fields)
def get_network_ip_availabilities(self, context, filters=None, fields=None):
    """Returns ip availability data for a collection of networks."""
    availabilities = super(
        NetworkIPAvailabilityPlugin,
        self).get_network_ip_availabilities(context, filters)
    result = []
    for availability in availabilities:
        result.append(db_utils.resource_fields(availability, fields))
    return result
def _make_flow_classifier_dict(self, flow_classifier, fields=None):
    """Serialize a flow-classifier DB record to an API dict.

    :param flow_classifier: flow-classifier DB object/record
    :param fields: optional list of field names to keep in the result
    :returns: dict filtered by ``fields`` (all keys when fields is None)
    """
    res = {
        'id': flow_classifier['id'],
        'name': flow_classifier['name'],
        'description': flow_classifier['description'],
        'project_id': flow_classifier['project_id'],
        'ethertype': flow_classifier['ethertype'],
        'protocol': flow_classifier['protocol'],
        'source_port_range_min': flow_classifier['source_port_range_min'],
        'source_port_range_max': flow_classifier['source_port_range_max'],
        'destination_port_range_min': (
            flow_classifier['destination_port_range_min']),
        'destination_port_range_max': (
            flow_classifier['destination_port_range_max']),
        'source_ip_prefix': flow_classifier['source_ip_prefix'],
        'destination_ip_prefix': flow_classifier[
            'destination_ip_prefix'],
        'logical_source_port': flow_classifier['logical_source_port'],
        'logical_destination_port': flow_classifier[
            'logical_destination_port'],
        # The association keys are unused, so iterate the values directly
        # (was: ``for k, param in ...items()`` with k never read).
        'l7_parameters': {
            param['keyword']: param['value']
            for param in flow_classifier.l7_parameters.values()
        }
    }
    return db_utils.resource_fields(res, fields)
def _make_security_group_rule_dict(self, security_group_rule, fields=None):
    """Serialize a security-group-rule record to an API dict."""
    # TODO(slaweq): switch this to use OVO instead of db object
    rule = security_group_rule
    keys = ('id', 'tenant_id', 'security_group_id', 'ethertype',
            'direction', 'protocol', 'port_range_min', 'port_range_max',
            'remote_ip_prefix', 'remote_address_group_id',
            'remote_group_id')
    res = {key: rule[key] for key in keys}
    res['normalized_cidr'] = self._get_normalized_cidr_from_rule(rule)
    res['standard_attr_id'] = rule.standard_attr.id
    resource_extend.apply_funcs(ext_sg.SECURITYGROUPRULES, res, rule)
    return db_utils.resource_fields(res, fields)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
    """Serialize an IPsec site connection to an API dict."""
    conn = ipsec_site_conn
    plain_keys = ('id', 'tenant_id', 'name', 'description',
                  'peer_address', 'peer_id', 'local_id', 'route_mode',
                  'mtu', 'auth_mode', 'psk', 'initiator',
                  'admin_state_up', 'status', 'vpnservice_id',
                  'ikepolicy_id', 'ipsecpolicy_id',
                  'local_ep_group_id', 'peer_ep_group_id')
    res = {key: conn[key] for key in plain_keys}
    # Dead-peer-detection settings are exposed as one nested dict.
    res['dpd'] = {'action': conn['dpd_action'],
                  'interval': conn['dpd_interval'],
                  'timeout': conn['dpd_timeout']}
    res['peer_cidrs'] = [pcidr['cidr'] for pcidr in conn['peer_cidrs']]
    return db_utils.resource_fields(res, fields)
def _response(network_id, tenant_id, fields=None):
    """Build response for auto-allocated network."""
    body = {'id': network_id, 'tenant_id': tenant_id}
    return db_utils.resource_fields(body, fields)
def _make_graph_chain_assoc_dict(self, assoc_db, fields=None):
    """Serialize a service-graph chain association record."""
    res = {key: assoc_db[key]
           for key in ('service_graph_id', 'src_chain', 'dst_chain')}
    return db_utils.resource_fields(res, fields)
def _make_port_dict(self, port, fields=None, process_extensions=True,
                    with_fixed_ips=True):
    """Serialize a port to an API dict, optionally with fixed IPs."""
    mac = port["mac_address"]
    if isinstance(mac, netaddr.EUI):
        # Normalize EUI objects to the colon-separated unix format.
        mac.dialect = netaddr.mac_unix_expanded
    res = {"id": port["id"],
           'name': port['name'],
           "network_id": port["network_id"],
           'tenant_id': port['tenant_id'],
           "mac_address": str(mac),
           "admin_state_up": port["admin_state_up"],
           "status": port["status"],
           "device_id": port["device_id"],
           "device_owner": port["device_owner"]}
    if with_fixed_ips:
        fixed_ips = []
        for ip in port["fixed_ips"]:
            fixed_ips.append({'subnet_id': ip["subnet_id"],
                              'ip_address': str(ip["ip_address"])})
        res["fixed_ips"] = fixed_ips
    # Call auxiliary extend functions, if any
    if process_extensions:
        port_data = port.db_obj if isinstance(port, port_obj.Port) else port
        resource_extend.apply_funcs(port_def.COLLECTION_NAME, res,
                                    port_data)
    return db_utils.resource_fields(res, fields)
def _make_rbac_policy_dict(entry, fields=None):
    """Serialize an RBAC policy entry to an API dict."""
    res = {'id': entry['id'],
           'project_id': entry['project_id'],
           'target_project': entry['target_project'],
           'action': entry['action'],
           'object_id': entry['object_id']}
    # TODO(ralonsoh): remove once all calls refer to "target_project"
    res['target_tenant'] = res['target_project']
    res['object_type'] = entry.db_model.object_type
    return db_utils.resource_fields(res, fields)
def _make_metering_label_rule_dict(metering_label_rule, fields=None):
    """Serialize a metering label rule record."""
    keys = ('id', 'metering_label_id', 'direction', 'remote_ip_prefix',
            'excluded')
    res = {key: metering_label_rule[key] for key in keys}
    return db_utils.resource_fields(res, fields)
def get_networks(self, context, filters=None, fields=None, sorts=None,
                 limit=None, marker=None, page_reverse=False):
    """List networks, keeping only those served by the requested plugin.

    Each network is routed to its project's plugin; networks belonging
    to a different plugin than the one resolved for this request are
    dropped from the result.
    """
    # Read project plugin to filter relevant projects according to
    # plugin
    req_p = self._get_plugin_for_request(context, filters, keys=['shared'])
    filters = filters or {}
    with db_api.CONTEXT_READER.using(context):
        networks = (super(NsxTVDPlugin, self).get_networks(
            context, filters, fields, sorts, limit, marker, page_reverse))
        # Iterate a copy (networks[:]) because mismatched entries are
        # removed from the live list during the loop.
        for net in networks[:]:
            p = self._get_plugin_from_project(context, net['tenant_id'])
            if p == req_p or req_p is None:
                p._extend_get_network_dict_provider(context, net)
            else:
                networks.remove(net)
    return (networks if not fields else [
        db_utils.resource_fields(network, fields) for network in networks
    ])
def _make_port_dict(self, port, fields=None, process_extensions=True):
    """Serialize a port (DB object or OVO) to an API dict."""
    mac = port["mac_address"]
    if isinstance(mac, netaddr.EUI):
        # Render EUI objects in the colon-separated unix format.
        mac.dialect = netaddr.mac_unix_expanded
    fixed_ips = [{'subnet_id': ip["subnet_id"],
                  'ip_address': ip["ip_address"]}
                 for ip in port["fixed_ips"]]
    res = {"id": port["id"],
           'name': port['name'],
           "network_id": port["network_id"],
           'tenant_id': port['tenant_id'],
           "mac_address": str(mac),
           "admin_state_up": port["admin_state_up"],
           "status": port["status"],
           "fixed_ips": fixed_ips,
           "device_id": port["device_id"],
           "device_owner": port["device_owner"]}
    # Call auxiliary extend functions, if any
    if process_extensions:
        port_data = port.db_obj if isinstance(port, port_obj.Port) else port
        resource_extend.apply_funcs(
            port_def.COLLECTION_NAME, res, port_data)
    return db_utils.resource_fields(res, fields)
def _make_metering_label_dict(metering_label, fields=None):
    """Serialize a metering label record."""
    res = {key: metering_label[key]
           for key in ('id', 'name', 'description', 'shared', 'tenant_id')}
    return db_utils.resource_fields(res, fields)
def get_flavor_next_provider(self, context, flavor_id, filters=None,
                             fields=None, sorts=None, limit=None,
                             marker=None, page_reverse=False):
    """From flavor, choose service profile and find provider for driver."""
    bindings = obj_flavor.FlavorServiceProfileBinding.get_objects(
        context, flavor_id=flavor_id)
    if not bindings:
        raise flav_exc.FlavorServiceProfileBindingNotFound(
            sp_id='', fl_id=flavor_id)
    # Get the service profile from the first binding
    # TODO(jwarendt) Should become a scheduling framework instead
    sp_obj = self._get_service_profile(context,
                                       bindings[0].service_profile_id)
    if not sp_obj.enabled:
        raise flav_exc.ServiceProfileDisabled()
    LOG.debug("Found driver %s.", sp_obj.driver)
    service_type_manager = sdb.ServiceTypeManager.get_instance()
    providers = service_type_manager.get_service_providers(
        context, filters={'driver': sp_obj.driver})
    if not providers:
        raise flav_exc.ServiceProfileDriverNotFound(driver=sp_obj.driver)
    LOG.debug("Found providers %s.", providers)
    res = {'driver': sp_obj.driver,
           'provider': providers[0].get('name')}
    return [db_utils.resource_fields(res, fields)]
def _make_address_scope_dict(address_scope, fields=None):
    """Serialize an address scope record."""
    keys = ('id', 'name', 'tenant_id', 'shared', 'ip_version')
    res = {key: address_scope[key] for key in keys}
    return db_utils.resource_fields(res, fields)
def _make_address_group_dict(address_group, fields=None):
    """Serialize an address group, flattening its address associations."""
    res = address_group.to_dict()
    addresses = []
    for assoc in address_group['addresses']:
        addresses.append(str(assoc['address']))
    res['addresses'] = addresses
    return db_utils.resource_fields(res, fields)
def _make_remote_mac_dict(self, gw_rme_db, fields=None):
    """Serialize a gateway remote-MAC entry."""
    keys = ('id', 'mac_address', 'vtep_address', 'segmentation_id',
            'device_id')
    res = {key: gw_rme_db[key] for key in keys}
    return db_utils.resource_fields(res, fields)
def _response(network_id, tenant_id, fields=None):
    """Build response for auto-allocated network."""
    return db_utils.resource_fields(
        {'id': network_id, 'tenant_id': tenant_id}, fields)
def get_flavor_next_provider(self, context, flavor_id, filters=None,
                             fields=None, sorts=None, limit=None,
                             marker=None, page_reverse=False):
    """From flavor, choose service profile and find provider for driver."""
    profile_bindings = obj_flavor.FlavorServiceProfileBinding.get_objects(
        context, flavor_id=flavor_id)
    if not profile_bindings:
        raise flav_exc.FlavorServiceProfileBindingNotFound(
            sp_id='', fl_id=flavor_id)
    # Get the service profile from the first binding
    # TODO(jwarendt) Should become a scheduling framework instead
    first_binding = profile_bindings[0]
    sp_obj = self._get_service_profile(context,
                                       first_binding.service_profile_id)
    if not sp_obj.enabled:
        raise flav_exc.ServiceProfileDisabled()
    LOG.debug("Found driver %s.", sp_obj.driver)
    manager = sdb.ServiceTypeManager.get_instance()
    providers = manager.get_service_providers(
        context, filters={'driver': sp_obj.driver})
    if not providers:
        raise flav_exc.ServiceProfileDriverNotFound(driver=sp_obj.driver)
    LOG.debug("Found providers %s.", providers)
    chosen = {'driver': sp_obj.driver,
              'provider': providers[0].get('name')}
    return [db_utils.resource_fields(chosen, fields)]
def _make_port_dict(self, port, fields=None, process_extensions=True):
    """Serialize a port record to an API dict."""
    fixed_ips = [{'subnet_id': ip["subnet_id"],
                  'ip_address': ip["ip_address"]}
                 for ip in port["fixed_ips"]]
    res = {"id": port["id"],
           'name': port['name'],
           "network_id": port["network_id"],
           'tenant_id': port['tenant_id'],
           "mac_address": port["mac_address"],
           "admin_state_up": port["admin_state_up"],
           "status": port["status"],
           "fixed_ips": fixed_ips,
           "device_id": port["device_id"],
           "device_owner": port["device_owner"]}
    # Call auxiliary extend functions, if any
    if process_extensions:
        resource_extend.apply_funcs(port_def.COLLECTION_NAME, res, port)
    return db_utils.resource_fields(res, fields)
def _make_address_scope_dict(address_scope, fields=None):
    """Serialize an address scope to an API dict."""
    res = {'id': address_scope['id'],
           'name': address_scope['name'],
           'tenant_id': address_scope['tenant_id'],
           'shared': address_scope['shared'],
           'ip_version': address_scope['ip_version']}
    return db_utils.resource_fields(res, fields)
def _make_net_assoc_dict(self, net_assoc_db, fields=None):
    """Serialize a BGPVPN network association record."""
    keys = ('id', 'tenant_id', 'bgpvpn_id', 'network_id')
    res = {key: net_assoc_db[key] for key in keys}
    return db_utils.resource_fields(res, fields)
def _make_service_profile_dict(sp_obj, fields=None):
    """Serialize a service profile, expanding its flavor ids."""
    res = {key: sp_obj[key]
           for key in ('id', 'description', 'driver', 'enabled',
                       'metainfo')}
    res['flavors'] = list(sp_obj['flavor_ids'])
    return db_utils.resource_fields(res, fields)
def _make_floatingip_dict(self, floatingip, fields=None,
                          process_extensions=True):
    """Serialize a floating IP, adding the internal floating_port_id."""
    res = super(L3RestProxy, self)._make_floatingip_dict(
        floatingip,
        fields=fields,
        process_extensions=process_extensions)
    # Expose the backing port id on top of the base serialization.
    res['floating_port_id'] = floatingip['floating_port_id']
    return db_utils.resource_fields(res, fields)
def _make_rbac_policy_dict(db_entry, fields=None):
    """Serialize an RBAC policy DB entry."""
    res = {'id': db_entry['id'],
           'tenant_id': db_entry['tenant_id'],
           'target_tenant': db_entry['target_tenant'],
           'action': db_entry['action'],
           'object_id': db_entry['object_id'],
           'object_type': db_entry.object_type}
    return db_utils.resource_fields(res, fields)
def get_network_ip_availability(self, context, id=None, fields=None):
    """Return ip availability data for a specific network id."""
    matches = self.get_network_ip_availabilities(context,
                                                 {'network_id': [id]})
    if not matches:
        raise exceptions.NetworkNotFound(net_id=id)
    return db_utils.resource_fields(matches[0], fields)
def get_service_providers(self, filters=None, fields=None):
    """List registered service providers matching the given filters."""
    result = []
    for key, value in self.providers.items():
        if not self._check_entry(key, value, filters):
            continue
        entry = {'service_type': key[0],
                 'name': key[1],
                 'driver': value['driver'],
                 'default': value['default']}
        result.append(db_utils.resource_fields(entry, fields))
    return result
def _make_service_profile_dict(sp_obj, fields=None):
    """Serialize a service profile to an API dict."""
    res = {'id': sp_obj['id'],
           'description': sp_obj['description'],
           'driver': sp_obj['driver'],
           'enabled': sp_obj['enabled'],
           'metainfo': sp_obj['metainfo'],
           'flavors': list(sp_obj['flavor_ids'])}
    return db_utils.resource_fields(res, fields)
def _make_floatingip_pool_dict(context, subnet, fields=None):
    """Serialize a subnet as a floating-IP pool entry."""
    pool = {'subnet_id': subnet.id,
            'subnet_name': subnet.name,
            'tenant_id': context.tenant_id,
            'network_id': subnet.network_id,
            'cidr': str(subnet.cidr)}
    return lib_db_utils.resource_fields(pool, fields)
def _make_subnet_dict(self, subnet, fields=None, context=None):
    """Serialize a subnet (DB object or Subnet OVO) to an API dict.

    The input may be either a SQLAlchemy DB object or a Subnet OVO, and
    each form stores pools/routes/DNS entries differently, hence the
    two branches below.
    """
    res = {
        'id': subnet['id'],
        'name': subnet['name'],
        'tenant_id': subnet['tenant_id'],
        'network_id': subnet['network_id'],
        'ip_version': subnet['ip_version'],
        'subnetpool_id': subnet['subnetpool_id'],
        'enable_dhcp': subnet['enable_dhcp'],
        'ipv6_ra_mode': subnet['ipv6_ra_mode'],
        'ipv6_address_mode': subnet['ipv6_address_mode'],
    }
    res['gateway_ip'] = str(
        subnet['gateway_ip']) if subnet['gateway_ip'] else None
    # TODO(korzen) this method can get subnet as DB object or Subnet OVO,
    # so temporary workaround will be to fill in the fields in separate
    # ways. After converting all code pieces to use Subnet OVO, the latter
    # 'else' can be deleted
    if isinstance(subnet, subnet_obj.Subnet):
        res['cidr'] = str(subnet.cidr)
        res['allocation_pools'] = [{
            'start': str(pool.start),
            'end': str(pool.end)
        } for pool in subnet.allocation_pools]
        res['host_routes'] = [{
            'destination': str(route.destination),
            'nexthop': str(route.nexthop)
        } for route in subnet.host_routes]
        res['dns_nameservers'] = [
            str(dns.address) for dns in subnet.dns_nameservers
        ]
        res['shared'] = subnet.shared
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res,
                                    subnet.db_obj)
    else:
        res['cidr'] = subnet['cidr']
        # DB rows keep pool bounds as first_ip/last_ip columns.
        res['allocation_pools'] = [{
            'start': pool['first_ip'],
            'end': pool['last_ip']
        } for pool in subnet['allocation_pools']]
        res['host_routes'] = [{
            'destination': route['destination'],
            'nexthop': route['nexthop']
        } for route in subnet['routes']]
        res['dns_nameservers'] = [
            dns['address'] for dns in subnet['dns_nameservers']
        ]
        # The shared attribute for a subnet is the same
        # as its parent network
        res['shared'] = self._is_network_shared(context,
                                                subnet.rbac_entries)
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res,
                                    subnet)
    return db_utils.resource_fields(res, fields)
def _make_flavor_dict(flavor_obj, fields=None):
    """Serialize a flavor, expanding its service-profile ids."""
    res = {key: flavor_obj[key]
           for key in ('id', 'name', 'description', 'service_type',
                       'enabled')}
    res['service_profiles'] = list(flavor_obj['service_profile_ids'])
    return db_utils.resource_fields(res, fields)
def _dvs_get_network(self, context, id, fields=None):
    """Fetch a network and extend it with provider fields."""
    with db_api.CONTEXT_READER.using(context):
        # goto to the plugin DB and fetch the network
        network = self._get_network(context, id)
        # Don't do field selection here otherwise we won't be able
        # to add provider networks fields
        result = self._make_network_dict(network, context=context)
        self._extend_get_network_dict_provider(context, result)
    return db_utils.resource_fields(result, fields)
def get_port(self, context, id, fields=None):
    """Fetch a port and extend it with DVS binding information."""
    port = super(NsxDvsV2, self).get_port(context, id, fields=None)
    if 'id' not in port:
        # No backing id available; report the DVS VIF type directly.
        port[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS
    else:
        port_model = self._get_port(context, port['id'])
        resource_extend.apply_funcs('ports', port, port_model)
        self._extend_port_dict_binding(port, port_model)
    return db_utils.resource_fields(port, fields)
def _make_tap_service_dict(self, tap_service, fields=None):
    """Serialize a tap service record."""
    keys = ('id', 'tenant_id', 'name', 'description', 'port_id',
            'status')
    res = {key: tap_service[key] for key in keys}
    return db_utils.resource_fields(res, fields)
def get_port(self, context, id, fields=None):
    """Fetch a port, applying binding extensions when a model exists."""
    port = super(NsxDvsV2, self).get_port(context, id, fields=None)
    if 'id' in port:
        model = self._get_port(context, port['id'])
        resource_extend.apply_funcs('ports', port, model)
        self._extend_port_dict_binding(port, model)
    else:
        port[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS
    return db_utils.resource_fields(port, fields)
def _make_flavor_dict(flavor_obj, fields=None):
    """Serialize a flavor object to an API dict."""
    res = {'id': flavor_obj['id'],
           'name': flavor_obj['name'],
           'description': flavor_obj['description'],
           'service_type': flavor_obj['service_type'],
           'enabled': flavor_obj['enabled'],
           'service_profiles': list(flavor_obj['service_profile_ids'])}
    return db_utils.resource_fields(res, fields)
def get_trunks(self, context, filters=None, fields=None, sorts=None,
               limit=None, marker=None, page_reverse=False):
    """List trunks across top and bottom pods.

    Builds bottom<->top id maps from resource routings, then either
    pages through one pod (when limit is set) or aggregates trunks
    from every pod plus the top region.
    """
    ret = []
    bottom_top_map = {}
    top_bottom_map = {}
    t_ctx = t_context.get_context_from_neutron_context(context)
    route_filters = [{'key': 'resource_type',
                      'comparator': 'eq',
                      'value': t_constants.RT_TRUNK}]
    routes = db_api.list_resource_routings(t_ctx, route_filters)
    for route in routes:
        bottom_top_map[route['bottom_id']] = route['top_id']
        top_bottom_map[route['top_id']] = route['bottom_id']
    if limit:
        if marker:
            mappings = db_api.get_bottom_mappings_by_top_id(
                t_ctx, marker, t_constants.RT_TRUNK)
            # if mapping exists, we retrieve trunk information
            # from bottom, otherwise from top
            if mappings:
                pod_id = mappings[0][0]['pod_id']
                current_pod = db_api.get_pod(t_ctx, pod_id)
                ret = self._get_trunks_from_pod_with_limit(
                    context, current_pod, bottom_top_map, top_bottom_map,
                    filters, limit, marker)
            else:
                ret = self._get_trunks_from_top_with_limit(
                    context, top_bottom_map, filters, limit, marker)
        else:
            current_pod = db_api.get_next_bottom_pod(t_ctx)
            # if current_pod exists, we retrieve trunk information
            # from bottom, otherwise from top
            if current_pod:
                ret = self._get_trunks_from_pod_with_limit(
                    context, current_pod, bottom_top_map, top_bottom_map,
                    filters, limit, None)
            else:
                ret = self._get_trunks_from_top_with_limit(
                    context, top_bottom_map, filters, limit, None)
    else:
        # Unpaginated path: gather trunks from every pod that has an
        # availability zone, then append top-region trunks.
        pods = db_api.list_pods(t_ctx)
        _filters = self._transform_trunk_filters(filters, top_bottom_map)
        for pod in pods:
            if not pod['az_name']:
                continue
            client = self._get_client(pod['region_name'])
            pod_trunks = client.list_trunks(t_ctx, filters=_filters)
            ret.extend(pod_trunks)
        ret = self._map_trunks_from_bottom_to_top(ret, bottom_top_map)
        top_trunks = self._get_trunks_from_top(context, top_bottom_map,
                                               filters)
        ret.extend(top_trunks)
    return [db_utils.resource_fields(trunk, fields) for trunk in ret]
def _make_service_graph_dict(self, graph_db, fields=None):
    """Serialize a service graph, expanding its chain associations."""
    res = {key: graph_db[key]
           for key in ('id', 'name', 'project_id', 'description')}
    res['port_chains'] = self._graph_assocs_to_pc_dict(
        graph_db['graph_chain_associations'])
    return db_utils.resource_fields(res, fields)
def _dvs_get_network(self, context, id, fields=None):
    """Fetch one network dict, including provider-network fields."""
    with db_api.CONTEXT_READER.using(context):
        # goto to the plugin DB and fetch the network
        net_db = self._get_network(context, id)
        # Don't do field selection here otherwise we won't be able
        # to add provider networks fields
        net_result = self._make_network_dict(net_db, context=context)
        self._extend_get_network_dict_provider(context, net_result)
    return db_utils.resource_fields(net_result, fields)
def get_subnets(self, context, filters=None, fields=None, sorts=None,
                limit=None, marker=None, page_reverse=False):
    """List subnets, syncing centrally-defined ones into local Neutron.

    A plain local query when no 'id' filter is given; otherwise subnets
    missing locally are pulled from central Neutron and created here.
    """
    # if id is not specified in the filter, we just return subnet data in
    # local Neutron server, otherwise id is specified, we need to retrieve
    # subnet data from central Neutron server and create subnet which
    # doesn't exist in local Neutron server.
    if not filters or 'id' not in filters:
        return self.core_plugin.get_subnets(
            context, filters, fields, sorts, limit, marker, page_reverse)
    t_ctx = t_context.get_context_from_neutron_context(context)
    # Fetch full subnet bodies (fields=None) so enable_dhcp is visible;
    # the caller's field selection is applied per-entry below.
    b_full_subnets = self.core_plugin.get_subnets(
        context, filters, None, sorts, limit, marker, page_reverse)
    b_subnets = []
    for b_subnet in b_full_subnets:
        if b_subnet['enable_dhcp']:
            self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
        b_subnets.append(db_utils.resource_fields(b_subnet, fields))
    # All requested ids resolved locally -- skip the central query.
    if len(b_subnets) == len(filters['id']):
        return b_subnets
    if self._skip_non_api_query(t_ctx):
        return b_subnets
    raw_client = self.neutron_handle._get_client(t_ctx)
    params = self._construct_params(filters, sorts, limit, marker,
                                    page_reverse)
    t_subnets = raw_client.list_subnets(**params)['subnets']
    t_id_set = set([subnet['id'] for subnet in t_subnets])
    b_id_set = set([subnet['id'] for subnet in b_subnets])
    missing_id_set = t_id_set - b_id_set
    if missing_id_set:
        missing_subnets = [subnet for subnet in t_subnets if (
            subnet['id'] in missing_id_set)]
        for subnet in missing_subnets:
            # Create only subnets whose network is valid locally.
            valid = self._is_valid_network(context, subnet['network_id'])
            if not valid:
                continue
            b_subnet = self._create_bottom_subnet(t_ctx, context, subnet)
            if b_subnet['enable_dhcp']:
                self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
            b_subnets.append(db_utils.resource_fields(b_subnet, fields))
    return b_subnets
def get_flavor_service_profile(context, service_profile_id, flavor_id,
                               fields=None):
    """Return the flavor/service-profile binding, or raise if absent."""
    exists = obj_flavor.FlavorServiceProfileBinding.objects_exist(
        context, service_profile_id=service_profile_id,
        flavor_id=flavor_id)
    if not exists:
        raise flav_exc.FlavorServiceProfileBindingNotFound(
            sp_id=service_profile_id, fl_id=flavor_id)
    binding = {'service_profile_id': service_profile_id,
               'flavor_id': flavor_id}
    return db_utils.resource_fields(binding, fields)
def _make_tap_flow_dict(self, tap_flow, fields=None):
    """Serialize a tap flow record."""
    keys = ('id', 'tenant_id', 'tap_service_id', 'name', 'description',
            'source_port', 'direction', 'status', 'vlan_filter')
    res = {key: tap_flow[key] for key in keys}
    return db_utils.resource_fields(res, fields)
def _make_firewall_policy_dict(self, firewall_policy, fields=None):
    """Serialize a firewall policy, flattening its rule associations."""
    rule_ids = []
    for rule_association in firewall_policy['rule_associations']:
        rule_ids.append(rule_association.firewall_rule_id)
    res = {'id': firewall_policy['id'],
           'tenant_id': firewall_policy['tenant_id'],
           'name': firewall_policy['name'],
           'description': firewall_policy['description'],
           'audited': firewall_policy['audited'],
           'firewall_rules': rule_ids,
           'shared': firewall_policy['shared']}
    return db_utils.resource_fields(res, fields)
def _make_segment_dict(segment_obj, fields=None):
    """Serialize a network segment and apply 'segments' extenders."""
    res = {'id': segment_obj['id'],
           'network_id': segment_obj['network_id'],
           'name': segment_obj['name'],
           'description': segment_obj['description'],
           'hosts': segment_obj['hosts'],
           'segment_index': segment_obj['segment_index']}
    # Provider attributes are keyed by the db module's constants.
    for key in (db.PHYSICAL_NETWORK, db.NETWORK_TYPE, db.SEGMENTATION_ID):
        res[key] = segment_obj[key]
    resource_extend.apply_funcs('segments', res, segment_obj.db_obj)
    return db_utils.resource_fields(res, fields)
def _make_security_group_dict(self, security_group, fields=None):
    """Serialize a security group together with its rules."""
    res = {key: security_group[key]
           for key in ('id', 'name', 'tenant_id', 'description')}
    rules = []
    for rule in security_group.rules:
        rules.append(self._make_security_group_rule_dict(rule.db_obj))
    res['security_group_rules'] = rules
    resource_extend.apply_funcs(ext_sg.SECURITYGROUPS, res,
                                security_group.db_obj)
    return db_utils.resource_fields(res, fields)
def _make_agent_dict(self, agent, fields=None):
    """Serialize an agent record, deriving liveness and JSON blobs."""
    attr = agent_apidef.RESOURCE_ATTRIBUTE_MAP.get(
        agent_apidef.COLLECTION_NAME)
    # 'alive' and 'configurations' are computed below, not copied.
    res = {k: agent[k] for k in attr
           if k not in ['alive', 'configurations']}
    res['alive'] = not utils.is_agent_down(res['heartbeat_timestamp'])
    res['configurations'] = self._get_dict(agent, 'configurations')
    res['resource_versions'] = self._get_dict(agent, 'resource_versions',
                                              ignore_missing=True)
    res['availability_zone'] = agent['availability_zone']
    return db_utils.resource_fields(res, fields)
def _make_subnet_dict(self, subnet, fields=None, context=None):
    """Serialize a subnet to an API dict.

    Accepts either a SQLAlchemy DB object or a Subnet OVO; the two
    store allocation pools, routes and DNS servers under different
    attributes, hence the branching below.
    """
    res = {'id': subnet['id'],
           'name': subnet['name'],
           'tenant_id': subnet['tenant_id'],
           'network_id': subnet['network_id'],
           'ip_version': subnet['ip_version'],
           'subnetpool_id': subnet['subnetpool_id'],
           'enable_dhcp': subnet['enable_dhcp'],
           'ipv6_ra_mode': subnet['ipv6_ra_mode'],
           'ipv6_address_mode': subnet['ipv6_address_mode'],
           }
    res['gateway_ip'] = str(
        subnet['gateway_ip']) if subnet['gateway_ip'] else None
    # TODO(korzen) this method can get subnet as DB object or Subnet OVO,
    # so temporary workaround will be to fill in the fields in separate
    # ways. After converting all code pieces to use Subnet OVO, the latter
    # 'else' can be deleted
    if isinstance(subnet, subnet_obj.Subnet):
        res['cidr'] = str(subnet.cidr)
        res['allocation_pools'] = [{'start': str(pool.start),
                                    'end': str(pool.end)}
                                   for pool in subnet.allocation_pools]
        res['host_routes'] = [{'destination': str(route.destination),
                               'nexthop': str(route.nexthop)}
                              for route in subnet.host_routes]
        res['dns_nameservers'] = [str(dns.address)
                                  for dns in subnet.dns_nameservers]
        res['shared'] = subnet.shared
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res,
                                    subnet.db_obj)
    else:
        res['cidr'] = subnet['cidr']
        # DB rows store pool boundaries as first_ip/last_ip columns.
        res['allocation_pools'] = [{'start': pool['first_ip'],
                                    'end': pool['last_ip']}
                                   for pool in subnet['allocation_pools']]
        res['host_routes'] = [{'destination': route['destination'],
                               'nexthop': route['nexthop']}
                              for route in subnet['routes']]
        res['dns_nameservers'] = [dns['address']
                                  for dns in subnet['dns_nameservers']]
        # The shared attribute for a subnet is the same
        # as its parent network
        res['shared'] = self._is_network_shared(context,
                                                subnet.rbac_entries)
        # Call auxiliary extend functions, if any
        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res,
                                    subnet)
    return db_utils.resource_fields(res, fields)