def _from_db_object(context, instance, db_inst, expected_attrs=None):
    """Method to help with migration to objects.

    Converts a database entity to a formal object.

    :param context: request context, stored on the instance for later
        lazy-loads
    :param instance: the Instance object populated in place
    :param db_inst: the dict-like database row to copy from
    :param expected_attrs: optional attribute names to eagerly hydrate;
        anything not listed is left for lazy-loading
    :returns: the populated ``instance``
    """
    instance._context = context
    if expected_attrs is None:
        expected_attrs = []
    # Most of the field names match right now, so be quick
    for field in instance.fields:
        if field in INSTANCE_OPTIONAL_ATTRS:
            # Optional attributes are only filled in below when requested
            continue
        elif field == 'deleted':
            # Soft-delete convention: the DB stores the row id in
            # 'deleted' when deleted, so equality with 'id' means True
            instance.deleted = db_inst['deleted'] == db_inst['id']
        elif field == 'cleaned':
            instance.cleaned = db_inst['cleaned'] == 1
        else:
            instance[field] = db_inst[field]
    if 'metadata' in expected_attrs:
        instance['metadata'] = utils.instance_meta(db_inst)
    if 'system_metadata' in expected_attrs:
        instance['system_metadata'] = utils.instance_sys_meta(db_inst)
    if 'fault' in expected_attrs:
        instance['fault'] = (
            objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
    if 'numa_topology' in expected_attrs:
        instance._load_numa_topology()
    if 'pci_requests' in expected_attrs:
        instance._load_pci_requests()
    if 'info_cache' in expected_attrs:
        if db_inst['info_cache'] is None:
            instance.info_cache = None
        elif not instance.obj_attr_is_set('info_cache'):
            # TODO(danms): If this ever happens on a backlevel instance
            # passed to us by a backlevel service, things will break
            instance.info_cache = objects.InstanceInfoCache(context)
        if instance.info_cache is not None:
            instance.info_cache._from_db_object(context,
                                                instance.info_cache,
                                                db_inst['info_cache'])
    # TODO(danms): If we are updating these on a backlevel instance,
    # we'll end up sending back new versions of these objects (see
    # above note for new info_caches)
    if 'pci_devices' in expected_attrs:
        pci_devices = base.obj_make_list(
            context, objects.PciDeviceList(context),
            objects.PciDevice, db_inst['pci_devices'])
        instance['pci_devices'] = pci_devices
    if 'security_groups' in expected_attrs:
        sec_groups = base.obj_make_list(
            context, objects.SecurityGroupList(context),
            objects.SecurityGroup, db_inst['security_groups'])
        instance['security_groups'] = sec_groups
    instance.obj_reset_changes()
    return instance
def service_get_all(self, context, filters=None, set_zones=False):
    """Return all services known to the cells, as ServiceProxy objects.

    An ``availability_zone`` entry in ``filters`` is popped (mutating the
    caller's dict) and applied locally after zones are set, because the
    cells RPC layer does not understand that filter.

    :param filters: optional dict of service filters
    :param set_zones: force availability-zone annotation of each service
    :returns: list of ServiceProxy objects wrapping Service objects
    """
    if filters is None:
        filters = {}
    if 'availability_zone' in filters:
        zone_filter = filters.pop('availability_zone')
        set_zones = True
    else:
        zone_filter = None
    services = self.cells_rpcapi.service_get_all(context, filters=filters)
    if set_zones:
        # TODO(sbauza): set_availability_zones returns flat dicts,
        # we should rather modify the RPC API to amend service_get_all by
        # adding a set_zones argument
        services = availability_zones.set_availability_zones(context,
                                                             services)
        if zone_filter is not None:
            services = [s for s in services
                        if s['availability_zone'] == zone_filter]
    # NOTE(sbauza): As services is a list of flat dicts, we need to
    # rehydrate the corresponding ServiceProxy objects
    cell_paths = []
    for service in services:
        # Strip the cell-path prefix from id and host so the Service
        # object fields hold plain values.  (Renamed 'id' -> 'service_id'
        # to avoid shadowing the builtin.)
        cell_path, service_id = cells_utils.split_cell_and_item(
            service['id'])
        cell_path, host = cells_utils.split_cell_and_item(service['host'])
        service['id'] = service_id
        service['host'] = host
        cell_paths.append(cell_path)
    services = obj_base.obj_make_list(context, objects.ServiceList(),
                                      objects.Service, services)
    services = [cells_utils.ServiceProxy(s, c)
                for s, c in zip(services, cell_paths)]
    return services
def get_all(cls, context, inactive=False, filters=None,
            sort_key='flavorid', sort_dir='asc', limit=None, marker=None):
    """Return flavors from the API DB first, then the main DB.

    Pagination spans both databases: the API DB is queried first, and the
    main DB only supplies whatever remains of ``limit`` (everything when
    limit is None).
    """
    try:
        api_db_flavors = _flavor_get_all_from_db(context,
                                                 inactive=inactive,
                                                 filters=filters,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir,
                                                 limit=limit,
                                                 marker=marker)
        # NOTE(danms): If we were asked for a marker and found it in
        # results from the API DB, we must continue our pagination with
        # just the limit (if any) to the main DB.
        marker = None
    except exception.MarkerNotFound:
        # Marker not in the API DB: fall through and let the main DB
        # query resolve it (or raise MarkerNotFound itself).
        api_db_flavors = []
    if limit is not None:
        limit_more = limit - len(api_db_flavors)
    else:
        limit_more = None
    if limit_more is None or limit_more > 0:
        db_flavors = db.flavor_get_all(context, inactive=inactive,
                                       filters=filters, sort_key=sort_key,
                                       sort_dir=sort_dir, limit=limit_more,
                                       marker=marker)
    else:
        db_flavors = []
    return base.obj_make_list(context, cls(context), objects.Flavor,
                              api_db_flavors + db_flavors,
                              expected_attrs=['extra_specs'])
def get_latest_by_instance_uuids(cls, context, instance_uuids):
    """Return the most recent fault for each of the given instances."""
    faults_by_uuid = db.instance_fault_get_by_instance_uuids(
        context, instance_uuids, latest=True)
    flattened = itertools.chain.from_iterable(faults_by_uuid.values())
    return base.obj_make_list(context, cls(context),
                              objects.InstanceFault, flattened)
def get_all(cls, context, disabled=None, set_zones=False):
    """List every service record, optionally tagged with its AZ."""
    records = db.service_get_all(context, disabled=disabled)
    if set_zones:
        records = availability_zones.set_availability_zones(context,
                                                            records)
    return base.obj_make_list(context, cls(context), objects.Service,
                              records)
def service_get_all(self, context, filters=None, set_zones=False):
    """Return all cell services as ServiceProxy-wrapped Service objects.

    Pops an ``availability_zone`` filter (mutating the caller's dict) and
    applies it locally, since the cells RPC layer cannot filter on it.
    """
    if filters is None:
        filters = {}
    if "availability_zone" in filters:
        zone_filter = filters.pop("availability_zone")
        set_zones = True
    else:
        zone_filter = None
    services = self.cells_rpcapi.service_get_all(context, filters=filters)
    if set_zones:
        services = availability_zones.set_availability_zones(context,
                                                             services)
        if zone_filter is not None:
            services = [s for s in services
                        if s["availability_zone"] == zone_filter]
    # NOTE(johannes): Cells adds the cell path as a prefix to the id
    # to uniquely identify the service amongst all cells. Unfortunately
    # the object model makes the id an integer. Use a proxy here to
    # work around this particular problem.

    # Split out the cell path first
    cell_paths = []
    for service in services:
        cell_path, id = cells_utils.split_cell_and_item(service["id"])
        service["id"] = id
        cell_paths.append(cell_path)
    # NOTE(danms): Currently cells does not support objects as
    # return values, so just convert the db-formatted service objects
    # to new-world objects here
    services = obj_base.obj_make_list(context, objects.ServiceList(),
                                      objects.Service, services)
    # Now wrap it in the proxy with the original cell_path
    services = [ServiceProxy(s, c) for s, c in zip(services, cell_paths)]
    return services
def _get_by_service(cls, context, service_id, use_slave=False):
    """Fetch the compute nodes of a service; [] when the service is gone."""
    try:
        compute_rows = db.compute_nodes_get_by_service_id(context,
                                                          service_id)
    except exception.ServiceNotFound:
        # NOTE(sbauza): Previous behaviour was returning an empty list
        # if the service was created with no computes, we need to keep it.
        compute_rows = []
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              compute_rows)
def get_all(cls, context, inactive=False, filters=None,
            sort_key='flavorid', sort_dir='asc', limit=None, marker=None):
    """Build a FlavorList from the main database with extra_specs loaded."""
    flavor_rows = db.flavor_get_all(context, inactive=inactive,
                                    filters=filters, sort_key=sort_key,
                                    sort_dir=sort_dir, limit=limit,
                                    marker=marker)
    return base.obj_make_list(context, cls(), Flavor, flavor_rows,
                              expected_attrs=['extra_specs'])
def get_by_metadata(cls, context, key=None, value=None):
    """Return aggregates with a metadata key set to value.

    This returns a list of all aggregates that have a metadata key
    set to some value.  If key is specified, then only values for
    that key will qualify.
    """
    rows = _get_by_metadata_from_db(context, key=key, value=value)
    return base.obj_make_list(context, cls(context), objects.Aggregate,
                              rows)
def get_by_metadata_key(cls, context, key, hosts=None):
    """Return aggregates (main DB first, then API DB) having ``key`` set,
    optionally restricted to aggregates covering the given hosts."""
    api_rows = [cls._fill_deprecated(agg)
                for agg in _get_by_metadata_key_from_db(context, key=key)]
    main_rows = db.aggregate_get_by_metadata_key(context, key=key)
    combined = main_rows + api_rows
    if hosts is not None:
        combined = cls._filter_db_aggregates(combined, hosts)
    return base.obj_make_list(context, cls(context), objects.Aggregate,
                              combined)
def _return_servers_objs(context, search_opts=None, limit=None, marker=None,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
    """Fake helper: hydrate an InstanceList from the fake DB rows."""
    rows = fake_instance_get_all_by_filters()(None, limit=limit,
                                              marker=marker)
    attrs = ['metadata', 'system_metadata', 'flavor', 'info_cache',
             'security_groups']
    return base.obj_make_list(context, objects.InstanceList(),
                              objects.Instance, rows, expected_attrs=attrs)
def test_obj_make_list(self):
    """obj_make_list builds one object per row and stores the context."""
    class MyList(base.ObjectListBase, base.NovaObject):
        pass

    rows = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
            {'foo': 2, 'bar': 'bat', 'missing': 'apple'}]
    built = base.obj_make_list('ctxt', MyList(), MyObj, rows)
    self.assertEqual(2, len(built))
    self.assertEqual('ctxt', built._context)
    for row, item in zip(rows, built):
        self.assertEqual(row['foo'], item.foo)
        self.assertEqual(row['bar'], item.bar)
        self.assertEqual(row['missing'], item.missing)
def _return_servers_objs(context, search_opts=None, limit=None, marker=None,
                         want_objects=False, expected_attrs=None,
                         sort_keys=None, sort_dirs=None):
    """Fake helper: hydrate an InstanceList from the fake DB rows."""
    rows = fake_instance_get_all_by_filters()(None, limit=limit,
                                              marker=marker)
    attrs = ["metadata", "system_metadata", "flavor", "info_cache",
             "security_groups"]
    return base.obj_make_list(context, objects.InstanceList(),
                              objects.Instance, rows, expected_attrs=attrs)
def get_all(cls, context, inactive=False, filters=None,
            sort_key="flavorid", sort_dir="asc", limit=None, marker=None):
    """Return a FlavorList matching the filters and sort/page arguments."""
    rows = db.flavor_get_all(context, inactive=inactive, filters=filters,
                             sort_key=sort_key, sort_dir=sort_dir,
                             limit=limit, marker=marker)
    return base.obj_make_list(context, cls(context), objects.Flavor,
                              rows, expected_attrs=["extra_specs"])
def test_obj_make_list(self):
    """Every row becomes an object; context is propagated to the list."""
    class MyList(base.ObjectListBase, base.NovaObject):
        pass

    rows = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
            {'foo': 2, 'bar': 'bat', 'missing': 'apple'}]
    built = base.obj_make_list('ctxt', MyList(), MyObj, rows)
    self.assertEqual(2, len(built))
    self.assertEqual('ctxt', built._context)
    for row, item in zip(rows, built):
        for attr in ('foo', 'bar', 'missing'):
            self.assertEqual(row[attr], getattr(item, attr))
def get_not_deleted_by_cell_and_project(cls, context, cell_uuid,
                                        project_id, limit=None):
    """Return up to ``limit`` InstanceMappings in a cell and project that
    are not queued for deletion.

    Unlike the other InstanceMappingList query methods, which return
    mappings regardless of deletion state, this one explicitly excludes
    mappings that are queued for deletion — hence the method name.
    """
    rows = cls._get_not_deleted_by_cell_and_project_from_db(
        context, cell_uuid, project_id, limit)
    return base.obj_make_list(context, cls(), objects.InstanceMapping,
                              rows)
def service_get_all(self, context, filters=None, set_zones=False,
                    all_cells=False):
    """Get all services.

    Note that this is the cellsv1 variant, which means we ignore the
    "all_cells" parameter.
    """
    if filters is None:
        filters = {}
    if 'availability_zone' in filters:
        # The AZ filter is popped (mutating the caller's dict) because the
        # cells RPC layer cannot filter on it; it is applied locally below.
        zone_filter = filters.pop('availability_zone')
        set_zones = True
    else:
        zone_filter = None
    services = self.cells_rpcapi.service_get_all(context, filters=filters)
    if set_zones:
        # TODO(sbauza): set_availability_zones returns flat dicts,
        # we should rather modify the RPC API to amend service_get_all by
        # adding a set_zones argument
        services = availability_zones.set_availability_zones(context,
                                                             services)
        if zone_filter is not None:
            services = [s for s in services
                        if s['availability_zone'] == zone_filter]
    # NOTE(sbauza): As services is a list of flat dicts, we need to
    # rehydrate the corresponding ServiceProxy objects
    cell_paths = []
    for service in services:
        # Both id and host carry a cell-path prefix; strip it so the
        # Service object fields hold plain values.
        cell_path, id = cells_utils.split_cell_and_item(service['id'])
        cell_path, host = cells_utils.split_cell_and_item(
            service['host'])
        service['id'] = id
        service['host'] = host
        cell_paths.append(cell_path)
    services = obj_base.obj_make_list(context, objects.ServiceList(),
                                      objects.Service, services)
    services = [cells_utils.ServiceProxy(s, c)
                for s, c in zip(services, cell_paths)]
    return services
def get_all_by_host(cls, context, host, use_slave=False):
    """Return the ComputeNodeList for a host, with a legacy fallback.

    :raises: ComputeHostNotFound when neither a host record nor a
        matching service can be found
    """
    try:
        db_computes = db.compute_node_get_all_by_host(context, host,
                                                      use_slave)
    except exception.ComputeHostNotFound:
        # FIXME(sbauza): Some old computes can still have no host record
        # We need to provide compatibility by using the old service_id
        # record.
        # We assume the compatibility as an extra penalty of one more DB
        # call but that's necessary until all nodes are upgraded.
        try:
            service = objects.Service.get_by_compute_host(context, host,
                                                          use_slave)
            db_computes = db.compute_nodes_get_by_service_id(context,
                                                             service.id)
        except exception.ServiceNotFound:
            # We need to provide the same exception upstream
            raise exception.ComputeHostNotFound(host=host)
        # We can avoid an extra call to Service object in _from_db_object
        for db_compute in db_computes:
            db_compute["host"] = service.host
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              db_computes)
def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None):
    """Populate a FixedIP object in place from a database row.

    :param expected_attrs: optional relationship names ('instance',
        'network', 'virtual_interface', 'floating_ips') to hydrate from
        the joined row data
    :returns: the populated ``fixedip``
    """
    if expected_attrs is None:
        expected_attrs = []
    for field in fixedip.fields:
        if field == "default_route":
            # NOTE(danms): This field is only set when doing a
            # FixedIPList.get_by_network() because it's a relatively
            # special-case thing, so skip it here
            continue
        if field not in FIXED_IP_OPTIONAL_ATTRS:
            fixedip[field] = db_fixedip[field]
    # NOTE(danms): Instance could be deleted, and thus None
    if "instance" in expected_attrs:
        fixedip.instance = (
            objects.Instance._from_db_object(context,
                                             objects.Instance(context),
                                             db_fixedip["instance"])
            if db_fixedip["instance"] else None
        )
    if "network" in expected_attrs:
        fixedip.network = (
            objects.Network._from_db_object(context,
                                            objects.Network(context),
                                            db_fixedip["network"])
            if db_fixedip["network"] else None
        )
    if "virtual_interface" in expected_attrs:
        # The VIF join can be absent (e.g. an unassociated IP), hence None
        db_vif = db_fixedip["virtual_interface"]
        vif = (
            objects.VirtualInterface._from_db_object(
                context,
                objects.VirtualInterface(context),
                db_fixedip["virtual_interface"]
            )
            if db_vif else None
        )
        fixedip.virtual_interface = vif
    if "floating_ips" in expected_attrs:
        fixedip.floating_ips = obj_base.obj_make_list(
            context, objects.FloatingIPList(context),
            objects.FloatingIP, db_fixedip["floating_ips"]
        )
    fixedip._context = context
    fixedip.obj_reset_changes()
    return fixedip
def get_all_by_host(cls, context, host, use_slave=False):
    """Return the ComputeNodeList for a host, with a single-node fallback
    for computes that predate the host column."""
    try:
        db_computes = db.compute_node_get_all_by_host(context, host,
                                                      use_slave)
    except exception.ComputeHostNotFound:
        # FIXME(sbauza): Some old computes can still have no host record
        # We need to provide compatibility by using the old service_id
        # record.
        # We assume the compatibility as an extra penalty of one more DB
        # call but that's necessary until all nodes are upgraded.
        service = objects.Service.get_by_compute_host(context, host,
                                                      use_slave)
        db_compute = db.compute_node_get_by_service_id(context,
                                                       service.id)
        # We can avoid an extra call to Service object in _from_db_object
        db_compute['host'] = service.host
        # NOTE(sbauza): Yeah, the old model sucks, because there can only
        # be one node per host...
        db_computes = [db_compute]
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              db_computes)
def fake_bdm_list_get_by_instance_uuid(cls, context, instance_uuid):
    """Fake BDM lookup returning two volume-backed mappings."""
    shared = {'instance_uuid': instance_uuid,
              'delete_on_termination': 'False',
              'source_type': 'volume',
              'destination_type': 'volume',
              'snapshot_id': None,
              'volume_size': 1}
    rows = [fake_block_device.FakeDbBlockDeviceDict(
                dict(shared, id=1, device_name='/dev/fake0',
                     volume_id=FAKE_UUID_A)),
            fake_block_device.FakeDbBlockDeviceDict(
                dict(shared, id=2, device_name='/dev/fake1',
                     volume_id=FAKE_UUID_B))]
    return base.obj_make_list(context, cls(),
                              objects.BlockDeviceMapping, rows)
def service_get_all(self, context, filters=None, set_zones=False):
    """Get services from the child cells, optionally filtered by AZ."""
    if filters is None:
        filters = {}
    zone_filter = None
    if 'availability_zone' in filters:
        # Pop mutates the caller's dict on purpose: the cells RPC layer
        # does not understand this filter, so it is applied locally.
        zone_filter = filters.pop('availability_zone')
        set_zones = True
    services = self.cells_rpcapi.service_get_all(context, filters=filters)
    if set_zones:
        services = availability_zones.set_availability_zones(context,
                                                             services)
        if zone_filter is not None:
            services = [s for s in services
                        if s['availability_zone'] == zone_filter]
    # NOTE(danms): Currently cells does not support objects as
    # return values, so just convert the db-formatted service objects
    # to new-world objects here
    return obj_base.obj_make_list(context, service_obj.ServiceList(),
                                  service_obj.Service, services)
def get_by_user(cls, context, user_id, limit=None, marker=None):
    """Return a user's keypairs, paginating across API and main DBs.

    API-DB results come first; the main DB only supplies whatever remains
    of ``limit`` (everything when limit is None).
    """
    try:
        api_db_keypairs = cls._get_from_db(
            context, user_id, limit=limit, marker=marker)
        # NOTE(pkholkin): If we were asked for a marker and found it in
        # results from the API DB, we must continue our pagination with
        # just the limit (if any) to the main DB.
        marker = None
    except exception.MarkerNotFound:
        # Marker not found in the API DB: let the main DB query resolve
        # it (or raise MarkerNotFound itself).
        api_db_keypairs = []
    if limit is not None:
        limit_more = limit - len(api_db_keypairs)
    else:
        limit_more = None
    if limit_more is None or limit_more > 0:
        main_db_keypairs = db.key_pair_get_all_by_user(
            context, user_id, limit=limit_more, marker=marker)
    else:
        main_db_keypairs = []
    return base.obj_make_list(context, cls(context), objects.KeyPair,
                              api_db_keypairs + main_db_keypairs)
def get_by_instance_uuid(cls, context, uuid):
    """List the PCI devices allocated to the given instance."""
    rows = db.pci_device_get_all_by_instance_uuid(context, uuid)
    return base.obj_make_list(context, cls(context), objects.PciDevice,
                              rows)
def get_by_compute_node(cls, context, node_id):
    """List the PCI devices attached to the given compute node."""
    rows = db.pci_device_get_all_by_node(context, node_id)
    return base.obj_make_list(context, cls(context), objects.PciDevice,
                              rows)
def get_by_project_id(cls, context, project_id):
    """List the instance groups belonging to a project."""
    rows = db.instance_group_get_all_by_project_id(context, project_id)
    return base.obj_make_list(context, InstanceGroupList(), InstanceGroup,
                              rows)
def get_by_project(cls, context, project_id):
    """List the security groups owned by a project."""
    rows = db.security_group_get_by_project(context, project_id)
    return base.obj_make_list(context, cls(context),
                              objects.SecurityGroup, rows)
def get_by_disabled(cls, context, disabled):
    """List cell mappings filtered by their disabled flag."""
    rows = cls._get_by_disabled_from_db(context, disabled)
    return base.obj_make_list(context, cls(), CellMapping, rows)
def get_all(cls, context):
    """List every cell mapping."""
    rows = cls._get_all_from_db(context)
    return base.obj_make_list(context, cls(), CellMapping, rows)
def get_by_host(cls, context, host):
    """List the fixed IPs associated with a host."""
    rows = db.fixed_ip_get_by_host(context, host)
    return obj_base.obj_make_list(context, cls(context), objects.FixedIP,
                                  rows)
def get_by_metadata_key(cls, context, key, hosts=None):
    """List aggregates having a metadata key, optionally host-filtered."""
    rows = db.aggregate_get_by_metadata_key(context, key=key)
    if hosts is not None:
        rows = cls._filter_db_aggregates(rows, hosts)
    return base.obj_make_list(context, cls(context), objects.Aggregate,
                              rows)
def get_by_hypervisor_type(cls, context, hv_type):
    """List compute nodes running the given hypervisor type."""
    rows = cls._db_compute_node_get_by_hv_type(context, hv_type)
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              rows)
def get_by_instance_uuids(cls, context, uuids):
    """List the instance mappings for the given instance UUIDs."""
    rows = cls._get_by_instance_uuids_from_db(context, uuids)
    return base.obj_make_list(context, cls(), objects.InstanceMapping,
                              rows)
def get_all_by_host(cls, context, host, use_slave=False):
    """List the compute nodes hosted on the given host."""
    rows = cls._db_compute_node_get_all_by_host(context, host,
                                                use_slave=use_slave)
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              rows)
def get_by_pagination(cls, context, limit=None, marker=None):
    """Return a page of compute nodes starting after ``marker``."""
    rows = db.compute_node_get_all_by_pagination(context, limit=limit,
                                                 marker=marker)
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              rows)
def get_all(cls, context):
    """List every instance group."""
    rows = db.instance_group_get_all(context)
    return base.obj_make_list(context, InstanceGroupList(), InstanceGroup,
                              rows)
def get_by_user(cls, context, user_id):
    """List the keypairs registered by a user."""
    rows = db.key_pair_get_all_by_user(context, user_id)
    return base.obj_make_list(context, KeyPairList(), KeyPair, rows)
def get_by_parent_address(cls, context, node_id, parent_addr):
    """List the PCI devices on a node under the given parent address."""
    rows = db.pci_device_get_all_by_parent_addr(context, node_id,
                                                parent_addr)
    return base.obj_make_list(context, cls(context), objects.PciDevice,
                              rows)
def get_by_project_id(cls, context, project_id):
    """Return a list of CellMapping objects which correspond to cells in
    which project_id has InstanceMappings.
    """
    rows = cls._get_by_project_id_from_db(context, project_id)
    return base.obj_make_list(context, cls(), CellMapping, rows)
def _get_by_service(cls, context, service_id):
    """Build a ComputeNodeList from a service's joined compute_node rows."""
    service_row = db.service_get(context, service_id,
                                 with_compute_node=True)
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              service_row['compute_node'])
def get_by_host(cls, context, host, key=None):
    """List the aggregates containing ``host``, optionally filtered by a
    metadata key."""
    rows = db.aggregate_get_by_host(context, host, key=key)
    return base.obj_make_list(context, cls(context), objects.Aggregate,
                              rows)
def get_all(cls, context):
    """List every security group."""
    rows = db.security_group_get_all(context)
    return base.obj_make_list(context, cls(context),
                              objects.SecurityGroup, rows)
def get_all(cls, context):
    """List every aggregate."""
    rows = db.aggregate_get_all(context)
    return base.obj_make_list(context, cls(context), objects.Aggregate,
                              rows)
def get_by_instance(cls, context, instance):
    """List the security groups attached to an instance."""
    rows = db.security_group_get_by_instance(context, instance.uuid)
    return base.obj_make_list(context, cls(context),
                              objects.SecurityGroup, rows)
def get_by_binary(cls, context, binary, include_disabled=False):
    """List the services running a given binary."""
    rows = db.service_get_all_by_binary(
        context, binary, include_disabled=include_disabled)
    return base.obj_make_list(context, cls(context), objects.Service,
                              rows)
def get_by_host(cls, context, host):
    """List the services running on a host."""
    rows = db.service_get_all_by_host(context, host)
    return base.obj_make_list(context, cls(context), objects.Service,
                              rows)
def get_all_computes_by_hv_type(cls, context, hv_type):
    """List enabled compute services running the given hypervisor type."""
    rows = db.service_get_all_computes_by_hv_type(
        context, hv_type, include_disabled=False)
    return base.obj_make_list(context, cls(context), objects.Service,
                              rows)
def get_all(cls, context):
    """List every instance group stored in the API database."""
    rows = cls._get_from_db(context)
    return base.obj_make_list(context, cls(context),
                              objects.InstanceGroup, rows)
def get_by_cell_id(cls, context, cell_id):
    """List the instance mappings belonging to a cell."""
    rows = cls._get_by_cell_id_from_db(context, cell_id)
    return base.obj_make_list(context, cls(), objects.InstanceMapping,
                              rows)
def get_all(cls, context):
    """List every compute node."""
    rows = db.compute_node_get_all(context)
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              rows)
def get_all(cls, context):
    """List every virtual interface."""
    rows = db.virtual_interface_get_all(context)
    return base.obj_make_list(context, cls(context),
                              objects.VirtualInterface, rows)
def get_by_project_id(cls, context, project_id):
    """List the API-DB instance groups owned by a project."""
    rows = cls._get_from_db(context, project_id=project_id)
    return base.obj_make_list(context, cls(context),
                              objects.InstanceGroup, rows)
def get_by_hypervisor(cls, context, hypervisor_match):
    """List compute nodes whose hypervisor matches the given pattern."""
    rows = db.compute_node_search_by_hypervisor(context, hypervisor_match)
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              rows)
def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
    """List the virtual interfaces of an instance."""
    rows = db.virtual_interface_get_by_instance(context, instance_uuid,
                                                use_slave=use_slave)
    return base.obj_make_list(context, cls(context),
                              objects.VirtualInterface, rows)
def get_all_by_uuids(cls, context, compute_uuids):
    """List the compute nodes with the given UUIDs."""
    rows = cls._db_compute_node_get_all_by_uuids(context, compute_uuids)
    return base.obj_make_list(context, cls(context), objects.ComputeNode,
                              rows)
def get_all(cls, context):
    """List every compute node."""
    rows = db.compute_node_get_all(context)
    return base.obj_make_list(context, ComputeNodeList(), ComputeNode,
                              rows)
def get_by_hypervisor(cls, context, hypervisor_match):
    """List compute nodes whose hypervisor matches the given pattern."""
    rows = db.compute_node_search_by_hypervisor(context, hypervisor_match)
    return base.obj_make_list(context, ComputeNodeList(), ComputeNode,
                              rows)
def get_by_uuids(cls, context, uuids, start_period=None, use_slave=False):
    """List bandwidth usage records for the given instance UUIDs."""
    rows = cls._db_bw_usage_get_by_uuids(context, uuids=uuids,
                                         start_period=start_period,
                                         use_slave=use_slave)
    return base.obj_make_list(context, cls(), BandwidthUsage, rows)
def create(cls, context, ip_info, want_result=False):
    """Bulk-create floating IPs; return a list only when requested.

    Returns None unless ``want_result`` is True, in which case the
    created rows are wrapped in a FloatingIPList.
    """
    rows = db.floating_ip_bulk_create(context, ip_info)
    if not want_result:
        return None
    return obj_base.obj_make_list(context, cls(), FloatingIP, rows)