def test_instance_get_all_by_filters_paginate(self):
    """Each marker trims the instances sorted before it; an unknown
    marker raises MarkerNotFound.
    """
    self.flags(sql_connection="notdb://")
    created = [self.create_instances_with_args(display_name=name)
               for name in ('test1', 'test2', 'test3')]
    filters = {'display_name': '%test%'}
    # Without a marker, every matching instance is returned.
    page = db.instance_get_all_by_filters(self.context, filters,
                                          marker=None)
    self.assertEqual(3, len(page))
    # Marking instance N skips everything up to and including it.
    for skipped, inst in enumerate(created, start=1):
        page = db.instance_get_all_by_filters(self.context, filters,
                                              sort_dir="asc",
                                              marker=inst['uuid'])
        self.assertEqual(3 - skipped, len(page))
    # A marker that matches nothing is an error.
    self.assertRaises(exception.MarkerNotFound,
                      db.instance_get_all_by_filters,
                      self.context, filters,
                      marker=str(uuidutils.uuid4()))
def test_instance_get_all(self):
    """Conductor instance_get_all calls pass straight through to db."""
    filters = {"name": "fake-inst"}
    self.mox.StubOutWithMock(db, "instance_get_all_by_filters")
    # Record the two db calls the conductor is expected to make.
    db.instance_get_all(self.context)
    db.instance_get_all_by_filters(self.context, filters,
                                   "updated_at", "asc")
    self.mox.ReplayAll()
    self.conductor.instance_get_all(self.context)
    self.conductor.instance_get_all_by_filters(self.context, filters,
                                               "updated_at", "asc")
def test_instance_get_all_by_filters_paginate(self):
    """Pagination markers skip earlier instances; a bogus marker
    raises MarkerNotFound.
    """
    self.flags(sql_connection="notdb://")
    made = [self.create_instances_with_args(display_name=name)
            for name in ("test1", "test2", "test3")]
    filters = {"display_name": "%test%"}
    # All three match when no marker is supplied.
    self.assertEqual(
        3, len(db.instance_get_all_by_filters(self.context, filters,
                                              marker=None)))
    # Each successive marker leaves one fewer instance after it.
    for already_seen, inst in enumerate(made, start=1):
        remaining = db.instance_get_all_by_filters(
            self.context, filters, sort_dir="asc",
            marker=inst["uuid"])
        self.assertEqual(3 - already_seen, len(remaining))
    # Unknown markers are rejected.
    self.assertRaises(
        exception.MarkerNotFound,
        db.instance_get_all_by_filters,
        self.context, filters, marker=str(utils.gen_uuid()))
def test_get_all_by_filters_works_for_cleaned(self):
    """Filtering on cleaned=False returns only the un-cleaned
    deleted instance, wrapped as an Instance object.
    """
    fakes = [self.fake_instance(1),
             self.fake_instance(2, updates={'deleted': 2,
                                            'cleaned': None})]
    uncleaned = fakes[1]
    self.context.read_deleted = 'yes'
    filters = {'deleted': True, 'cleaned': False}
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(
        self.context, filters, 'uuid', 'asc', limit=None, marker=None,
        columns_to_join=['metadata'],
        use_slave=False).AndReturn([uncleaned])
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        self.context, filters, 'uuid', 'asc',
        expected_attrs=['metadata'], use_slave=False)
    self.assertEqual(1, len(inst_list))
    self.assertIsInstance(inst_list.objects[0], instance.Instance)
    self.assertEqual(inst_list.objects[0].uuid, uncleaned['uuid'])
    self.assertRemotes()
def test_get_all_by_filters_works_for_cleaned(self):
    """Only the deleted-but-not-cleaned fake comes back from the
    filtered listing.
    """
    fakes = [self.fake_instance(1),
             self.fake_instance(2, updates={"deleted": 2,
                                            "cleaned": None})]
    expected_row = fakes[1]
    self.context.read_deleted = "yes"
    self.mox.StubOutWithMock(db, "instance_get_all_by_filters")
    db.instance_get_all_by_filters(
        self.context,
        {"deleted": True, "cleaned": False},
        "uuid", "asc", limit=None, marker=None,
        columns_to_join=["metadata"],
        use_slave=False,
    ).AndReturn([expected_row])
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        self.context,
        {"deleted": True, "cleaned": False},
        "uuid", "asc",
        expected_attrs=["metadata"],
        use_slave=False,
    )
    self.assertEqual(1, len(inst_list))
    only = inst_list.objects[0]
    self.assertIsInstance(only, instance.Instance)
    self.assertEqual(only.uuid, expected_row["uuid"])
    self.assertRemotes()
def test_instance_get_all_by_filters(self):
    """Conductor proxies get_all_by_filters straight to the db layer."""
    filters = {'foo': 'bar'}
    sort_key, sort_dir = 'fake-key', 'fake-sort'
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(self.context, filters,
                                   sort_key, sort_dir)
    self.mox.ReplayAll()
    self.conductor.instance_get_all_by_filters(self.context, filters,
                                               sort_key, sort_dir)
def test_instance_get_all(self):
    """Both conductor listing entry points delegate to the db api."""
    expected_filters = {'name': 'fake-inst'}
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    # Expected db-layer calls, in order.
    db.instance_get_all(self.context)
    db.instance_get_all_by_filters(self.context, expected_filters,
                                   'updated_at', 'asc')
    self.mox.ReplayAll()
    self.conductor.instance_get_all(self.context)
    self.conductor.instance_get_all_by_filters(
        self.context, expected_filters, 'updated_at', 'asc')
def test_instance_get_all_by_filters_regex_unsupported_db(self):
    """Databases without regex support fall back to the LIKE operator."""
    self.flags(sql_connection="notdb://")
    for name in ("test1", "test.*", "diff"):
        self.create_instances_with_args(display_name=name)
    # Under LIKE semantics "test.*" is a literal, so only one match.
    literal = db.instance_get_all_by_filters(
        self.context, {"display_name": "test.*"})
    self.assertEqual(1, len(literal))
    # "%" wildcards are honoured by LIKE.
    wildcard = db.instance_get_all_by_filters(
        self.context, {"display_name": "%test%"})
    self.assertEqual(2, len(wildcard))
def test_get_all_by_filters(self):
    """InstanceList.get_by_filters wraps db rows in Instance objects."""
    fakes = [self.fake_instance(1), self.fake_instance(2)]
    self.mox.StubOutWithMock(db, "instance_get_all_by_filters")
    db.instance_get_all_by_filters(
        self.context, {"foo": "bar"}, "uuid", "asc",
        limit=None, marker=None,
        columns_to_join=["metadata"]).AndReturn(fakes)
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        self.context, {"foo": "bar"}, "uuid", "asc",
        expected_attrs=["metadata"])
    # Every db row must be mirrored, in order, as an Instance.
    for idx, fake in enumerate(fakes):
        obj = inst_list.objects[idx]
        self.assertIsInstance(obj, instance.Instance)
        self.assertEqual(obj.uuid, fake["uuid"])
    self.assertRemotes()
def get_instances_to_sync(context, updated_since=None, project_id=None,
                          deleted=True, shuffle=False, uuids_only=False):
    """Yield active and deleted instances to sync with parent cells.

    The list may optionally be shuffled for periodic updates so that
    multiple cells services aren't self-healing the same instances in
    nearly lockstep.
    """
    filters = {}
    if updated_since is not None:
        filters['changes-since'] = updated_since
    if project_id is not None:
        filters['project_id'] = project_id
    if not deleted:
        filters['deleted'] = False
    # Active instances first.
    instances = db.instance_get_all_by_filters(context, filters,
                                               'deleted', 'asc')
    if shuffle:
        random.shuffle(instances)
    for inst in instances:
        yield inst['uuid'] if uuids_only else inst
def update_available_resource(self, context):
    """Refresh in-memory compute node usage from the hypervisor.

    Resource claims still in progress are folded in so that operations
    which declared a need for resources, but have not yet realized them
    on the hypervisor, are accounted for.
    """
    resources = self.driver.get_available_resource()
    if not resources:
        # The virt driver does not support this function
        LOG.audit(_("Virt driver does not support "
                    "'get_available_resource' Compute tracking is disabled."))
        self.compute_node = None
        self.claims = {}
        return
    self._verify_resources(resources)
    self._report_hypervisor_resource_view(resources)
    self._purge_expired_claims()
    # Grab all instances assigned to this host:
    host_filters = {'host': self.host, 'deleted': False}
    instances = db.instance_get_all_by_filters(context, host_filters)
    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(resources, instances)
    self._report_final_resource_view(resources)
    self._sync_compute_node(context, resources)
def test_instance_get_all_by_filters_unicode_value(self):
    """A unicode display_name filter still matches the instance."""
    db.instance_create(self.context, {'reservation_id': 'a',
                                      'image_ref': 1,
                                      'host': 'host1',
                                      'display_name': u'test♥'})
    matches = db.instance_get_all_by_filters(self.context.elevated(),
                                             {'display_name': u'test'})
    self.assertEqual(1, len(matches))
def update_available_resource(self, context):
    """Rebuild compute node usage from hypervisor-audited data.

    In-progress resource claims are included to cover operations that
    have reserved resources without the hypervisor reflecting them yet.
    """
    resources = self.driver.get_available_resource()
    if not resources:
        # The virt driver does not support this function
        LOG.audit(_("Virt driver does not support "
                    "'get_available_resource' Compute tracking is disabled."))
        self.compute_node = None
        self.claims = {}
        return
    self._verify_resources(resources)
    self._report_hypervisor_resource_view(resources)
    self._purge_expired_claims()
    # Grab all instances assigned to this host:
    instances = db.instance_get_all_by_filters(
        context, {'host': self.host, 'deleted': False})
    # Now calculate usage based on instance utilization:
    self._update_usage_from_instances(resources, instances)
    self._report_final_resource_view(resources)
    self._sync_compute_node(context, resources)
def _GET_servers(self, req, res, body):
    """Decorate server dicts in the response with the API disk_config
    value looked up from their DB rows.
    """
    context = req.environ['nova.context']
    servers = self._extract_resource_from_body(
        res, body,
        singular='server',
        singular_template=ServerDiskConfigTemplate(),
        plural='servers',
        plural_template=ServersDiskConfigTemplate())
    # Skip servers that already carry the key (most likely set by a
    # remote zone).
    servers = [s for s in servers if self.API_DISK_CONFIG not in s]
    # One bulk DB lookup for the remaining servers.
    uuids = [s['id'] for s in servers]
    rows = db.instance_get_all_by_filters(context, {'uuid': uuids})
    rows_by_uuid = dict((row['uuid'], row) for row in rows)
    for server in servers:
        row = rows_by_uuid.get(server['id'])
        if row:
            value = row[self.INTERNAL_DISK_CONFIG]
            server[self.API_DISK_CONFIG] = disk_config_to_api(value)
    return res
def get_instances_to_sync(context, updated_since=None, project_id=None,
                          deleted=True, shuffle=False, uuids_only=False):
    """Generate instances (or their uuids) needing a parent-cell sync.

    Optionally shuffled so multiple cells services doing periodic
    updates aren't self-healing the same instances in lockstep.
    """
    filters = {}
    if updated_since is not None:
        filters['changes-since'] = updated_since
    if project_id is not None:
        filters['project_id'] = project_id
    if not deleted:
        filters['deleted'] = False
    # Active instances first.
    instances = db.instance_get_all_by_filters(
        context, filters, 'deleted', 'asc')
    if shuffle:
        random.shuffle(instances)
    for found in instances:
        if uuids_only:
            yield found['uuid']
        else:
            yield found
def get_by_filters(cls, context, filters, sort_key='created_at',
                   sort_dir='desc', limit=None, marker=None,
                   expected_attrs=None, use_slave=False, sort_keys=None,
                   sort_dirs=None):
    """Build an InstanceList from db rows matching *filters*.

    Multi-key sorting (sort_keys/sort_dirs) routes to the dedicated
    sort query; otherwise the single-key variant is used.
    """
    cols = _expected_cols(expected_attrs)
    if sort_keys or sort_dirs:
        rows = db.instance_get_all_by_filters_sort(
            context, filters, limit=limit, marker=marker,
            columns_to_join=cols, use_slave=use_slave,
            sort_keys=sort_keys, sort_dirs=sort_dirs)
    else:
        rows = db.instance_get_all_by_filters(
            context, filters, sort_key, sort_dir, limit=limit,
            marker=marker, columns_to_join=cols, use_slave=use_slave)
    return _make_instance_list(context, cls(), rows, expected_attrs)
def test_get_all_by_filters(self):
    """db rows come back as Instance objects in the same order."""
    fakes = [self.fake_instance(1), self.fake_instance(2)]
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(
        self.context, {'foo': 'bar'}, 'uuid', 'asc',
        limit=None, marker=None,
        columns_to_join=['metadata']).AndReturn(fakes)
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        self.context, {'foo': 'bar'}, 'uuid', 'asc',
        expected_attrs=['metadata'])
    for idx, fake in enumerate(fakes):
        obj = inst_list.objects[idx]
        self.assertIsInstance(obj, instance.Instance)
        self.assertEqual(obj.uuid, fake['uuid'])
    self.assertRemotes()
def test_instance_get_all_by_filters_regex(self):
    """display_name filters are matched as regular expressions."""
    for name in ('test1', 'teeeest2', 'diff'):
        self.create_instances_with_args(display_name=name)
    # 't.*st.' matches both 'test1' and 'teeeest2' but not 'diff'.
    matches = db.instance_get_all_by_filters(
        self.context, {'display_name': 't.*st.'})
    self.assertEqual(2, len(matches))
def test_get_all_by_filters(self):
    """get_by_filters forwards use_slave and wraps rows as Instances."""
    fakes = [self.fake_instance(1), self.fake_instance(2)]
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(
        self.context, {'foo': 'bar'}, 'uuid', 'asc',
        limit=None, marker=None, columns_to_join=['metadata'],
        use_slave=False).AndReturn(fakes)
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        self.context, {'foo': 'bar'}, 'uuid', 'asc',
        expected_attrs=['metadata'], use_slave=False)
    for position, fake in enumerate(fakes):
        made = inst_list.objects[position]
        self.assertIsInstance(made, instance.Instance)
        self.assertEqual(made.uuid, fake['uuid'])
    self.assertRemotes()
def get_by_filters(cls, context, filters, sort_key='created_at',
                   sort_dir='desc', limit=None, marker=None,
                   expected_attrs=None):
    """Return an InstanceList of db rows matching *filters*."""
    rows = db.instance_get_all_by_filters(
        context, filters, sort_key, sort_dir,
        limit=limit, marker=marker,
        columns_to_join=_expected_cols(expected_attrs))
    return _make_instance_list(context, cls(), rows, expected_attrs)
def test_get_all_by_filters(self):
    """InstanceList.get_by_filters turns db rows into Instances.

    Fix: ``assertTrue(isinstance(...))`` replaced with
    ``assertIsInstance`` so a failure reports the offending type
    instead of a bare "False is not true".
    """
    fakes = [self.fake_instance(1), self.fake_instance(2)]
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(ctxt, {'foo': 'bar'}, 'uuid',
                                   'asc', None, None,
                                   columns_to_join=['metadata']).AndReturn(
                                       fakes)
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        ctxt, {'foo': 'bar'}, 'uuid', 'asc',
        expected_attrs=['metadata'])
    for i in range(0, len(fakes)):
        self.assertIsInstance(inst_list.objects[i], instance.Instance)
        self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
    self.assertRemotes()
def get_by_filters(cls, context, filters, sort_key='created_at',
                   sort_dir='desc', limit=None, marker=None,
                   expected_attrs=None):
    """Build an InstanceList from db rows that match *filters*."""
    rows = db.instance_get_all_by_filters(
        context, filters, sort_key, sort_dir,
        limit=limit, marker=marker,
        columns_to_join=expected_cols(expected_attrs))
    return _make_instance_list(context, cls(), rows, expected_attrs)
def group_hosts(self, context, group):
    """Return the list of hosts that have VM's from the group."""
    # The system_metadata 'group' will be filtered
    members = db.instance_get_all_by_filters(
        context, {'deleted': False, 'group': group})
    hosts = []
    for member in members:
        host = member.get('host')
        if host is not None:
            hosts.append(host)
    return hosts
def test_instance_get_all_by_filters_deleted(self):
    """Destroyed instances are excluded from the unfiltered listing."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_ref': 1,
                                              'host': 'host1'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'b',
                                              'image_ref': 1,
                                              'host': 'host1'})
    db.instance_destroy(self.context, inst1.id)
    survivors = db.instance_get_all_by_filters(self.context.elevated(),
                                               {})
    # Only the undestroyed instance should remain.
    self.assertEqual(1, len(survivors))
    self.assertEqual(survivors[0].id, inst2.id)
def group_hosts(self, context, group):
    """Return the list of hosts that have VM's from the group."""
    # The system_metadata 'group' will be filtered
    filters = {'deleted': False,
               'system_metadata': {'group': group}}
    members = db.instance_get_all_by_filters(context, filters)
    result = []
    for member in members:
        if member.get('host') is not None:
            result.append(member['host'])
    return result
def test_instance_get_all_by_filters_deleted(self):
    """Deleted instances are listed after live ones and flagged."""
    inst1 = db.instance_create(self.context, {"reservation_id": "a",
                                              "image_ref": 1,
                                              "host": "host1"})
    inst2 = db.instance_create(self.context, {"reservation_id": "b",
                                              "image_ref": 1,
                                              "host": "host1"})
    db.instance_destroy(self.context, inst1.id)
    listing = db.instance_get_all_by_filters(self.context.elevated(),
                                             {})
    self.assertEqual(2, len(listing))
    # Live instance first, destroyed one second with deleted set.
    self.assertEqual(listing[0].id, inst2.id)
    self.assertEqual(listing[1].id, inst1.id)
    self.assertTrue(listing[1].deleted)
def test_get_all_by_filters_works_for_cleaned(self):
    """Filtering on cleaned=False returns only the un-cleaned row.

    Fix: ``assertTrue(isinstance(...))`` replaced with
    ``assertIsInstance`` so a failure names the unexpected type
    rather than reporting "False is not true".
    """
    fakes = [self.fake_instance(1),
             self.fake_instance(2, updates={'deleted': 2,
                                            'cleaned': None})]
    self.context.read_deleted = 'yes'
    self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
    db.instance_get_all_by_filters(self.context,
                                   {'deleted': True, 'cleaned': False},
                                   'uuid', 'asc', limit=None, marker=None,
                                   columns_to_join=['metadata']).AndReturn(
                                       [fakes[1]])
    self.mox.ReplayAll()
    inst_list = instance.InstanceList.get_by_filters(
        self.context, {'deleted': True, 'cleaned': False}, 'uuid',
        'asc', expected_attrs=['metadata'])
    self.assertEqual(1, len(inst_list))
    self.assertIsInstance(inst_list.objects[0], instance.Instance)
    self.assertEqual(inst_list.objects[0].uuid, fakes[1]['uuid'])
    self.assertRemotes()
def test_instance_get_all_by_filters_deleted(self):
    """Both instances come back; the destroyed one is last and
    flagged deleted.
    """
    create = lambda rid: db.instance_create(
        self.context, {'reservation_id': rid, 'image_ref': 1,
                       'host': 'host1'})
    inst1 = create('a')
    inst2 = create('b')
    db.instance_destroy(self.context, inst1.id)
    rows = db.instance_get_all_by_filters(self.context.elevated(), {})
    self.assertEqual(2, len(rows))
    self.assertEqual(rows[0].id, inst2.id)
    self.assertEqual(rows[1].id, inst1.id)
    self.assertTrue(rows[1].deleted)
def test_instance_get_all_by_filters_deleted(self):
    """Live and destroyed instances both appear; the destroyed one
    carries the deleted flag.
    """
    inst1 = self.create_instances_with_args()
    inst2 = self.create_instances_with_args(reservation_id='b')
    db.instance_destroy(self.context, inst1['uuid'])
    rows = db.instance_get_all_by_filters(self.context, {})
    self.assertEqual(2, len(rows))
    returned_ids = [row.id for row in rows]
    self.assertIn(inst1.id, returned_ids)
    self.assertIn(inst2.id, returned_ids)
    # Whichever position inst1 landed in, that row must be deleted.
    deleted_by_id = dict((row.id, row.deleted) for row in rows)
    self.assertTrue(deleted_by_id[inst1.id])
def _add_disk_config(self, context, servers):
    """Attach the API disk_config value to each server dict, looked
    up from its DB row.
    """
    # Get DB information for servers
    uuids = [s['id'] for s in servers]
    rows = db.instance_get_all_by_filters(context, {'uuid': uuids})
    rows_by_uuid = dict((row['uuid'], row) for row in rows)
    for server in servers:
        row = rows_by_uuid.get(server['id'])
        if row:
            server[API_DISK_CONFIG] = disk_config_to_api(
                row[INTERNAL_DISK_CONFIG])
def index(self, req):
    """Return all quota usage in the platform.

    Aggregates per-host capacity (memory, disk, ECU, network QoS),
    then folds every non-deleted instance into per-resource usage
    items, and finally adds floating IP usage.

    Fix: public network QoS capacity was accumulated under a check of
    ``total_private_network_mbps`` — a copy-paste bug that skipped
    hosts reporting only public bandwidth; it now checks
    ``total_public_network_mbps``.
    """
    context = req.environ['nova.context']
    authorize(context)
    hosts = db.compute_node_get_all(context)
    memory_mb_capacity = 0
    local_gb_capacity = 0
    ecu_capacity = 0
    private_network_qos_capacity = 0
    public_network_qos_capacity = 0
    for host in hosts:
        memory_mb_capacity += host.memory_mb
        local_gb_capacity += host.local_gb
        ecu_capacity += self._get_host_ecu(req, host).get('capacity') or 0
        total_private_network_mbps = host.get(
            'total_private_network_mbps')
        if total_private_network_mbps is not None:
            private_network_qos_capacity += total_private_network_mbps
        total_public_network_mbps = host.get(
            'total_public_network_mbps')
        # Bug fix: was testing total_private_network_mbps here.
        if total_public_network_mbps is not None:
            public_network_qos_capacity += total_public_network_mbps
    filters = {"deleted": False}
    instances = db.instance_get_all_by_filters(context, filters,
                                               "created_at", "desc")
    self.flavors = db.instance_type_get_all(context)
    usages = dict(
        ecus={"capacity": ecu_capacity, "ecus": []},
        servers={"servers": []},
        vcpus={"vcpus": []},
        local_gb={"capacity": local_gb_capacity, "local_gb": []},
        memory_mb={"capacity": memory_mb_capacity, "memory_mb": []},
        network_qos={
            "network_qos": [],
            "private_capacity": private_network_qos_capacity,
            "public_capacity": public_network_qos_capacity
        }
    )
    for instance in instances:
        for key in usages:
            # Note(hzzhoushaoyu) key in usages should be the same as
            # list key in each item. 'key' in second parameter is not the
            # same hierarchy as 'key' in first parameter in usage.
            self._make_items(req, usages[key], key, instance)
    # update floating IPs
    usages.update(floating_ips=self._get_floating_ips(context))
    return usages
def test_instance_get_all_by_filters_deleted(self):
    """An elevated context sees both live and destroyed instances."""
    inst1 = db.instance_create(self.context, {'reservation_id': 'a',
                                              'image_ref': 1,
                                              'host': 'host1'})
    inst2 = db.instance_create(self.context, {'reservation_id': 'b',
                                              'image_ref': 1,
                                              'host': 'host1'})
    db.instance_destroy(self.context.elevated(), inst1['uuid'])
    rows = db.instance_get_all_by_filters(self.context.elevated(), {})
    self.assertEqual(2, len(rows))
    ids = [row.id for row in rows]
    self.assertIn(inst1.id, ids)
    self.assertIn(inst2.id, ids)
    # The destroyed instance must carry the deleted flag.
    deleted_map = dict((row.id, row.deleted) for row in rows)
    self.assertTrue(deleted_map[inst1.id])
def fix_usage(cntxt, tenant): # Get per-user data for this tenant since usage is now per-user filter_object = {'project_id': tenant} instance_info = db.instance_get_all_by_filters(cntxt, filter_object) usage_by_resource = {} #resource_types = ['instances', 'cores', 'ram', 'security_groups'] states_to_ignore = ['error', 'deleted', 'building'] for instance in instance_info: user = instance['user_id'] # We need to build a list of users who have launched vm's even if the user # no longer exists. We can't use keystone here. if not usage_by_resource.has_key(user): usage_by_resource[user] = { } # Record that this user has once used resources if not instance['vm_state'] in states_to_ignore: user_resource = usage_by_resource[user] user_resource['instances'] = user_resource.get('instances', 0) + 1 user_resource['cores'] = user_resource.get('cores', 0) + instance['vcpus'] user_resource['ram'] = user_resource.get('ram', 0) + instance['memory_mb'] secgroup_list = db.security_group_get_by_project(cntxt, tenant) for group in secgroup_list: user = group.user_id if not usage_by_resource.has_key(user): usage_by_resource[user] = { } # Record that this user has once used resources user_resource = usage_by_resource[user] user_resource['security_groups'] = user_resource.get( 'security_groups', 0) + 1 # Correct the quota usage in the database for user in usage_by_resource: for resource in resource_types: usage = usage_by_resource[user].get(resource, 0) try: db.quota_usage_update(cntxt, tenant, user, resource, in_use=usage) except exception.QuotaUsageNotFound as e: print e print 'db.quota_usage_update(cntxt, %s, %s, %s, in_use=%s)' % \ (tenant, user, resource, usage)
def _add_disk_config(self, context, servers):
    """Add the API disk_config key to servers that don't have it,
    using values from their DB rows.
    """
    # Filter out any servers that already have the key set
    # (most likely from a remote zone)
    pending = [s for s in servers if API_DISK_CONFIG not in s]
    # Get DB information for servers
    uuids = [s['id'] for s in pending]
    rows = db.instance_get_all_by_filters(context, {'uuid': uuids})
    by_uuid = dict((row['uuid'], row) for row in rows)
    for server in pending:
        row = by_uuid.get(server['id'])
        if row:
            server[API_DISK_CONFIG] = disk_config_to_api(
                row[INTERNAL_DISK_CONFIG])
def _get_by_filters_impl(cls, context, filters, sort_key='created_at',
                         sort_dir='desc', limit=None, marker=None,
                         expected_attrs=None, use_slave=False,
                         sort_keys=None, sort_dirs=None):
    """Shared implementation for filtered instance listing.

    Routes to the multi-key sort query when sort_keys/sort_dirs are
    given, else the single-key query.
    """
    join_cols = _expected_cols(expected_attrs)
    if sort_keys or sort_dirs:
        rows = db.instance_get_all_by_filters_sort(
            context, filters, limit=limit, marker=marker,
            columns_to_join=join_cols,
            sort_keys=sort_keys, sort_dirs=sort_dirs)
    else:
        rows = db.instance_get_all_by_filters(
            context, filters, sort_key, sort_dir,
            limit=limit, marker=marker, columns_to_join=join_cols)
    return _make_instance_list(context, cls(), rows, expected_attrs)
def _add_disk_config(self, context, servers):
    """Populate API disk_config on servers missing it, from DB rows."""
    # Filter out any servers that already have the key set (most likely
    # from a remote zone)
    missing = [s for s in servers if API_DISK_CONFIG not in s]
    # Get DB information for servers
    rows = db.instance_get_all_by_filters(
        context, {'uuid': [s['id'] for s in missing]})
    lookup = dict((row['uuid'], row) for row in rows)
    for server in missing:
        db_server = lookup.get(server['id'])
        if db_server:
            value = db_server[INTERNAL_DISK_CONFIG]
            server[API_DISK_CONFIG] = disk_config_to_api(value)
def instance_backups(self, context, instance_uuid, schedule_id=None):
    """Get backups for the given instance."""
    filters = {'metadata': {meta.BACKUP_FOR_KEY: instance_uuid}}
    backups = db.instance_get_all_by_filters(context, filters)
    # Filter for schedule
    if schedule_id:
        backups = [b for b in backups
                   if schedule_id in self._get_backup_schedules(
                       context, b['uuid'])]
    # Sort by creation time
    ordered = sorted(backups, key=lambda b: b['created_at'])
    # Return the backup dicts
    return [self._get_backup_dict(context, b) for b in ordered]
def is_in_use(self):
    """Return True if any instance uses, or is resizing to/from,
    this flavor.
    """
    try:
        if 'id' not in self:
            flavor = self._flavor_get_by_flavor_id_from_db(
                self._context, self.flavorid)
            self.id = flavor['id']
    except exception.FlavorNotFound:
        # Flavor no longer exists, so nothing can be using it.
        return False
    instances = db.instance_get_all_by_filters(
        self._context, {'deleted': False, 'instance_type_id': self.id})
    if instances:
        return True
    # No instances currently set to this flavor; check for instances
    # being resized to or from this flavor.
    migrations = objects.MigrationList.get_by_filters(
        self._context, {'status': 'migrating', 'deleted': False})
    if not migrations:
        # No migrations in progress, flavor not in use
        return False
    migrating_uuids = set(
        [migration.instance_uuid for migration in migrations])
    attrs = ['info_cache', 'security_groups', 'system_metadata']
    candidates = objects.InstanceList.get_by_filters(
        self._context,
        {'uuid': migrating_uuids, 'deleted': False},
        expected_attrs=attrs, use_slave=True)
    for candidate in candidates:
        for flv in (candidate.new_flavor, candidate.old_flavor):
            if flv is not None and flv.flavorid == self.flavorid:
                return True
    return False
def fix_usage(cntxt, tenant): # Get per-user data for this tenant since usage is now per-user filter_object = {'project_id': tenant} instance_info = db.instance_get_all_by_filters(cntxt, filter_object) usage_by_resource = {} #resource_types = ['instances', 'cores', 'ram', 'security_groups'] states_to_ignore = ['error', 'deleted', 'building'] for instance in instance_info: user = instance['user_id'] # We need to build a list of users who have launched vm's even if the user # no longer exists. We can't use keystone here. if not usage_by_resource.has_key(user): usage_by_resource[user] = {} # Record that this user has once used resources if not instance['vm_state'] in states_to_ignore: user_resource = usage_by_resource[user] user_resource['instances'] = user_resource.get('instances', 0) + 1 user_resource['cores'] = user_resource.get('cores', 0) + instance['vcpus'] user_resource['ram'] = user_resource.get('ram', 0) + instance['memory_mb'] secgroup_list = db.security_group_get_by_project(cntxt, tenant) for group in secgroup_list: user = group.user_id if not usage_by_resource.has_key(user): usage_by_resource[user] = {} # Record that this user has once used resources user_resource = usage_by_resource[user] user_resource['security_groups'] = user_resource.get('security_groups', 0) + 1 # Correct the quota usage in the database for user in usage_by_resource: for resource in resource_types: usage = usage_by_resource[user].get(resource, 0) try: db.quota_usage_update(cntxt, tenant, user, resource, in_use=usage) except exception.QuotaUsageNotFound as e: print e print 'db.quota_usage_update(cntxt, %s, %s, %s, in_use=%s)' % \ (tenant, user, resource, usage)
def get_by_filters(cls, context, filters, sort_key="created_at",
                   sort_dir="desc", limit=None, marker=None,
                   expected_attrs=None, use_slave=False):
    """Return an InstanceList built from matching db rows."""
    rows = db.instance_get_all_by_filters(
        context, filters, sort_key, sort_dir,
        limit=limit, marker=marker,
        columns_to_join=_expected_cols(expected_attrs),
        use_slave=use_slave)
    return _make_instance_list(context, cls(), rows, expected_attrs)
def _GET_servers(self, req, res, body):
    """Fill in API disk_config on response servers that lack it."""
    context = req.environ['nova.context']
    servers = self._extract_resource_from_body(
        res, body,
        singular='server',
        singular_template=ServerDiskConfigTemplate(),
        plural='servers',
        plural_template=ServersDiskConfigTemplate())
    # Drop servers that already carry the key (most likely from a
    # remote zone).
    servers = [s for s in servers if self.API_DISK_CONFIG not in s]
    # Bulk-fetch DB rows for the remaining servers.
    db_rows = db.instance_get_all_by_filters(
        context, {'uuid': [s['id'] for s in servers]})
    db_by_uuid = dict((r['uuid'], r) for r in db_rows)
    for server in servers:
        db_server = db_by_uuid.get(server['id'])
        if db_server:
            server[self.API_DISK_CONFIG] = disk_config_to_api(
                db_server[self.INTERNAL_DISK_CONFIG])
    return res
def _create_load_stats(self, context, instance=None):
    """Generate load stats for the compute node record.

    With *instance* given, fold in just that instance; otherwise
    rebuild stats from every non-deleted instance on this host.
    """
    values = {}
    if instance:
        to_count = [instance]
    else:
        # Re-generating everything, so drop the old numbers first.
        self.stats.clear()
        # grab all instances that are not yet DELETED
        to_count = db.instance_get_all_by_filters(
            context, {'host': self.host, 'deleted': False})
    for inst in to_count:
        self.stats.add_stats_for_instance(inst)
    values['current_workload'] = self.stats.calculate_workload()
    values['running_vms'] = self.stats.num_instances
    values['vcpus_used'] = self.stats.num_vcpus_used
    values['stats'] = self.stats
    return values
def get_actual_usage(cntxt, tenant):
    """Tally real instance/core/ram/secgroup usage for *tenant*."""
    instances = db.instance_get_all_by_filters(
        cntxt, {'deleted': '', 'project_id': tenant})
    # calculate actual usage
    core_total = 0
    ram_total = 0
    for inst in instances:
        core_total += inst['vcpus']
        ram_total += inst['memory_mb']
    secgroup_total = len(
        db.security_group_get_by_project(cntxt, tenant))
    if secgroup_total == 0:
        # Every tenant uses quota for default security group
        secgroup_total = 1
    return OrderedDict((
        ("actual_instance_count", len(instances)),
        ("actual_core_count", core_total),
        ("actual_ram_count", ram_total),
        ("actual_secgroup_count", secgroup_total)))
def _create_load_stats(self, context, instance=None):
    """Build load-stat values for the compute node record.

    A single *instance* updates stats incrementally; without one the
    stats are cleared and rebuilt from all non-deleted instances on
    this host.
    """
    values = {}
    if instance:
        sources = [instance]
    else:
        # Full rebuild: clear stale numbers before re-counting.
        self.stats.clear()
        # grab all instances that are not yet DELETED
        sources = db.instance_get_all_by_filters(
            context, {'host': self.host, 'deleted': False})
    for inst in sources:
        self.stats.update_stats_for_instance(inst)
    values['current_workload'] = self.stats.calculate_workload()
    values['running_vms'] = self.stats.num_instances
    values['vcpus_used'] = self.stats.num_vcpus_used
    values['stats'] = self.stats
    return values
def test_instance_get_all_by_filters(self):
    """An empty filter dict returns every created instance.

    Fix: the assertion was ``assertTrue(2, len(result))``, which
    always passes because the second argument is only the failure
    message — the count was never actually checked.  It is now a real
    ``assertEqual``.
    """
    args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
    db.instance_create(self.context, args)
    db.instance_create(self.context, args)
    result = db.instance_get_all_by_filters(self.context, {})
    self.assertEqual(2, len(result))