def test_populate_user_id_instance_get_fail(self, mock_inst_get):
    """Verify populate_user_id skips over a cell whose DB errors out."""
    cell_maps = []
    celldbs = fixtures.CellDatabases()
    # Build two cell mappings, each backed by its own fake database.
    for cell_uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
        mapping = cell_mapping.CellMapping(context=self.context,
                                           uuid=cell_uuid,
                                           database_connection=cell_uuid,
                                           transport_url='fake://')
        mapping.create()
        cell_maps.append(mapping)
        celldbs.add_cell_database(cell_uuid)
    self.useFixture(celldbs)

    # Seed each cell with one instance plus a mapping lacking user_id.
    for cell in cell_maps:
        with context.target_cell(self.context, cell) as cctxt:
            inst = instance.Instance(cctxt,
                                     project_id=self.context.project_id,
                                     user_id=self.context.user_id)
            inst.create()
            create_mapping(project_id=self.context.project_id,
                           user_id=None,
                           cell_id=cell.id,
                           instance_uuid=inst.uuid)

    # First cell raises (down/error); second returns its instance.
    mock_inst_get.side_effect = [test.TestingException(),
                                 instance.InstanceList(objects=[inst])]

    found, done = instance_mapping.populate_user_id(self.context, 1000)
    # Both mappings are found, but only the reachable cell's mapping
    # gets its user_id populated — the broken cell is skipped, not fatal.
    self.assertEqual(2, found)
    self.assertEqual(1, done)
def fake_compute_get_all(*args, **kwargs):
    """Stub returning two instances, each carrying two security groups."""
    common = {
        'id': 1, 'description': 'foo', 'user_id': 'bar',
        'project_id': 'baz', 'deleted': False, 'deleted_at': None,
        'updated_at': None, 'created_at': None,
    }
    db_list = []
    for idx, inst_uuid in enumerate((UUID1, UUID2)):
        # Group names encode instance index and group index: fake-<i>-<g>.
        groups = [dict(common, name='fake-%d-%d' % (idx, g))
                  for g in range(2)]
        db_list.append(fakes.stub_instance(idx + 1, uuid=inst_uuid,
                                           security_groups=groups))
    return instance_obj._make_instance_list(
        args[1], instance_obj.InstanceList(), db_list,
        ['metadata', 'system_metadata', 'security_groups', 'info_cache'])
def fake_compute_get_all(*args, **kwargs):
    """Stub returning two instances sharing the same network info cache."""
    stubs = [fakes.stub_instance(n, uuid=u, nw_cache=NW_CACHE)
             for n, u in ((1, UUID1), (2, UUID2))]
    return instance_obj._make_instance_list(
        args[1], instance_obj.InstanceList(), stubs,
        instance_obj.INSTANCE_DEFAULT_FIELDS)
def fake_compute_get_all(*args, **kwargs):
    """Stub returning two instances pinned to distinct hosts and nodes."""
    stubs = [fakes.stub_instance(n, uuid=u,
                                 host='host-%d' % n,
                                 node='node-%d' % n)
             for n, u in ((1, UUID1), (2, UUID2))]
    return instance_obj._make_instance_list(
        args[1], instance_obj.InstanceList(), stubs,
        instance_obj.INSTANCE_DEFAULT_FIELDS)
def fake_compute_get_all(*args, **kwargs):
    """Stub returning two ACTIVE instances co-located on 'all-host'."""
    stubs = [fakes.stub_instance(n, uuid=u, host='all-host',
                                 vm_state=vm_states.ACTIVE)
             for n, u in ((1, UUID1), (2, UUID2))]
    return instance_obj._make_instance_list(
        args[1], instance_obj.InstanceList(), stubs,
        instance_obj.INSTANCE_DEFAULT_FIELDS)
def fake_compute_get_all(*args, **kwargs):
    """Stub returning two instances with differing launch timestamps."""
    stubs = [
        fakes.stub_instance(2, uuid=UUID1,
                            launched_at=DATE2, terminated_at=DATE3),
        fakes.stub_instance(3, uuid=UUID2,
                            launched_at=DATE1, terminated_at=DATE3),
    ]
    return instance_obj._make_instance_list(
        args[1], instance_obj.InstanceList(), stubs,
        instance_obj.INSTANCE_DEFAULT_FIELDS)
def fake_compute_get_all(*args, **kwargs):
    """Stub returning two instances with distinct task/vm/power states."""
    stubs = [fakes.stub_instance(n, uuid=u,
                                 task_state='task-%d' % n,
                                 vm_state='vm-%d' % n,
                                 power_state=n)
             for n, u in ((1, UUID1), (2, UUID2))]
    return instance_obj._make_instance_list(
        args[1], instance_obj.InstanceList(), stubs,
        instance_obj.INSTANCE_DEFAULT_FIELDS)
def test_fill_faults(self):
    """fill_faults() attaches each instance's latest fault and returns
    the uuids that actually had one, without dirtying the objects.
    """
    ctxt = context.get_admin_context()
    self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
    inst1 = instance.Instance()
    inst1.uuid = 'uuid1'
    inst2 = instance.Instance()
    inst2.uuid = 'uuid2'
    insts = [inst1, inst2]
    # Start from a clean change-set so we can assert it stays empty.
    for inst in insts:
        inst.obj_reset_changes()
    # Only uuid1 has a fault on record; uuid2 should end up with None.
    db_faults = {
        'uuid1': [{'id': 123,
                   'instance_uuid': 'uuid1',
                   'code': 456,
                   'message': 'Fake message',
                   'details': 'No details',
                   'host': 'foo',
                   'deleted': False,
                   'deleted_at': None,
                   'updated_at': None,
                   'created_at': None,
                   }]}
    db.instance_fault_get_by_instance_uuids(
        ctxt, [x.uuid for x in insts],).AndReturn(db_faults)
    self.mox.ReplayAll()
    inst_list = instance.InstanceList()
    inst_list._context = ctxt
    inst_list.objects = insts
    faulty = inst_list.fill_faults()
    self.assertEqual(faulty, ['uuid1'])
    self.assertEqual(inst_list[0].fault.message,
                     db_faults['uuid1'][0]['message'])
    self.assertEqual(inst_list[1].fault, None)
    # Filling faults must not mark any instance field as changed.
    for inst in inst_list:
        self.assertEqual(inst.obj_what_changed(), set())
def test_sync_power_states_instance_not_found(self):
    """An instance missing from the hypervisor is synced as NOSTATE."""
    db_instance = fake_instance.fake_db_instance()
    ctxt = context.get_admin_context()
    instance_list = instance_obj._make_instance_list(
        ctxt, instance_obj.InstanceList(), [db_instance], None)
    instance = instance_list[0]
    self.mox.StubOutWithMock(instance_obj.InstanceList, 'get_by_host')
    self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances')
    self.mox.StubOutWithMock(vm_utils, 'lookup')
    self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
    instance_obj.InstanceList.get_by_host(
        ctxt, self.compute.host, use_slave=True).AndReturn(instance_list)
    self.compute.driver.get_num_instances().AndReturn(1)
    # lookup() returning None simulates the VM vanishing from the host,
    # which should drive a power-state sync to NOSTATE.
    vm_utils.lookup(self.compute.driver._session, instance['name'],
                    False).AndReturn(None)
    self.compute._sync_instance_power_state(ctxt, instance,
                                            power_state.NOSTATE)
    self.mox.ReplayAll()
    self.compute._sync_power_states(ctxt)
def fake_compute_get_all(*args, **kwargs):
    """Stub returning a minimal two-instance list with default fields."""
    stubs = [fakes.stub_instance(n) for n in (1, 2)]
    return instance_obj._make_instance_list(
        args[1], instance_obj.InstanceList(), stubs,
        instance_obj.INSTANCE_DEFAULT_FIELDS)
def _get_servers(self, req, is_detail):
    """Returns a list of servers, based on any search options specified.

    Translates/validates the request's query parameters into compute-API
    filters, enforces tenant/admin visibility rules, then builds either
    the detail or index view of the resulting instance list.
    """
    search_opts = {}
    search_opts.update(req.GET)
    context = req.environ['nova.context']
    # Drop any filters the caller's policy does not allow.
    remove_invalid_options(context, search_opts,
                           self._get_server_search_options())

    # Verify search by 'status' contains a valid status.
    # Convert it to filter by vm_state or task_state for compute_api.
    status = search_opts.pop('status', None)
    if status is not None:
        vm_state, task_state = common.task_and_vm_state_from_status(status)
        if not vm_state and not task_state:
            # Unknown status maps to nothing — short-circuit with an
            # empty result rather than querying.
            return {'servers': []}
        search_opts['vm_state'] = vm_state
        # When we search by vm state, task state will return 'default'.
        # So we don't need task_state search_opt.
        if 'default' not in task_state:
            search_opts['task_state'] = task_state

    if 'changes_since' in search_opts:
        try:
            parsed = timeutils.parse_isotime(search_opts['changes_since'])
        except ValueError:
            msg = _('Invalid changes_since value')
            raise exc.HTTPBadRequest(explanation=msg)
        search_opts['changes_since'] = parsed

    # By default, compute's get_all() will return deleted instances.
    # If an admin hasn't specified a 'deleted' search option, we need
    # to filter out deleted instances by setting the filter ourselves.
    # ... Unless 'changes_since' is specified, because 'changes_since'
    # should return recently deleted images according to the API spec.
    if 'deleted' not in search_opts:
        if 'changes_since' not in search_opts:
            # No 'changes_since', so we only want non-deleted servers
            search_opts['deleted'] = False

    if 'changes_since' in search_opts:
        # compute_api expects the dashed spelling of this filter key.
        search_opts['changes-since'] = search_opts.pop('changes_since')

    if search_opts.get("vm_state") == ['deleted']:
        if context.is_admin:
            search_opts['deleted'] = True
        else:
            msg = _("Only administrators may list deleted instances")
            raise exc.HTTPForbidden(explanation=msg)

    # If tenant_id is passed as a search parameter this should
    # imply that all_tenants is also enabled unless explicitly
    # disabled. Note that the tenant_id parameter is filtered out
    # by remove_invalid_options above unless the requestor is an
    # admin.
    if 'tenant_id' in search_opts and not 'all_tenants' in search_opts:
        # We do not need to add the all_tenants flag if the tenant
        # id associated with the token is the tenant id
        # specified. This is done so a request that does not need
        # the all_tenants flag does not fail because of lack of
        # policy permission for compute:get_all_tenants when it
        # doesn't actually need it.
        if context.project_id != search_opts.get('tenant_id'):
            search_opts['all_tenants'] = 1

    # If all tenants is passed with 0 or false as the value
    # then remove it from the search options. Nothing passed as
    # the value for all_tenants is considered to enable the feature
    all_tenants = search_opts.get('all_tenants')
    if all_tenants:
        try:
            if not strutils.bool_from_string(all_tenants, True):
                del search_opts['all_tenants']
        except ValueError as err:
            raise exception.InvalidInput(str(err))

    if 'all_tenants' in search_opts:
        policy.enforce(context, 'compute:get_all_tenants',
                       {'project_id': context.project_id,
                        'user_id': context.user_id})
        del search_opts['all_tenants']
    else:
        # Without all_tenants, scope results to the caller's project
        # (or user, for project-less contexts).
        if context.project_id:
            search_opts['project_id'] = context.project_id
        else:
            search_opts['user_id'] = context.user_id

    limit, marker = common.get_limit_and_marker(req)
    try:
        instance_list = self.compute_api.get_all(
            context,
            search_opts=search_opts,
            limit=limit,
            marker=marker,
            want_objects=True,
            expected_attrs=['pci_devices'])
    except exception.MarkerNotFound:
        msg = _('marker [%s] not found') % marker
        raise exc.HTTPBadRequest(explanation=msg)
    except exception.FlavorNotFound:
        # An unknown flavor filter yields an empty list, not an error.
        log_msg = _("Flavor '%s' could not be found ")
        LOG.debug(log_msg, search_opts['flavor'])
        # TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
        instance_list = instance_obj.InstanceList(objects=[])

    if is_detail:
        # The detailed view includes each instance's latest fault.
        instance_list.fill_faults()
        response = self._view_builder.detail(req, instance_list)
    else:
        response = self._view_builder.index(req, instance_list)
    req.cache_db_instances(instance_list)
    return response
def get_all(*args, **kwargs):
    """Stub wrapping the enclosing-scope ``instances`` in an InstanceList."""
    default_fields = instance_obj.INSTANCE_DEFAULT_FIELDS
    ctxt = args[1]
    return instance_obj._make_instance_list(
        ctxt, instance_obj.InstanceList(), instances, default_fields)