Example #1
0
    def test_remove_stale_clients(self, mock_rpcclient, mock_create, mock_get):
        """Stale per-cell clients are dropped; recently-used ones survive.

        Drives the ClientRouter cache with a frozen clock: builds clients
        for three cells, advances time past the staleness window (80s here)
        and verifies that only clients used since the advance - plus the
        default client, which is never expired - remain cached.
        """
        # Freeze time so staleness is controlled purely by
        # advance_time_seconds() below.
        t0 = datetime.datetime(2016, 8, 9, 0, 0, 0)
        time_fixture = self.useFixture(utils_fixture.TimeFixture(t0))

        default_client = mock.Mock()
        ctxt = mock.Mock()

        cm1 = objects.CellMapping(uuid=uuids.cell_mapping1,
                                  transport_url='fake:///')
        cm2 = objects.CellMapping(uuid=uuids.cell_mapping2,
                                  transport_url='fake:///')
        cm3 = objects.CellMapping(uuid=uuids.cell_mapping3,
                                  transport_url='fake:///')
        # One InstanceMapping lookup per by_instance() call below, in order
        # (cell3 is looked up twice: once to add, once to refresh).
        mock_get.side_effect = [objects.InstanceMapping(cell_mapping=cm1),
                                objects.InstanceMapping(cell_mapping=cm2),
                                objects.InstanceMapping(cell_mapping=cm3),
                                objects.InstanceMapping(cell_mapping=cm3)]
        instance1 = objects.Instance(uuid=uuids.instance1)
        instance2 = objects.Instance(uuid=uuids.instance2)
        instance3 = objects.Instance(uuid=uuids.instance3)

        router = rpc.ClientRouter(default_client)
        cell1_client = router.by_instance(ctxt, instance1)
        cell2_client = router.by_instance(ctxt, instance2)

        # default client, cell1 client, cell2 client
        self.assertEqual(3, len(router.clients))
        expected = {'default': default_client,
                    uuids.cell_mapping1: cell1_client,
                    uuids.cell_mapping2: cell2_client}
        for client_id, client in expected.items():
            self.assertEqual(client, router.clients[client_id].client)

        # expire cell1 client and cell2 client
        time_fixture.advance_time_seconds(80)

        # add cell3 client (fresh, so it must survive the purge)
        cell3_client = router.by_instance(ctxt, instance3)

        router._remove_stale_clients(ctxt)

        # default client, cell3 client
        expected = {'default': default_client,
                    uuids.cell_mapping3: cell3_client}
        self.assertEqual(2, len(router.clients))
        for client_id, client in expected.items():
            self.assertEqual(client, router.clients[client_id].client)

        # expire cell3 client
        time_fixture.advance_time_seconds(80)

        # access cell3 client to refresh it
        cell3_client = router.by_instance(ctxt, instance3)

        router._remove_stale_clients(ctxt)

        # default client and cell3 client should be there
        self.assertEqual(2, len(router.clients))
        for client_id, client in expected.items():
            self.assertEqual(client, router.clients[client_id].client)
Example #2
0
    def test_deleting_instance_at_the_same_time(self, mock_get_i, mock_get_im,
                                                mock_get_br):
        """This tests the scenario where another request is trying to delete
        the instance record at the same time we are, while the instance is
        booting. An example of this: while the create and delete are running at
        the same time, the delete request deletes the build request, the create
        request finds the build request already deleted when it tries to delete
        it. The create request deletes the instance record and then delete
        request tries to lookup the instance after it deletes the build
        request. Its attempt to lookup the instance fails because the create
        request already deleted it.
        """
        # First lookup at the beginning of the delete request in the
        # ServersController succeeds, second lookup to handle "delete while
        # booting" in compute/api fails after the conductor has deleted it.
        br_not_found = exception.BuildRequestNotFound(uuid=self.server['id'])
        mock_get_br.side_effect = [self.br, br_not_found]
        # Simulate the instance transitioning from having no cell assigned to
        # having a cell assigned while the delete request is being processed.
        # First lookup of the instance mapping has the instance unmapped (no
        # cell) and subsequent lookups have the instance mapped to cell1.
        no_cell_im = objects.InstanceMapping(
            context=self.ctxt, instance_uuid=self.server['id'],
            cell_mapping=None)
        has_cell_im = objects.InstanceMapping(
            context=self.ctxt, instance_uuid=self.server['id'],
            cell_mapping=self.cell_mappings['cell1'])
        mock_get_im.side_effect = [
            no_cell_im, has_cell_im, has_cell_im, has_cell_im]
        # Simulate that the instance object has been created by the conductor
        # in the create path while the delete request is being processed.
        # First lookups are before the instance has been deleted and the last
        # lookup is after the conductor has deleted the instance. Use the build
        # request to make an instance object for testing.
        i = self.br.get_new_instance(self.ctxt)
        i_not_found = exception.InstanceNotFound(instance_id=self.server['id'])
        mock_get_i.side_effect = [i, i, i, i_not_found]

        # Simulate that the conductor is running instance_destroy at the same
        # time as we are.
        def fake_instance_destroy(*args, **kwargs):
            # NOTE(melwitt): This is a misleading exception, as it is not only
            # raised when a constraint on 'host' is not met, but also when two
            # instance_destroy calls are racing. In this test, the soft delete
            # returns 0 rows affected because another request soft deleted the
            # record first.
            raise exception.ObjectActionError(
                action='destroy', reason='host changed')

        self.stub_out(
            'nova.objects.instance.Instance.destroy', fake_instance_destroy)
        # FIXME(melwitt): Delete request fails due to the AttributeError.
        # This pins the current (buggy) behavior: a 500 with AttributeError
        # leaking into the fault message, rather than a clean 204/404.
        ex = self.assertRaises(
            client.OpenStackApiException, self._delete_server, self.server)
        self.assertEqual(500, ex.response.status_code)
        self.assertIn('AttributeError', str(ex))
Example #3
0
    def test_map_instances_duplicates(self):
        """An already-mapped instance is reported verbosely and skipped."""
        ctx = context.RequestContext('fake-user', 'fake_project')
        target_cell_uuid = uuidutils.generate_uuid()
        mapping = objects.CellMapping(
                ctx, uuid=target_cell_uuid, name='fake',
                transport_url='fake://', database_connection='fake://')
        mapping.create()
        inst_uuids = [uuidutils.generate_uuid() for _ in range(3)]
        for inst_uuid in inst_uuids:
            objects.Instance(ctx, project_id=ctx.project_id,
                             uuid=inst_uuid).create()

        # Pre-map the first instance so map_instances hits a duplicate.
        objects.InstanceMapping(ctx, project_id=ctx.project_id,
                instance_uuid=inst_uuids[0],
                cell_mapping=mapping).create()

        self.commands.map_instances(target_cell_uuid, verbose=True)
        captured = sys.stdout.getvalue().strip()

        self.assertIn('%s already mapped to cell' % inst_uuids[0], captured)

        # Every instance, duplicate included, must end up mapped.
        for inst_uuid in inst_uuids:
            found = objects.InstanceMapping.get_by_instance_uuid(ctx,
                    inst_uuid)
            self.assertEqual(ctx.project_id, found.project_id)
    def test_create(self, create_in_db):
        """create() with a CellMapping object sends its id as cell_id."""
        db_mapping = get_db_mapping()
        create_in_db.return_value = db_mapping
        inst_uuid = db_mapping['instance_uuid']
        cell_id = db_mapping['cell_mapping']['id']

        im = objects.InstanceMapping(self.context)
        im.instance_uuid = inst_uuid
        im.cell_mapping = objects.CellMapping(self.context, id=cell_id)
        im.project_id = db_mapping['project_id']
        im.user_id = db_mapping['user_id']
        im.create()

        # queued_for_delete defaults to False on create.
        create_in_db.assert_called_once_with(
            self.context, {
                'instance_uuid': inst_uuid,
                'queued_for_delete': False,
                'cell_id': cell_id,
                'project_id': db_mapping['project_id'],
                'user_id': db_mapping['user_id']
            })
        self.compare_obj(
            im,
            db_mapping,
            subs={'cell_mapping': 'cell_id'},
            comparators={'cell_mapping': self._check_cell_map_value})
Example #5
0
    def test_by_instance(self, mock_rpcclient, mock_create, mock_get):
        """A per-cell client is built on first use and cached afterwards."""
        base_client = mock.Mock()
        built_client = mock.Mock()
        mock_rpcclient.return_value = built_client
        ctx = mock.Mock()
        cell_map = objects.CellMapping(uuid=uuids.cell_mapping,
                                       transport_url='fake:///')
        mock_get.return_value = objects.InstanceMapping(cell_mapping=cell_map)
        inst = objects.Instance(uuid=uuids.instance)

        router = rpc.ClientRouter(base_client)

        # First lookup: the router must construct a brand-new cell client
        # carrying over target/version_cap/serializer from the default one.
        returned = router.by_instance(ctx, inst)
        mock_get.assert_called_once_with(ctx, inst.uuid)
        mock_rpcclient.assert_called_once_with(
            mock_create.return_value,
            base_client.target,
            version_cap=base_client.version_cap,
            serializer=base_client.serializer)
        self.assertEqual(built_client, returned)

        # Second lookup: the cached client is reused, nothing new is built.
        for m in (mock_rpcclient, mock_create, mock_get):
            m.reset_mock()
        returned = router.by_instance(ctx, inst)
        mock_get.assert_called_once_with(ctx, inst.uuid)
        mock_rpcclient.assert_not_called()
        mock_create.assert_not_called()
        self.assertEqual(built_client, returned)
Example #6
0
    def test_destroy(self, destroy_in_db):
        """destroy() hands exactly the instance uuid to the DB layer."""
        inst_uuid = uuidutils.generate_uuid()
        im = objects.InstanceMapping(self.context)
        im.instance_uuid = inst_uuid
        im.destroy()
        destroy_in_db.assert_called_once_with(self.context, inst_uuid)
    def test_reqspec_buildreq_instmapping_single_transaction(
            self, mock_create):
        """RequestSpec/BuildRequest/InstanceMapping creation is atomic.

        If InstanceMapping.create blows up mid-way, neither the RequestSpec
        nor the BuildRequest record may be left behind in the API database.
        """
        # Simulate a DBError during an INSERT by raising an exception from the
        # InstanceMapping.create method.
        mock_create.side_effect = test.TestingException('oops')

        ctxt = nova_context.RequestContext('fake-user', 'fake-project')
        rs = objects.RequestSpec(context=ctxt, instance_uuid=uuids.inst)
        # project_id and instance cannot be None
        br = objects.BuildRequest(context=ctxt,
                                  instance_uuid=uuids.inst,
                                  project_id=ctxt.project_id,
                                  instance=objects.Instance())
        im = objects.InstanceMapping(context=ctxt, instance_uuid=uuids.inst)

        # The helper must propagate the failure...
        self.assertRaises(test.TestingException,
                          compute_api.API._create_reqspec_buildreq_instmapping,
                          ctxt, rs, br, im)

        # Since the instance mapping failed to INSERT, we should not have
        # written a request spec record or a build request record.
        self.assertRaises(exception.RequestSpecNotFound,
                          objects.RequestSpec.get_by_instance_uuid, ctxt,
                          uuids.inst)
        self.assertRaises(exception.BuildRequestNotFound,
                          objects.BuildRequest.get_by_instance_uuid, ctxt,
                          uuids.inst)
Example #8
0
    def test_map_instances_duplicates(self):
        """map_instances skips an already-mapped instance without failing."""
        ctx = context.RequestContext('fake-user', 'fake_project')
        target_uuid = uuidutils.generate_uuid()
        mapping = objects.CellMapping(ctx,
                                      uuid=target_uuid,
                                      name='fake',
                                      transport_url='fake://',
                                      database_connection='fake://')
        mapping.create()
        inst_uuids = [uuidutils.generate_uuid() for _ in range(3)]
        for inst_uuid in inst_uuids:
            objects.Instance(ctx, project_id=ctx.project_id,
                             uuid=inst_uuid).create()

        # Pre-map the first instance so the command hits a duplicate.
        objects.InstanceMapping(ctx,
                                project_id=ctx.project_id,
                                instance_uuid=inst_uuids[0],
                                cell_mapping=mapping).create()

        self.commands.map_instances(target_uuid)

        # Every instance, duplicate included, must be mapped...
        for inst_uuid in inst_uuids:
            found = objects.InstanceMapping.get_by_instance_uuid(
                ctx, inst_uuid)
            self.assertEqual(ctx.project_id, found.project_id)

        # ...and mapped exactly once: three instances, three mappings.
        mappings = objects.InstanceMappingList.get_by_project_id(
            ctx, ctx.project_id)
        self.assertEqual(3, len(mappings))
Example #9
0
    def setUp(self):
        """Wire up the server-tags controller and expected policy contexts."""
        super(ServerTagsPolicyTest, self).setUp()
        self.controller = server_tags.ServerTagsController()
        # Server tags require microversion 2.26.
        self.req = fakes.HTTPRequest.blank('', version='2.26')
        self.mock_get = self.useFixture(
            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
        self.instance = fake_instance.fake_instance_obj(
            self.project_member_context,
            id=1,
            uuid=uuids.fake_id,
            vm_state=vm_states.ACTIVE,
            project_id=self.project_id)
        self.mock_get.return_value = self.instance
        # Short-circuit the instance-mapping lookup so the controller
        # targets a cell without hitting the API database.
        inst_map = objects.InstanceMapping(
            project_id=self.project_id,
            cell_mapping=objects.CellMappingList.get_all(
                context.get_admin_context())[1])
        self.stub_out('nova.objects.InstanceMapping.get_by_instance_uuid',
                      lambda s, c, u: inst_map)

        # With the legacy rule and no scope checks, all admins, project
        # members, project readers and other project roles (the legacy rule
        # allows the server owner - same project id, no role check) are able
        # to perform operations on server tags.
        self.project_member_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.project_admin_context, self.project_member_context,
            self.project_reader_context, self.project_foo_context
        ]
        self.project_reader_authorized_contexts = (
            self.project_member_authorized_contexts)
Example #10
0
 def setUp(self):
     """Build the controller and stub a canned instance-mapping lookup."""
     super(ServerTagsTest, self).setUp()
     self.controller = server_tags.ServerTagsController()
     mapping = objects.InstanceMapping(
         cell_mapping=objects.CellMappingList.get_all(
             context.get_admin_context())[1])
     self.stub_out('nova.objects.InstanceMapping.get_by_instance_uuid',
                   lambda s, c, u: mapping)
Example #11
0
    def test_get_by_instance_uuid(self, uuid_from_db):
        """get_by_instance_uuid() queries the DB and hydrates the object."""
        db_mapping = get_db_mapping()
        uuid_from_db.return_value = db_mapping
        inst_uuid = db_mapping['instance_uuid']

        im = objects.InstanceMapping().get_by_instance_uuid(
            self.context, inst_uuid)

        uuid_from_db.assert_called_once_with(self.context, inst_uuid)
        self.compare_obj(im, db_mapping)
    def _create_instances(self, pre_newton=2, deleted=0, total=5,
                          target_cell=None):
        """Create ``total`` test instances in ``target_cell``.

        :param pre_newton: number of instances to downgrade to pre-Newton
            behaviour by deleting their VirtualInterfaceList records
        :param deleted: number of instances (taken from the end of the
            created list) to destroy after creation
        :param total: number of instances to create
        :param target_cell: cell mapping to create the instances in;
            defaults to the second cell
        """
        if not target_cell:
            target_cell = self.cells[1]

        instances = []
        with context.target_cell(self.context, target_cell) as cctxt:
            flav_dict = objects.Flavor._flavor_get_from_db(cctxt, 1)
            flavor = objects.Flavor(**flav_dict)
            for i in range(0, total):
                inst = objects.Instance(
                    context=cctxt,
                    project_id=self.api.project_id,
                    user_id=FAKE_UUID,
                    vm_state='active',
                    flavor=flavor,
                    created_at=datetime.datetime(1985, 10, 25, 1, 21, 0),
                    launched_at=datetime.datetime(1985, 10, 25, 1, 22, 0),
                    host=self.computes['compute2'].host,
                    hostname='%s-inst%i' % (target_cell.name, i))
                inst.create()

                # Instances need an info cache before interfaces are
                # attached below.
                info_cache = objects.InstanceInfoCache(context=cctxt)
                info_cache.updated_at = timeutils.utcnow()
                info_cache.network_info = network_model.NetworkInfo()
                info_cache.instance_uuid = inst.uuid
                info_cache.save()

                instances.append(inst)

                # The API-DB mapping ties each instance to its cell.
                im = objects.InstanceMapping(context=cctxt,
                    project_id=inst.project_id,
                    user_id=inst.user_id,
                    instance_uuid=inst.uuid,
                    cell_mapping=target_cell)
                im.create()

        # Attach fake interfaces to instances
        network_id = list(self.neutron._networks.keys())[0]
        for i in range(0, len(instances)):
            for k in range(0, 4):
                self.api.attach_interface(instances[i].uuid,
                    {"interfaceAttachment": {"net_id": network_id}})

        with context.target_cell(self.context, target_cell) as cctxt:
            # Fake the pre-newton behaviour by removing the
            # VirtualInterfacesList objects.
            if pre_newton:
                for i in range(0, pre_newton):
                    _delete_vif_list(cctxt, instances[i].uuid)

        if deleted:
            # Delete from the end of active instances list
            for i in range(total - deleted, total):
                instances[i].destroy()

        self.instances += instances
    def test_get_by_project_id(self):
        """get_by_project_id returns each cell hosting the project once."""
        ctxt = context.RequestContext()
        cell1 = objects.CellMapping.get_by_uuid(ctxt, create_mapping().uuid)
        cell2 = objects.CellMapping.get_by_uuid(ctxt, create_mapping().uuid)
        cell3 = objects.CellMapping.get_by_uuid(ctxt, create_mapping().uuid)
        all_cells = [cell1, cell2, cell3]

        # proj1: five instances, all in cell1.
        for _ in range(5):
            objects.InstanceMapping(
                context=ctxt,
                instance_uuid=uuidutils.generate_uuid(),
                cell_mapping=cell1,
                project_id='proj1').create()

        # proj2: five instances alternating between cell1 and cell2.
        for i in range(5):
            objects.InstanceMapping(
                context=ctxt,
                instance_uuid=uuidutils.generate_uuid(),
                cell_mapping=all_cells[i % 2],
                project_id='proj2').create()

        # One mapping has no cell; the query must filter out mappings
        # that are not tied to any cell.
        objects.InstanceMapping(
            context=ctxt,
            instance_uuid=uuidutils.generate_uuid(),
            cell_mapping=None,
            project_id='proj2').create()

        # proj1 lives only in cell1, and cell1 is reported only once
        # despite five mappings pointing at it.
        proj1_cells = objects.CellMappingList.get_by_project_id(ctxt, 'proj1')
        self.assertEqual(1, len(proj1_cells))
        self.assertEqual(cell1.uuid, proj1_cells[0].uuid)

        proj2_cells = objects.CellMappingList.get_by_project_id(ctxt, 'proj2')
        self.assertEqual(2, len(proj2_cells))
        self.assertEqual(sorted([cell1.uuid, cell2.uuid]),
                         sorted([cm.uuid for cm in proj2_cells]))
Example #14
0
    def test_get_by_instance_uuid_cell_mapping_none(self, uuid_from_db):
        """An unmapped instance hydrates with cell_mapping left unset/None."""
        db_mapping = get_db_mapping(cell_mapping=None, cell_id=None)
        uuid_from_db.return_value = db_mapping
        inst_uuid = db_mapping['instance_uuid']

        im = objects.InstanceMapping().get_by_instance_uuid(
                self.context, inst_uuid)

        uuid_from_db.assert_called_once_with(self.context, inst_uuid)
        self.compare_obj(im, db_mapping, subs={'cell_mapping': 'cell_id'})
 def test_get_sorted_with_purged_instance(self):
     """Test that we handle a mapped but purged instance.

     An InstanceMapping that points at a cell whose instance record has
     been purged must surface MarkerNotFound when that uuid is used as
     the paging marker.
     """
     # NOTE(review): the InstanceMapping field is 'cell_mapping'; the
     # original passed 'cell=', which oslo.versionedobjects silently
     # accepts as a plain attribute, leaving the mapping cell-less and
     # defeating the "mapped but purged" premise of this test.
     im = objects.InstanceMapping(self.context,
                                  instance_uuid=uuids.missing,
                                  project_id=self.context.project_id,
                                  user_id=self.context.user_id,
                                  cell_mapping=self.cells[0])
     im.create()
     # list() forces the lazy generator so the marker lookup runs.
     self.assertRaises(
         exception.MarkerNotFound, list,
         instance_list.get_instances_sorted(self.context, {}, None,
                                            uuids.missing, [], None, None))
    def test_detail_with_cell_failures(self, mock_sg,
                                       mock_get_by_instance_uuids):
        """A down cell yields empty volumes_attached instead of a 500."""

        # Two instances in two different cells.
        mock_get_by_instance_uuids.return_value = [
            objects.InstanceMapping(instance_uuid=UUID1,
                                    cell_mapping=objects.CellMapping(
                                        uuid=uuids.cell1,
                                        transport_url='fake://nowhere/',
                                        database_connection=uuids.cell1)),
            objects.InstanceMapping(instance_uuid=UUID2,
                                    cell_mapping=objects.CellMapping(
                                        uuid=uuids.cell2,
                                        transport_url='fake://nowhere/',
                                        database_connection=uuids.cell2))
        ]
        bdm = fake_bdms_get_all_by_instance_uuids()
        fake_bdm = fake_block_device.fake_bdm_object(
            nova_context.RequestContext, bdm[0])
        # cell1 returns BDMs normally; cell2 simulates a down cell by
        # returning the raised-exception sentinel.
        mock_sg.return_value = {
            uuids.cell1: {
                UUID1: [fake_bdm]
            },
            uuids.cell2: nova_context.raised_exception_sentinel
        }

        res = self._make_request('/detail')
        mock_get_by_instance_uuids.assert_called_once_with(
            test.MatchType(nova_context.RequestContext), [UUID1, UUID2])

        self.assertEqual(200, res.status_int)

        # We get an empty list for the second instance, which is in the
        # down cell; the failure is only surfaced in the logs.
        for i, server in enumerate(self._get_servers(res.body)):
            actual = server.get('%svolumes_attached' % self.prefix)
            if i == 0:
                self.assertEqual(self.exp_volumes_detail[i], actual)
            else:
                self.assertEqual([], actual)
    def test_detail(self, mock_get_by_instance_uuids):
        """Both servers live in a healthy cell; each shows its BDM data."""
        cell1_kwargs = dict(uuid=uuids.cell1,
                            transport_url='fake://nowhere/',
                            database_connection=uuids.cell1)
        mock_get_by_instance_uuids.return_value = [
            objects.InstanceMapping(
                instance_uuid=inst_uuid,
                cell_mapping=objects.CellMapping(**cell1_kwargs))
            for inst_uuid in (UUID1, UUID2)
        ]

        res = self._make_request('/detail')

        mock_get_by_instance_uuids.assert_called_once_with(
            test.MatchType(nova_context.RequestContext), [UUID1, UUID2])
        self.assertEqual(200, res.status_int)
        for idx, server in enumerate(self._get_servers(res.body)):
            attached = server.get('%svolumes_attached' % self.prefix)
            self.assertEqual(self.exp_volumes_detail[idx], attached)
Example #18
0
    def test_save(self, save_in_db):
        """save() with a raw cell_id forwards it straight to the DB layer."""
        db_mapping = get_db_mapping()
        save_in_db.return_value = db_mapping
        inst_uuid = db_mapping['instance_uuid']

        im = objects.InstanceMapping(self.context)
        im.instance_uuid = inst_uuid
        im.cell_id = 3
        im.save()

        save_in_db.assert_called_once_with(self.context,
                db_mapping['instance_uuid'],
                {'cell_id': 3,
                 'instance_uuid': inst_uuid})
        self.compare_obj(im, db_mapping)
Example #19
0
    def test_create(self, create_in_db):
        """create() with a raw cell_id forwards it straight to the DB."""
        db_mapping = get_db_mapping()
        create_in_db.return_value = db_mapping
        inst_uuid = db_mapping['instance_uuid']

        im = objects.InstanceMapping(self.context)
        im.instance_uuid = inst_uuid
        im.cell_id = db_mapping['cell_id']
        im.project_id = db_mapping['project_id']
        im.create()

        create_in_db.assert_called_once_with(self.context,
                {'instance_uuid': inst_uuid,
                 'cell_id': db_mapping['cell_id'],
                 'project_id': db_mapping['project_id']})
        self.compare_obj(im, db_mapping)
Example #20
0
    def setUp(self):
        """Wire up the server-tags controller and expected policy contexts."""
        super(ServerTagsPolicyTest, self).setUp()
        self.controller = server_tags.ServerTagsController()
        # Server tags require microversion 2.26.
        self.req = fakes.HTTPRequest.blank('', version='2.26')
        self.mock_get = self.useFixture(
            fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
        self.instance = fake_instance.fake_instance_obj(
            self.project_member_context,
            id=1,
            uuid=uuids.fake_id,
            vm_state=vm_states.ACTIVE,
            project_id=self.project_id)
        self.mock_get.return_value = self.instance
        # Short-circuit the instance-mapping lookup so the controller
        # targets a cell without hitting the API database.
        inst_map = objects.InstanceMapping(
            project_id=self.project_id,
            cell_mapping=objects.CellMappingList.get_all(
                context.get_admin_context())[1])
        self.stub_out('nova.objects.InstanceMapping.get_by_instance_uuid',
                      lambda s, c, u: inst_map)

        # Check that an admin and/or the server owner is able to perform
        # operations on server tags.
        self.admin_or_owner_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.project_admin_context, self.project_member_context,
            self.project_reader_context, self.project_foo_context
        ]
        # Check that a non-admin/non-owner is not able to perform operations
        # on server tags.
        self.admin_or_owner_unauthorized_contexts = [
            self.system_member_context, self.system_reader_context,
            self.system_foo_context, self.other_project_member_context,
            self.other_project_reader_context
        ]
        # Check that a reader and/or the server owner is able to perform
        # operations on server tags.
        self.reader_or_owner_authorized_contexts = [
            self.legacy_admin_context, self.system_admin_context,
            self.system_member_context, self.system_reader_context,
            self.project_admin_context, self.project_member_context,
            self.project_reader_context, self.project_foo_context
        ]
        # Check that a non-reader/non-owner is not able to perform operations
        # on server tags.
        self.reader_or_owner_unauthorized_contexts = [
            self.system_foo_context, self.other_project_member_context,
            self.other_project_reader_context
        ]
Example #21
0
 def _create_instance(self, ctx, cell):
     """Create an instance in ``cell`` plus its API-DB mapping."""
     # The instance row lives in the targeted cell database...
     with context.target_cell(ctx, cell) as cell_ctx:
         inst = objects.Instance(context=cell_ctx,
                                 image_ref=uuidsentinel.fake_image_ref,
                                 node='node1', reservation_id='a',
                                 host='host1', project_id='fake',
                                 vm_state='fake',
                                 system_metadata={'key': 'value'})
         inst.create()
     # ...while the mapping is written through the untargeted context.
     objects.InstanceMapping(context=ctx,
                             project_id=ctx.project_id,
                             user_id=ctx.user_id,
                             cell_mapping=cell,
                             instance_uuid=inst.uuid).create()
     return inst
Example #22
0
 def _create_instance(self, context):
     """Create an instance record plus a cell-less API-DB mapping for it."""
     inst = objects.Instance(context=context,
                             image_ref=uuidsentinel.fake_image_ref,
                             node='node1',
                             reservation_id='a',
                             host='host1',
                             project_id='fake',
                             vm_state='fake',
                             system_metadata={'key': 'value'})
     inst.create()
     # No cell_mapping is set here; callers get an unmapped instance.
     objects.InstanceMapping(context=context,
                             project_id=context.project_id,
                             user_id=context.user_id,
                             instance_uuid=inst.uuid).create()
     return inst
Example #23
0
    def test_create_cell_mapping_with_qfd_true(self, create_in_db):
        """queued_for_delete=True is passed through create() when set."""
        db_mapping = get_db_mapping(cell_mapping=None, cell_id=None)
        create_in_db.return_value = db_mapping

        im = objects.InstanceMapping(self.context)
        im.instance_uuid = db_mapping['instance_uuid']
        im.cell_mapping = None
        im.project_id = db_mapping['project_id']
        im.queued_for_delete = True
        im.create()

        # No cell_id key: the mapping was created without a cell.
        create_in_db.assert_called_once_with(
            self.context, {
                'instance_uuid': db_mapping['instance_uuid'],
                'queued_for_delete': True,
                'project_id': db_mapping['project_id']
            })
    def setUp(self):
        """Create instances (and faults) spread across all non-empty cells."""
        super(TestInstanceListObjects, self).setUp()

        self.context = context.RequestContext('fake', 'fake')
        self.num_instances = 3
        self.instances = []

        # Stagger launched_at so list ordering by launch time is testable.
        start = datetime.datetime(1985, 10, 25, 1, 21, 0)
        dt = start
        spread = datetime.timedelta(minutes=10)

        cells = objects.CellMappingList.get_all(self.context)
        # Create three instances in each of the real cells. Leave the
        # first cell empty to make sure we don't break with an empty
        # one
        for cell in cells[1:]:
            for i in range(0, self.num_instances):
                with context.target_cell(self.context, cell) as cctx:
                    inst = objects.Instance(context=cctx,
                                            project_id=self.context.project_id,
                                            user_id=self.context.user_id,
                                            created_at=start,
                                            launched_at=dt,
                                            instance_type_id=i,
                                            hostname='%s-inst%i' %
                                            (cell.name, i))
                    inst.create()
                    if i % 2 == 0:
                        # Make some faults for this instance
                        for n in range(0, i + 1):
                            msg = 'fault%i-%s' % (n, inst.hostname)
                            f = objects.InstanceFault(context=cctx,
                                                      instance_uuid=inst.uuid,
                                                      code=i,
                                                      message=msg,
                                                      details='fake',
                                                      host='fakehost')
                            f.create()

                self.instances.append(inst)
                # The API-DB mapping ties each instance to its cell.
                im = objects.InstanceMapping(context=self.context,
                                             project_id=inst.project_id,
                                             user_id=inst.user_id,
                                             instance_uuid=inst.uuid,
                                             cell_mapping=cell)
                im.create()
                dt += spread
Example #25
0
    def test_save(self, save_in_db):
        """Saving with a CellMapping object sends its id as cell_id."""
        db_mapping = get_db_mapping()
        save_in_db.return_value = db_mapping
        inst_uuid = db_mapping['instance_uuid']

        im = objects.InstanceMapping(self.context)
        im.instance_uuid = inst_uuid
        im.cell_mapping = objects.CellMapping(self.context, id=42)
        im.save()

        save_in_db.assert_called_once_with(self.context,
                db_mapping['instance_uuid'],
                {'cell_id': im.cell_mapping.id,
                 'instance_uuid': inst_uuid})
        self.compare_obj(im, db_mapping,
                         subs={'cell_mapping': 'cell_id'},
                         comparators={
                             'cell_mapping': self._check_cell_map_value})
Example #26
0
    def test_create_cell_mapping_none(self, create_in_db):
        """create() accepts an unmapped instance (cell_mapping=None)."""
        db_mapping = get_db_mapping(cell_mapping=None, cell_id=None)
        create_in_db.return_value = db_mapping
        inst_uuid = db_mapping['instance_uuid']

        im = objects.InstanceMapping(self.context)
        im.instance_uuid = inst_uuid
        im.cell_mapping = None
        im.project_id = db_mapping['project_id']
        im.user_id = db_mapping['user_id']
        im.create()

        # No cell_id key is sent when there is no cell.
        create_in_db.assert_called_once_with(self.context,
                {'instance_uuid': inst_uuid,
                 'queued_for_delete': False,
                 'project_id': db_mapping['project_id'],
                 'user_id': db_mapping['user_id']})
        self.compare_obj(im, db_mapping,
                         subs={'cell_mapping': 'cell_id'})
        self.assertIsNone(im.cell_mapping)
Example #27
0
 def test_cell_mapping_nullable(self):
     """Assigning None to cell_mapping must not raise."""
     im = objects.InstanceMapping(self.context)
     im.cell_mapping = None
Example #28
0
class MigrationTaskTestCase(test.NoDBTestCase):
    """Unit tests for the conductor MigrationTask.

    Covers the execute() flow for same-cell resize/migrate (prep_resize),
    the cross-cell branch (CrossCellMigrationTask), rollback on failure,
    rescheduling to alternate hosts, and the requested-destination cell
    restriction logic.
    """

    def setUp(self):
        """Build a fake context/instance/flavor and patch the request-spec
        healing helpers used by every test in this class.
        """
        super(MigrationTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        # Normally RequestContext.cell_uuid would be set when targeting
        # the context in nova.conductor.manager.targets_cell but we just
        # fake it here.
        self.context.cell_uuid = uuids.cell1
        self.flavor = fake_flavor.fake_flavor_obj(self.context)
        self.flavor.extra_specs = {'extra_specs': 'fake'}
        inst = fake_instance.fake_db_instance(image_ref='image_ref',
                                              instance_type=self.flavor)
        inst_object = objects.Instance(
            flavor=self.flavor,
            numa_topology=None,
            pci_requests=None,
            system_metadata={'image_hw_disk_bus': 'scsi'})
        self.instance = objects.Instance._from_db_object(
            self.context, inst_object, inst, [])
        self.request_spec = objects.RequestSpec(image=objects.ImageMeta())
        self.host_lists = [[
            objects.Selection(service_host="host1",
                              nodename="node1",
                              cell_uuid=uuids.cell1)
        ]]
        self.filter_properties = {
            'limits': {},
            'retry': {
                'num_attempts': 1,
                'hosts': [['host1', 'node1']]
            }
        }
        self.reservations = []
        self.clean_shutdown = True

        # Patch the helpers that execute() calls on the request spec so the
        # tests can assert they were invoked without real side effects.
        _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
        self.heal_reqspec_is_bfv_mock = _p.start()
        self.addCleanup(_p.stop)

        _p = mock.patch('nova.objects.RequestSpec.ensure_network_information')
        self.ensure_network_information_mock = _p.start()
        self.addCleanup(_p.stop)

        self.mock_network_api = mock.Mock()

    def _generate_task(self):
        """Return a MigrationTask wired up with the test fixtures and a
        mocked network API (no alternate host list).
        """
        return migrate.MigrationTask(self.context,
                                     self.instance,
                                     self.flavor,
                                     self.request_spec,
                                     self.clean_shutdown,
                                     compute_rpcapi.ComputeAPI(),
                                     query.SchedulerQueryClient(),
                                     report.SchedulerReportClient(),
                                     host_list=None,
                                     network_api=self.mock_network_api)

    @mock.patch.object(objects.MigrationList, 'get_by_filters')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.Migration.save')
    @mock.patch('nova.objects.Migration.create')
    @mock.patch('nova.objects.Service.get_minimum_version_multi')
    @mock.patch('nova.availability_zones.get_host_availability_zone')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
    @mock.patch('nova.conductor.tasks.cross_cell_migrate.'
                'CrossCellMigrationTask.execute')
    def _test_execute(self,
                      cross_cell_exec_mock,
                      prep_resize_mock,
                      sel_dest_mock,
                      sig_mock,
                      az_mock,
                      gmv_mock,
                      cm_mock,
                      sm_mock,
                      cn_mock,
                      rc_mock,
                      gbf_mock,
                      requested_destination=False,
                      same_cell=True):
        """Common driver for the execute() tests.

        :param requested_destination: if True, set a requested target host
            on the request spec before executing and verify the
            destination/retry handling afterwards.
        :param same_cell: if True the selected host is treated as being in
            the instance's cell (prep_resize path); otherwise the
            CrossCellMigrationTask path is asserted.
        """
        sel_dest_mock.return_value = self.host_lists
        az_mock.return_value = 'myaz'
        gbf_mock.return_value = objects.MigrationList()
        mock_get_resources = \
            self.mock_network_api.get_requested_resource_for_instance
        mock_get_resources.return_value = []

        if requested_destination:
            self.request_spec.requested_destination = objects.Destination(
                host='target_host',
                node=None,
                allow_cross_cell_move=not same_cell)
            self.request_spec.retry = objects.SchedulerRetries.from_dict(
                self.context, self.filter_properties['retry'])
            self.filter_properties.pop('retry')
            self.filter_properties['requested_destination'] = (
                self.request_spec.requested_destination)

        task = self._generate_task()
        gmv_mock.return_value = 23

        # We just need this hook point to set a uuid on the
        # migration before we use it for teardown
        def set_migration_uuid(*a, **k):
            task._migration.uuid = uuids.migration
            return mock.MagicMock()

        # NOTE(danms): It's odd to do this on cn_mock, but it's just because
        # of when we need to have it set in the flow and where we have an easy
        # place to find it via self.migration.
        cn_mock.side_effect = set_migration_uuid

        selection = self.host_lists[0][0]
        with mock.patch.object(task,
                               '_is_selected_host_in_source_cell',
                               return_value=same_cell) as _is_source_cell_mock:
            task.execute()
            _is_source_cell_mock.assert_called_once_with(selection)

        self.ensure_network_information_mock.assert_called_once_with(
            self.instance)
        self.heal_reqspec_is_bfv_mock.assert_called_once_with(
            self.context, self.request_spec, self.instance)
        sig_mock.assert_called_once_with(self.context, self.request_spec)
        task.query_client.select_destinations.assert_called_once_with(
            self.context,
            self.request_spec, [self.instance.uuid],
            return_objects=True,
            return_alternates=True)

        if same_cell:
            prep_resize_mock.assert_called_once_with(
                self.context,
                self.instance,
                self.request_spec.image,
                self.flavor,
                selection.service_host,
                task._migration,
                request_spec=self.request_spec,
                filter_properties=self.filter_properties,
                node=selection.nodename,
                clean_shutdown=self.clean_shutdown,
                host_list=[])
            az_mock.assert_called_once_with(self.context, 'host1')
            cross_cell_exec_mock.assert_not_called()
        else:
            cross_cell_exec_mock.assert_called_once_with()
            az_mock.assert_not_called()
            prep_resize_mock.assert_not_called()

        self.assertIsNotNone(task._migration)

        old_flavor = self.instance.flavor
        new_flavor = self.flavor
        self.assertEqual(old_flavor.id, task._migration.old_instance_type_id)
        self.assertEqual(new_flavor.id, task._migration.new_instance_type_id)
        self.assertEqual('pre-migrating', task._migration.status)
        self.assertEqual(self.instance.uuid, task._migration.instance_uuid)
        self.assertEqual(self.instance.host, task._migration.source_compute)
        self.assertEqual(self.instance.node, task._migration.source_node)
        if old_flavor.id != new_flavor.id:
            self.assertEqual('resize', task._migration.migration_type)
        else:
            self.assertEqual('migration', task._migration.migration_type)

        task._migration.create.assert_called_once_with()

        if requested_destination:
            self.assertIsNone(self.request_spec.retry)
            self.assertIn('cell', self.request_spec.requested_destination)
            self.assertIsNotNone(self.request_spec.requested_destination.cell)
            self.assertEqual(
                not same_cell,
                self.request_spec.requested_destination.allow_cross_cell_move)

        mock_get_resources.assert_called_once_with(self.context,
                                                   self.instance.uuid)
        self.assertEqual([], self.request_spec.requested_resources)

    def test_execute(self):
        """Default same-cell migration with no requested destination."""
        self._test_execute()

    def test_execute_with_destination(self):
        """Same-cell migration with a requested target host."""
        self._test_execute(requested_destination=True)

    def test_execute_resize(self):
        """A different flavor id makes the migration a 'resize'."""
        self.flavor = self.flavor.obj_clone()
        self.flavor.id = 3
        self._test_execute()

    def test_execute_same_cell_false(self):
        """Tests the execute() scenario that the RequestSpec allows cross
        cell move and the selected target host is in another cell so
        CrossCellMigrationTask is executed.
        """
        self._test_execute(same_cell=False)

    @mock.patch.object(objects.MigrationList, 'get_by_filters')
    @mock.patch('nova.conductor.tasks.migrate.revert_allocation_for_migration')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
    @mock.patch('nova.objects.Migration.save')
    @mock.patch('nova.objects.Migration.create')
    @mock.patch('nova.objects.Service.get_minimum_version_multi')
    @mock.patch('nova.availability_zones.get_host_availability_zone')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations')
    @mock.patch.object(compute_rpcapi.ComputeAPI, 'prep_resize')
    def test_execute_rollback(self, prep_resize_mock, sel_dest_mock, sig_mock,
                              az_mock, gmv_mock, cm_mock, sm_mock, cn_mock,
                              rc_mock, mock_ra, mock_gbf):
        """When prep_resize raises, the migration record is saved with
        status 'error' and held allocations are reverted.
        """
        sel_dest_mock.return_value = self.host_lists
        az_mock.return_value = 'myaz'
        task = self._generate_task()
        gmv_mock.return_value = 23
        mock_gbf.return_value = objects.MigrationList()
        mock_get_resources = \
            self.mock_network_api.get_requested_resource_for_instance
        mock_get_resources.return_value = []

        # We just need this hook point to set a uuid on the
        # migration before we use it for teardown
        def set_migration_uuid(*a, **k):
            task._migration.uuid = uuids.migration
            return mock.MagicMock()

        # NOTE(danms): It's odd to do this on cn_mock, but it's just because
        # of when we need to have it set in the flow and where we have an easy
        # place to find it via self.migration.
        cn_mock.side_effect = set_migration_uuid

        prep_resize_mock.side_effect = test.TestingException
        task._held_allocations = mock.sentinel.allocs
        self.assertRaises(test.TestingException, task.execute)
        self.assertIsNotNone(task._migration)
        task._migration.create.assert_called_once_with()
        task._migration.save.assert_called_once_with()
        self.assertEqual('error', task._migration.status)
        mock_ra.assert_called_once_with(task.context, task._source_cn,
                                        task.instance, task._migration)
        mock_get_resources.assert_called_once_with(self.context,
                                                   self.instance.uuid)

    @mock.patch.object(scheduler_utils, 'claim_resources')
    @mock.patch.object(context.RequestContext, 'elevated')
    def test_execute_reschedule(self, mock_elevated, mock_claim):
        """_reschedule() returns the alternate selection when claiming its
        allocation succeeds.
        """
        report_client = report.SchedulerReportClient()
        # setup task for re-schedule
        alloc_req = {
            "allocations": {
                uuids.host1: {
                    "resources": {
                        "VCPU": 1,
                        "MEMORY_MB": 1024,
                        "DISK_GB": 100
                    }
                }
            }
        }
        alternate_selection = objects.Selection(
            service_host="host1",
            nodename="node1",
            cell_uuid=uuids.cell1,
            allocation_request=jsonutils.dumps(alloc_req),
            allocation_request_version='1.19')
        task = migrate.MigrationTask(self.context,
                                     self.instance,
                                     self.flavor,
                                     self.request_spec,
                                     self.clean_shutdown,
                                     compute_rpcapi.ComputeAPI(),
                                     query.SchedulerQueryClient(),
                                     report_client,
                                     host_list=[alternate_selection],
                                     network_api=self.mock_network_api)
        mock_claim.return_value = True

        actual_selection = task._reschedule()

        self.assertIs(alternate_selection, actual_selection)
        mock_claim.assert_called_once_with(mock_elevated.return_value,
                                           report_client, self.request_spec,
                                           self.instance.uuid, alloc_req,
                                           '1.19')

    @mock.patch.object(scheduler_utils, 'fill_provider_mapping')
    @mock.patch.object(scheduler_utils, 'claim_resources')
    @mock.patch.object(context.RequestContext, 'elevated')
    def test_execute_reschedule_claim_fails_no_more_alternate(
            self, mock_elevated, mock_claim, mock_fill_provider_mapping):
        """_reschedule() raises MaxRetriesExceeded when the claim on the
        only alternate fails, and never fills the provider mapping.
        """
        report_client = report.SchedulerReportClient()
        # set up the task for re-schedule
        alloc_req = {
            "allocations": {
                uuids.host1: {
                    "resources": {
                        "VCPU": 1,
                        "MEMORY_MB": 1024,
                        "DISK_GB": 100
                    }
                }
            }
        }
        alternate_selection = objects.Selection(
            service_host="host1",
            nodename="node1",
            cell_uuid=uuids.cell1,
            allocation_request=jsonutils.dumps(alloc_req),
            allocation_request_version='1.19')
        task = migrate.MigrationTask(self.context,
                                     self.instance,
                                     self.flavor,
                                     self.request_spec,
                                     self.clean_shutdown,
                                     compute_rpcapi.ComputeAPI(),
                                     query.SchedulerQueryClient(),
                                     report_client,
                                     host_list=[alternate_selection],
                                     network_api=self.mock_network_api)
        mock_claim.return_value = False

        self.assertRaises(exception.MaxRetriesExceeded, task._reschedule)

        mock_claim.assert_called_once_with(mock_elevated.return_value,
                                           report_client, self.request_spec,
                                           self.instance.uuid, alloc_req,
                                           '1.19')
        mock_fill_provider_mapping.assert_not_called()

    @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
                return_value=objects.InstanceMapping(
                    cell_mapping=objects.CellMapping(uuid=uuids.cell1)))
    @mock.patch('nova.conductor.tasks.migrate.LOG.debug')
    def test_set_requested_destination_cell_allow_cross_cell_resize_true(
            self, mock_debug, mock_get_im):
        """Tests the scenario that the RequestSpec is configured for
        allow_cross_cell_resize=True.
        """
        task = self._generate_task()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        self.request_spec.requested_destination = objects.Destination(
            allow_cross_cell_move=True)
        task._set_requested_destination_cell(legacy_props)
        mock_get_im.assert_called_once_with(self.context, self.instance.uuid)
        mock_debug.assert_called_once()
        self.assertIn('Allowing migration from cell',
                      mock_debug.call_args[0][0])
        self.assertEqual(mock_get_im.return_value.cell_mapping,
                         self.request_spec.requested_destination.cell)

    @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
                return_value=objects.InstanceMapping(
                    cell_mapping=objects.CellMapping(uuid=uuids.cell1)))
    @mock.patch('nova.conductor.tasks.migrate.LOG.debug')
    def test_set_requested_destination_cell_allow_cross_cell_resize_true_host(
            self, mock_debug, mock_get_im):
        """Tests the scenario that the RequestSpec is configured for
        allow_cross_cell_resize=True and there is a requested target host.
        """
        task = self._generate_task()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        self.request_spec.requested_destination = objects.Destination(
            allow_cross_cell_move=True, host='fake-host')
        task._set_requested_destination_cell(legacy_props)
        mock_get_im.assert_called_once_with(self.context, self.instance.uuid)
        mock_debug.assert_called_once()
        self.assertIn('Not restricting cell', mock_debug.call_args[0][0])
        self.assertIsNone(self.request_spec.requested_destination.cell)

    @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
                return_value=objects.InstanceMapping(
                    cell_mapping=objects.CellMapping(uuid=uuids.cell1)))
    @mock.patch('nova.conductor.tasks.migrate.LOG.debug')
    def test_set_requested_destination_cell_allow_cross_cell_resize_false(
            self, mock_debug, mock_get_im):
        """Tests the scenario that the RequestSpec is configured for
        allow_cross_cell_resize=False.
        """
        task = self._generate_task()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        # We don't have to explicitly set RequestSpec.requested_destination
        # since _set_requested_destination_cell will do that and the
        # Destination object will default allow_cross_cell_move to False.
        task._set_requested_destination_cell(legacy_props)
        mock_get_im.assert_called_once_with(self.context, self.instance.uuid)
        mock_debug.assert_called_once()
        self.assertIn('Restricting to cell', mock_debug.call_args[0][0])

    def test_is_selected_host_in_source_cell_true(self):
        """Tests the scenario that the host Selection from the scheduler is in
        the same cell as the instance.
        """
        task = self._generate_task()
        selection = objects.Selection(cell_uuid=self.context.cell_uuid)
        self.assertTrue(task._is_selected_host_in_source_cell(selection))

    def test_is_selected_host_in_source_cell_false(self):
        """Tests the scenario that the host Selection from the scheduler is
        not in the same cell as the instance.
        """
        task = self._generate_task()
        selection = objects.Selection(cell_uuid=uuids.cell2, service_host='x')
        self.assertFalse(task._is_selected_host_in_source_cell(selection))
Beispiel #29
0
    def test_instances_cores_ram_count(self):
        """Instance/core/ram usage is aggregated across cells at project
        scope and also scoped to the given user.
        """
        ctxt = context.RequestContext('fake-user', 'fake-project')
        mapping1 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell1',
                                       transport_url='none:///')
        mapping2 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell2',
                                       transport_url='none:///')
        mapping1.create()
        mapping2.create()

        def _make_instance(cell_mapping, user_id, vcpus, memory_mb):
            # Create an instance in the given cell plus its API-level
            # instance mapping; the counting code only queries cells in
            # which the project has instance mappings.
            with context.target_cell(ctxt, cell_mapping) as cctxt:
                inst = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id=user_id,
                                        vcpus=vcpus,
                                        memory_mb=memory_mb)
                inst.create()
                objects.InstanceMapping(context=ctxt,
                                        instance_uuid=inst.uuid,
                                        cell_mapping=cell_mapping,
                                        project_id='fake-project').create()

        # One instance in cell1, two in cell2 (one owned by another user).
        _make_instance(mapping1, 'fake-user', 2, 512)
        _make_instance(mapping2, 'fake-user', 4, 1024)
        _make_instance(mapping2, 'other-fake-user', 4, 1024)

        # Count instances, cores, and ram across cells.
        count = quota._instances_cores_ram_count(ctxt,
                                                 'fake-project',
                                                 user_id='fake-user')

        self.assertEqual(3, count['project']['instances'])
        self.assertEqual(10, count['project']['cores'])
        self.assertEqual(2560, count['project']['ram'])
        self.assertEqual(2, count['user']['instances'])
        self.assertEqual(6, count['user']['cores'])
        self.assertEqual(1536, count['user']['ram'])
Beispiel #30
0
    def test_server_group_members_count_by_user(self, uid_qfd_populated,
                                                mock_uid_qfd_populated,
                                                mock_warn_log):
        # Counts live server-group members for one user across two cells;
        # an instance queued for delete must be excluded from the count.
        # NOTE(review): uid_qfd_populated is presumably injected by a
        # parameterizing decorator above this view (True/False for whether
        # the user_id/queued_for_delete data migration has completed) —
        # confirm against the decorators, which are not visible here.
        mock_uid_qfd_populated.return_value = uid_qfd_populated
        ctxt = context.RequestContext('fake-user', 'fake-project')
        mapping1 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell1',
                                       transport_url='none:///')
        mapping2 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell2',
                                       transport_url='none:///')
        mapping1.create()
        mapping2.create()

        # Create a server group the instances will use.
        group = objects.InstanceGroup(context=ctxt)
        group.project_id = ctxt.project_id
        group.user_id = ctxt.user_id
        group.create()
        instance_uuids = []

        # Create an instance in cell1
        with context.target_cell(ctxt, mapping1) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance_uuids.append(instance.uuid)
        im = objects.InstanceMapping(context=ctxt,
                                     instance_uuid=instance.uuid,
                                     project_id='fake-project',
                                     user_id='fake-user',
                                     cell_id=mapping1.id)
        im.create()

        # Create an instance in cell2
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance_uuids.append(instance.uuid)
        im = objects.InstanceMapping(context=ctxt,
                                     instance_uuid=instance.uuid,
                                     project_id='fake-project',
                                     user_id='fake-user',
                                     cell_id=mapping2.id)
        im.create()

        # Create an instance that is queued for delete in cell2. It should not
        # be counted
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance.destroy()
            instance_uuids.append(instance.uuid)
        im = objects.InstanceMapping(context=ctxt,
                                     instance_uuid=instance.uuid,
                                     project_id='fake-project',
                                     user_id='fake-user',
                                     cell_id=mapping2.id,
                                     queued_for_delete=True)
        im.create()

        # Add the uuids to the group
        objects.InstanceGroup.add_members(ctxt, group.uuid, instance_uuids)
        # add_members() doesn't add the members to the object field
        group.members.extend(instance_uuids)

        # Count server group members from instance mappings or cell databases,
        # depending on whether the user_id/queued_for_delete data migration has
        # been completed.
        count = quota._server_group_count_members_by_user(
            ctxt, group, 'fake-user')

        self.assertEqual(2, count['user']['server_group_members'])

        if uid_qfd_populated:
            # Did not log a warning about falling back to legacy count.
            mock_warn_log.assert_not_called()
        else:
            # Logged a warning about falling back to legacy count.
            mock_warn_log.assert_called_once()