Example #1
    def test_fixture_when_explicitly_passing_down_cell_mappings(self):
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. We'll create one instance per cell and pass cell0 as
        # the down cell. We should thus get db_exc.DBError for cell0 and
        # a correct InstanceList object from cell1.
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell0) as cctxt:
            inst1 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst1:
                delattr(inst1, 'id')
            inst1.create()
        with context.target_cell(ctxt, cell1) as cctxt:
            inst2 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst2:
                delattr(inst2, 'id')
            inst2.create()
        with fixtures.DownCellFixture([cell0]):
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
            self.assertEqual(2, len(results))
            for cell_uuid, result in results.items():
                if cell_uuid == cell0.uuid:
                    self.assertIsInstance(result, db_exc.DBError)
                else:
                    self.assertIsInstance(result, objects.InstanceList)
                    self.assertEqual(1, len(result))
                    self.assertEqual(inst2.uuid, result[0].uuid)
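
scatter_gather_all_cells() returns a dict keyed by cell UUID whose values are either real results or the exception the cell raised, so callers typically branch on the result type just as the loop above does. A minimal, hedged sketch of that inspection step as a standalone helper (the function name is invented, not a Nova API):

    def split_scatter_gather_results(results):
        # Separate per-cell results into successes and failures; a failed
        # cell is represented by the exception instance it raised.
        ok, failed = {}, {}
        for cell_uuid, result in results.items():
            if isinstance(result, Exception):
                failed[cell_uuid] = result
            else:
                ok[cell_uuid] = result
        return ok, failed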
Example #2
    def test_fixture_for_an_individual_down_cell_targeted_call(self):
        # We have cell0 and cell1 by default in the setup. We try targeting
        # both the cells. We should get a db error for the down cell and
        # the correct result for the up cell.
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell0) as cctxt:
            inst1 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst1:
                delattr(inst1, 'id')
            inst1.create()
        with context.target_cell(ctxt, cell1) as cctxt:
            inst2 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst2:
                delattr(inst2, 'id')
            inst2.create()

        def dummy_tester(ctxt, cell_mapping, uuid):
            with context.target_cell(ctxt, cell_mapping) as cctxt:
                return objects.Instance.get_by_uuid(cctxt, uuid)

        # Scenario A: We do not pass any down cells; the fixture automatically
        # assumes the targeted cell is down, whether it's cell0 or cell1.
        with fixtures.DownCellFixture():
            self.assertRaises(
                db_exc.DBError, dummy_tester, ctxt, cell1, inst2.uuid)
        # Scenario B: We pass cell0 as the down cell.
        with fixtures.DownCellFixture([cell0]):
            self.assertRaises(
                db_exc.DBError, dummy_tester, ctxt, cell0, inst1.uuid)
            # Scenario C: We get the correct result from the up cell
            # when targeted.
            result = dummy_tester(ctxt, cell1, inst2.uuid)
            self.assertEqual(inst2.uuid, result.uuid)
Example #3
    def _create_instances(self, pre_newton=2, deleted=0, total=5,
                          target_cell=None):
        if not target_cell:
            target_cell = self.cells[1]

        instances = []
        with context.target_cell(self.context, target_cell) as cctxt:
            flav_dict = objects.Flavor._flavor_get_from_db(cctxt, 1)
            flavor = objects.Flavor(**flav_dict)
            for i in range(0, total):
                inst = objects.Instance(
                    context=cctxt,
                    project_id=self.api.project_id,
                    user_id=FAKE_UUID,
                    vm_state='active',
                    flavor=flavor,
                    created_at=datetime.datetime(1985, 10, 25, 1, 21, 0),
                    launched_at=datetime.datetime(1985, 10, 25, 1, 22, 0),
                    host=self.computes[0].host,
                    hostname='%s-inst%i' % (target_cell.name, i))
                inst.create()

                info_cache = objects.InstanceInfoCache(context=cctxt)
                info_cache.updated_at = timeutils.utcnow()
                info_cache.network_info = network_model.NetworkInfo()
                info_cache.instance_uuid = inst.uuid
                info_cache.save()

                instances.append(inst)

                im = objects.InstanceMapping(context=cctxt,
                    project_id=inst.project_id,
                    user_id=inst.user_id,
                    instance_uuid=inst.uuid,
                    cell_mapping=target_cell)
                im.create()

        # Attach fake interfaces to instances
        network_id = list(self.neutron._networks.keys())[0]
        for i in range(0, len(instances)):
            for k in range(0, 4):
                self.api.attach_interface(instances[i].uuid,
                    {"interfaceAttachment": {"net_id": network_id}})

        with context.target_cell(self.context, target_cell) as cctxt:
            # Fake the pre-newton behaviour by removing the
            # VirtualInterfacesList objects.
            if pre_newton:
                for i in range(0, pre_newton):
                    _delete_vif_list(cctxt, instances[i].uuid)

        if deleted:
            # Delete from the end of active instances list
            for i in range(total - deleted, total):
                instances[i].destroy()

        self.instances += instances
Example #4
    def delete_all(self, req, server_id):
        context = req.environ["nova.context"]
        context.can(st_policies.POLICY_ROOT % 'delete_all')
        im = _get_instance_mapping(context, server_id)

        with nova_context.target_cell(context, im.cell_mapping) as cctxt:
            self._check_instance_in_valid_state(cctxt, server_id,
                                                'delete tags')

        try:
            with nova_context.target_cell(context, im.cell_mapping) as cctxt:
                objects.TagList.destroy(cctxt, server_id)
        except exception.InstanceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
Example #5
    def update(self, req, server_id, id, body):
        context = req.environ["nova.context"]
        context.can(st_policies.POLICY_ROOT % 'update')
        im = _get_instance_mapping(context, server_id)

        with nova_context.target_cell(context, im.cell_mapping) as cctxt:
            instance = self._check_instance_in_valid_state(
                cctxt, server_id, 'update tag')

        try:
            jsonschema.validate(id, parameter_types.tag)
        except jsonschema.ValidationError as e:
            msg = (_("Tag '%(tag)s' is invalid. It must be a non empty string "
                     "without characters '/' and ','. Validation error "
                     "message: %(err)s") % {'tag': id, 'err': e.message})
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            with nova_context.target_cell(context, im.cell_mapping) as cctxt:
                tags = objects.TagList.get_by_resource_id(cctxt, server_id)
        except exception.InstanceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        if len(tags) >= objects.instance.MAX_TAG_COUNT:
            msg = (_("The number of tags exceeded the per-server limit %d")
                   % objects.instance.MAX_TAG_COUNT)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if id in _get_tags_names(tags):
            # NOTE(snikitin): server already has specified tag
            return webob.Response(status_int=204)

        try:
            with nova_context.target_cell(context, im.cell_mapping) as cctxt:
                tag = objects.Tag(context=cctxt, resource_id=server_id, tag=id)
                tag.create()
                instance.tags = objects.TagList.get_by_resource_id(cctxt,
                                                                   server_id)
        except exception.InstanceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        notifications_base.send_instance_update_notification(
            context, instance, service="nova-api")

        response = webob.Response(status_int=201)
        response.headers['Location'] = self._view_builder.get_location(
            req, server_id, id)
        return response
Example #6
    def test_connection_switch(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        # Make a request context with a cell mapping
        mapping = objects.CellMapping(context=ctxt,
                                      uuid=uuidutils.generate_uuid(),
                                      database_connection=self.fake_conn,
                                      transport_url='none:///')
        mapping.create()
        # Create an instance in the cell database
        uuid = uuidutils.generate_uuid()
        with context.target_cell(ctxt, mapping):
            # Must set project_id because instance get specifies
            # project_only=True to model_query, which means non-admin
            # users can only read instances for their project
            instance = objects.Instance(context=ctxt, uuid=uuid,
                                        project_id='fake-project')
            instance.create()

            # Verify the instance is found in the cell database
            inst = objects.Instance.get_by_uuid(ctxt, uuid)
            self.assertEqual(uuid, inst.uuid)

        # Verify the instance isn't found in the main database
        self.assertRaises(exception.InstanceNotFound,
                          objects.Instance.get_by_uuid, ctxt, uuid)
Example #7
    def test_populate_user_id_instance_get_fail(self, mock_inst_get):
        cells = []
        celldbs = fixtures.CellDatabases()

        # Create two cell databases and map them
        for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
            cm = cell_mapping.CellMapping(context=self.context, uuid=uuid,
                                          database_connection=uuid,
                                          transport_url='fake://')
            cm.create()
            cells.append(cm)
            celldbs.add_cell_database(uuid)
        self.useFixture(celldbs)

        # Create one instance per cell
        for cell in cells:
            with context.target_cell(self.context, cell) as cctxt:
                inst = instance.Instance(
                    cctxt,
                    project_id=self.context.project_id,
                    user_id=self.context.user_id)
                inst.create()
            create_mapping(project_id=self.context.project_id,
                           user_id=None, cell_id=cell.id,
                           instance_uuid=inst.uuid)

        # Simulate the first cell is down/has some error
        mock_inst_get.side_effect = [test.TestingException(),
                                     instance.InstanceList(objects=[inst])]

        found, done = instance_mapping.populate_user_id(self.context, 1000)
        # Verify we continue to the next cell when a down/error cell is
        # encountered.
        self.assertEqual(2, found)
        self.assertEqual(1, done)
Example #8
    def _get_instances_all_cells(self, context, period_start, period_stop,
                                 tenant_id, limit, marker):
        all_instances = []
        cells = objects.CellMappingList.get_all(context)
        for cell in cells:
            with nova_context.target_cell(context, cell):
                try:
                    instances = (
                        objects.InstanceList.get_active_by_window_joined(
                            context, period_start, period_stop, tenant_id,
                            expected_attrs=['flavor'], limit=limit,
                            marker=marker))
                except exception.MarkerNotFound:
                    # NOTE(danms): We need to keep looking through the later
                    # cells to find the marker
                    continue
                all_instances.extend(instances)
                # NOTE(danms): We must have found a marker if we had one,
                # so make sure we don't require a marker in the next cell
                marker = None
                if limit:
                    limit -= len(instances)
                    if limit <= 0:
                        break
        if marker is not None and len(all_instances) == 0:
            # NOTE(danms): If we did not find the marker in any cell,
            # mimic the db_api behavior here
            raise exception.MarkerNotFound(marker=marker)

        return all_instances
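
The loop above is essentially cross-cell pagination: the marker is only honoured in the cell that contains it, and the remaining limit shrinks as results are collected. A framework-free sketch of the same bookkeeping, under assumed names (KeyError stands in for MarkerNotFound):

    def paginate_across_shards(shards, fetch_page, limit=None, marker=None):
        # fetch_page(shard, limit, marker) returns a list, or raises
        # KeyError when the marker does not live in that shard.
        results = []
        for shard in shards:
            try:
                page = fetch_page(shard, limit, marker)
            except KeyError:
                continue  # the marker lives in a later shard
            results.extend(page)
            marker = None  # marker consumed; later shards start from the top
            if limit is not None:
                limit -= len(page)
                if limit <= 0:
                    break
        if marker is not None and not results:
            raise KeyError(marker)  # marker was never found in any shard
        return results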
Example #9
def _get_not_deleted(context, uuids):
    mappings = objects.InstanceMappingList.get_by_instance_uuids(
        context, uuids)
    inst_by_cell = collections.defaultdict(list)
    cell_mappings = {}
    found_inst_uuids = []

    # Get a master list of cell mappings, and a list of instance
    # uuids organized by cell
    for im in mappings:
        if not im.cell_mapping:
            # Not scheduled yet, so just throw it in the final list
            # and move on
            found_inst_uuids.append(im.instance_uuid)
            continue
        if im.cell_mapping.uuid not in cell_mappings:
            cell_mappings[im.cell_mapping.uuid] = im.cell_mapping
        inst_by_cell[im.cell_mapping.uuid].append(im.instance_uuid)

    # Query each cell for the instances that are inside, building
    # a list of non-deleted instance uuids.
    for cell_uuid, cell_mapping in cell_mappings.items():
        inst_uuids = inst_by_cell[cell_uuid]
        LOG.debug('Querying cell %(cell)s for %(num)i instances',
                  {'cell': cell_mapping.identity, 'num': len(uuids)})
        filters = {'uuid': inst_uuids, 'deleted': False}
        with nova_context.target_cell(context, cell_mapping) as ctx:
            found_inst_uuids.extend([
                inst.uuid for inst in objects.InstanceList.get_by_filters(
                    ctx, filters=filters)])

    return found_inst_uuids
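
The first loop shows a recurring cells pattern: bucket instance UUIDs by the cell their mapping points at, keep one CellMapping object per cell, and set aside instances that have not been scheduled yet. A generic sketch of just that grouping step, with illustrative names:

    import collections

    def group_uuids_by_cell(mappings):
        # Returns (cell_mappings_by_uuid, uuids_by_cell_uuid, unscheduled).
        cells = {}
        by_cell = collections.defaultdict(list)
        unscheduled = []
        for im in mappings:
            if not im.cell_mapping:
                unscheduled.append(im.instance_uuid)
                continue
            cells.setdefault(im.cell_mapping.uuid, im.cell_mapping)
            by_cell[im.cell_mapping.uuid].append(im.instance_uuid)
        return cells, by_cell, unscheduled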
Example #10
def handle_password(req, meta_data):
    ctxt = context.get_admin_context()
    if req.method == 'GET':
        return meta_data.password
    elif req.method == 'POST':
        # NOTE(vish): The conflict will only happen once the metadata cache
        #             updates, but it isn't a huge issue if it can be set for
        #             a short window.
        if meta_data.password:
            raise exc.HTTPConflict()
        if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE):
            msg = _("Request is too large.")
            raise exc.HTTPBadRequest(explanation=msg)

        if CONF.api.local_metadata_per_cell:
            instance = objects.Instance.get_by_uuid(ctxt, meta_data.uuid)
        else:
            im = objects.InstanceMapping.get_by_instance_uuid(
                ctxt, meta_data.uuid)
            with context.target_cell(ctxt, im.cell_mapping) as cctxt:
                try:
                    instance = objects.Instance.get_by_uuid(
                        cctxt, meta_data.uuid)
                except exception.InstanceNotFound as e:
                    raise exc.HTTPBadRequest(explanation=e.format_message())

        instance.system_metadata.update(convert_password(ctxt, req.body))
        instance.save()
    else:
        msg = _("GET and POST only are supported.")
        raise exc.HTTPBadRequest(explanation=msg)
Example #11
    def setUp(self):
        super(TestMigrationListObjects, self).setUp()

        self.context = context.RequestContext('fake', 'fake')
        self.num_migrations = 3
        self.migrations = []

        start = datetime.datetime(1985, 10, 25, 1, 21, 0)

        self.cells = objects.CellMappingList.get_all(self.context)
        # Create three migrations in each of the real cells. Leave the
        # first cell empty to make sure we don't break with an empty
        # one.
        for cell in self.cells[1:]:
            for i in range(0, self.num_migrations):
                with context.target_cell(self.context, cell) as cctx:
                    mig = objects.Migration(cctx,
                                            uuid=getattr(
                                                uuidsentinel,
                                                '%s_mig%i' % (cell.name, i)
                                            ),
                                            created_at=start,
                                            migration_type='resize',
                                            instance_uuid=getattr(
                                                uuidsentinel,
                                                'inst%i' % i)
                                            )
                    mig.create()
                self.migrations.append(mig)
Example #12
    def update_all(self, req, server_id, body):
        context = req.environ["nova.context"]
        context.can(st_policies.POLICY_ROOT % 'update_all')
        im = _get_instance_mapping(context, server_id)

        with nova_context.target_cell(context, im.cell_mapping) as cctxt:
            self._check_instance_in_valid_state(cctxt, server_id,
                                                'update tags')

        try:
            with nova_context.target_cell(context, im.cell_mapping) as cctxt:
                tags = objects.TagList.create(cctxt, server_id, body['tags'])
        except exception.InstanceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        return {'tags': _get_tags_names(tags)}
Example #13
    def _get_computes_for_cells(self, context, cells, compute_uuids=None):
        """Get a tuple of compute node and service information.

        Returns a tuple (compute_nodes, services) where:
         - compute_nodes is cell-uuid keyed dict of compute node lists
         - services is a dict of services indexed by hostname
        """

        compute_nodes = collections.defaultdict(list)
        services = {}
        for cell in cells:
            LOG.debug('Getting compute nodes and services for cell %(cell)s',
                      {'cell': cell.identity})
            with context_module.target_cell(context, cell) as cctxt:
                if compute_uuids is None:
                    compute_nodes[cell.uuid].extend(
                        objects.ComputeNodeList.get_all(cctxt))
                else:
                    compute_nodes[cell.uuid].extend(
                        objects.ComputeNodeList.get_all_by_uuids(
                            cctxt, compute_uuids))
                services.update(
                    {service.host: service
                     for service in objects.ServiceList.get_by_binary(
                             cctxt, 'nova-compute',
                             include_disabled=True)})
        return compute_nodes, services
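
A caller might combine the two returned structures by pairing each compute node with its service record; the helper below is hypothetical and only assumes objects exposing host and disabled attributes:

    def usable_compute_nodes(compute_nodes, services):
        # Keep only nodes whose nova-compute service record exists and is
        # not disabled.
        usable = {}
        for cell_uuid, nodes in compute_nodes.items():
            usable[cell_uuid] = [
                node for node in nodes
                if node.host in services and not services[node.host].disabled]
        return usable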
Example #14
    def test_fixture(self):
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. Let's first list servers across all cells while they are
        # "up" to make sure that works as expected. We'll create a single
        # instance in cell1.
        ctxt = context.get_admin_context()
        cell1 = self.cell_mappings[test.CELL1_NAME]
        with context.target_cell(ctxt, cell1) as cctxt:
            inst = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst:
                delattr(inst, 'id')
            inst.create()

        # Now list all instances from all cells (should get one back).
        results = context.scatter_gather_all_cells(
            ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        self.assertEqual(0, len(results[objects.CellMapping.CELL0_UUID]))
        self.assertEqual(1, len(results[cell1.uuid]))

        # Now do the same but with the DownCellFixture which should result
        # in exception results from both cells.
        with fixtures.DownCellFixture():
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
            self.assertEqual(2, len(results))
            for result in results.values():
                self.assertIsInstance(result, db_exc.DBError)
Example #15
    def test_connection_switch(self):
        # Use a file-based sqlite database so data will persist across new
        # connections
        fake_conn = 'sqlite:///' + self.test_filename

        # The 'main' database connection will stay open, so in-memory is fine
        self.useFixture(fixtures.Database(database='main'))
        self.useFixture(fixtures.Database(connection=fake_conn))

        # Make a request context with a cell mapping
        mapping = objects.CellMapping(database_connection=fake_conn)
        # In the tests, the admin context is required in order to read
        # an Instance back after write, for some reason
        ctxt = context.get_admin_context()
        # Create an instance in the cell database
        uuid = uuidutils.generate_uuid()
        with context.target_cell(ctxt, mapping):
            instance = objects.Instance(context=ctxt, uuid=uuid)
            instance.create()

            # Verify the instance is found in the cell database
            inst = objects.Instance.get_by_uuid(ctxt, uuid)
            self.assertEqual(uuid, inst.uuid)

        # Verify the instance isn't found in the main database
        self.assertRaises(exception.InstanceNotFound,
                          objects.Instance.get_by_uuid, ctxt, uuid)
Example #16
    def _create_service_in_cell(ctxt, cell, binary, is_deleted=False,
                                disabled=False, version=None,
                                create_token_auth=False):
        with context.target_cell(ctxt, cell) as cctxt:
            service = objects.Service(context=cctxt, binary=binary,
                                      disabled=disabled, host='dontcare')
            if version:
                service.version = version
            service.create()

            if is_deleted:
                service.destroy()

            if create_token_auth:
                # We have to create an instance in order to create a token
                # auth.
                inst = objects.Instance(context=cctxt,
                                        uuid=uuidutils.generate_uuid())
                inst.create()
                auth = objects.ConsoleAuthToken(context=cctxt,
                                                console_type='novnc',
                                                host='hostname', port=6080,
                                                instance_uuid=inst.uuid)
                auth.authorize(CONF.consoleauth.token_ttl)

        return service
Example #17
def get_metadata_by_instance_id(instance_id, address, ctxt=None):
    ctxt = ctxt or context.get_admin_context()
    attrs = ['ec2_ids', 'flavor', 'info_cache',
             'metadata', 'system_metadata',
             'security_groups', 'keypairs',
             'device_metadata']

    if CONF.api.local_metadata_per_cell:
        instance = objects.Instance.get_by_uuid(ctxt, instance_id,
                                                expected_attrs=attrs)
        return InstanceMetadata(instance, address)

    try:
        im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_id)
    except exception.InstanceMappingNotFound:
        LOG.warning('Instance mapping for %(uuid)s not found; '
                    'cell setup is incomplete', {'uuid': instance_id})
        instance = objects.Instance.get_by_uuid(ctxt, instance_id,
                                                expected_attrs=attrs)
        return InstanceMetadata(instance, address)

    with context.target_cell(ctxt, im.cell_mapping) as cctxt:
        instance = objects.Instance.get_by_uuid(cctxt, instance_id,
                                                expected_attrs=attrs)
        return InstanceMetadata(instance, address)
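
Ignoring the local_metadata_per_cell shortcut and the error handling, the core lookup here (and in several of the other examples) is: resolve the instance's cell from its InstanceMapping, then read the instance inside target_cell. A minimal sketch of just that pattern, assuming the usual Nova imports are available and objects are registered:

    from nova import context as nova_context
    from nova import objects

    def get_instance_from_its_cell(ctxt, instance_uuid, expected_attrs=None):
        # Find which cell the instance lives in, then read it there.
        im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_uuid)
        with nova_context.target_cell(ctxt, im.cell_mapping) as cctxt:
            return objects.Instance.get_by_uuid(
                cctxt, instance_uuid, expected_attrs=expected_attrs or [])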
Example #18
    def _create_instance_in_cell(ctxt, cell, node, is_deleted=False,
                                 flavor_migrated=False):
        with context.target_cell(ctxt, cell) as cctxt:
            inst = objects.Instance(
                context=cctxt,
                host=node.host,
                node=node.hypervisor_hostname,
                uuid=uuidutils.generate_uuid())
            inst.create()

            if is_deleted:
                inst.destroy()
            else:
                # Create an embedded flavor for the instance. We don't create
                # this because we're in a cell context and flavors are global,
                # but we don't actually care about global flavors in this
                # check.
                extra_specs = {}
                if flavor_migrated:
                    extra_specs['resources:CUSTOM_BAREMETAL_GOLD'] = '1'
                inst.flavor = objects.Flavor(cctxt, extra_specs=extra_specs)
                inst.old_flavor = None
                inst.new_flavor = None
                inst.save()

            return inst
Example #19
    def test_instances_cores_ram_count(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        mapping1 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell1',
                                       transport_url='none:///')
        mapping2 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell2',
                                       transport_url='none:///')
        mapping1.create()
        mapping2.create()

        # Create an instance in cell1
        with context.target_cell(ctxt, mapping1) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user',
                                        vcpus=2, memory_mb=512)
            instance.create()

        # Create an instance in cell2
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user',
                                        vcpus=4, memory_mb=1024)
            instance.create()

        # Create an instance in cell2 for a different user
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='other-fake-user',
                                        vcpus=4, memory_mb=1024)
            instance.create()

        # Count instances, cores, and ram across cells
        count = quota._instances_cores_ram_count(ctxt, 'fake-project',
                                                 user_id='fake-user')

        self.assertEqual(3, count['project']['instances'])
        self.assertEqual(10, count['project']['cores'])
        self.assertEqual(2560, count['project']['ram'])
        self.assertEqual(2, count['user']['instances'])
        self.assertEqual(6, count['user']['cores'])
        self.assertEqual(1536, count['user']['ram'])
Example #20
    def test_cell_dbs(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        mapping1 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='blah',
                                       transport_url='none:///')
        mapping2 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='wat',
                                       transport_url='none:///')
        mapping1.create()
        mapping2.create()

        # Create an instance and read it from cell1
        uuid = uuidutils.generate_uuid()
        with context.target_cell(ctxt, mapping1):
            instance = objects.Instance(context=ctxt, uuid=uuid,
                                        project_id='fake-project')
            instance.create()

            inst = objects.Instance.get_by_uuid(ctxt, uuid)
            self.assertEqual(uuid, inst.uuid)

        # Make sure it can't be read from cell2
        with context.target_cell(ctxt, mapping2):
            self.assertRaises(exception.InstanceNotFound,
                              objects.Instance.get_by_uuid, ctxt, uuid)

        # Make sure it can still be read from cell1
        with context.target_cell(ctxt, mapping1):
            inst = objects.Instance.get_by_uuid(ctxt, uuid)
            self.assertEqual(uuid, inst.uuid)

        # Create an instance and read it from cell2
        uuid = uuidutils.generate_uuid()
        with context.target_cell(ctxt, mapping2):
            instance = objects.Instance(context=ctxt, uuid=uuid,
                                        project_id='fake-project')
            instance.create()

            inst = objects.Instance.get_by_uuid(ctxt, uuid)
            self.assertEqual(uuid, inst.uuid)

        # Make sure it can't be read from cell1
        with context.target_cell(ctxt, mapping1):
            self.assertRaises(exception.InstanceNotFound,
                              objects.Instance.get_by_uuid, ctxt, uuid)
Example #21
        def _async_init_instance_info(computes_by_cell):
            context = context_module.RequestContext()
            self._load_cells(context)
            LOG.debug("START:_async_init_instance_info")
            self._instance_info = {}

            count = 0
            if not computes_by_cell:
                computes_by_cell = {}
                for cell in self.cells:
                    with context_module.target_cell(context, cell) as cctxt:
                        cell_cns = objects.ComputeNodeList.get_all(
                            cctxt).objects
                        computes_by_cell[cell] = cell_cns
                        count += len(cell_cns)

            LOG.debug("Total number of compute nodes: %s", count)

            for cell, compute_nodes in computes_by_cell.items():
                # Break the queries into batches of 10 to reduce the total
                # number of calls to the DB.
                batch_size = 10
                start_node = 0
                end_node = batch_size
                while start_node <= len(compute_nodes):
                    curr_nodes = compute_nodes[start_node:end_node]
                    start_node += batch_size
                    end_node += batch_size
                    filters = {"host": [curr_node.host
                                        for curr_node in curr_nodes],
                               "deleted": False}
                    with context_module.target_cell(context, cell) as cctxt:
                        result = objects.InstanceList.get_by_filters(
                            cctxt.elevated(), filters)
                    instances = result.objects
                    LOG.debug("Adding %s instances for hosts %s-%s",
                              len(instances), start_node, end_node)
                    for instance in instances:
                        host = instance.host
                        if host not in self._instance_info:
                            self._instance_info[host] = {"instances": {},
                                                         "updated": False}
                        inst_dict = self._instance_info[host]
                        inst_dict["instances"][instance.uuid] = instance
                    # Call sleep() to cooperatively yield
                    time.sleep(0)
                LOG.debug("END:_async_init_instance_info")
Example #22
    def _check_request_spec_migration(self):
        """Checks to make sure request spec migrations are complete.

        Iterates all cells checking to see that non-deleted instances have
        a matching request spec in the API database. This is necessary in order
        to drop the migrate_instances_add_request_spec online data migration
        and accompanying compatibility code found through nova-api and
        nova-conductor.
        """
        meta = MetaData(bind=db_session.get_api_engine())
        cell_mappings = Table('cell_mappings', meta, autoload=True)
        mappings = cell_mappings.select().execute().fetchall()

        if not mappings:
            # There are no cell mappings so we can't determine this, just
            # return a warning. The cellsv2 check would have already failed
            # on this.
            msg = (_('Unable to determine request spec migrations without '
                     'cell mappings.'))
            return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

        request_specs = Table('request_specs', meta, autoload=True)
        ctxt = nova_context.get_admin_context()
        incomplete_cells = []  # list of cell mapping uuids
        for mapping in mappings:
            with nova_context.target_cell(ctxt, mapping) as cctxt:
                # Get all instance uuids for non-deleted instances in this
                # cell.
                meta = MetaData(bind=db_session.get_engine(context=cctxt))
                instances = Table('instances', meta, autoload=True)
                instance_records = (
                    select([instances.c.uuid]).select_from(instances).where(
                        instances.c.deleted == 0
                    ).execute().fetchall())
                # For each instance in the list, verify that it has a matching
                # request spec in the API DB.
                for inst in instance_records:
                    spec_id = (
                        select([request_specs.c.id]).select_from(
                            request_specs).where(
                            request_specs.c.instance_uuid == inst['uuid']
                        ).execute().scalar())
                    if spec_id is None:
                        # This cell does not have all of its instances
                        # migrated for request specs so track it and move on.
                        incomplete_cells.append(mapping['uuid'])
                        break

        # It's a failure if there are any unmigrated instances at this point
        # because we are planning to drop the online data migration routine and
        # compatibility code in Stein.
        if incomplete_cells:
            msg = (_("The following cells have instances which do not have "
                     "matching request_specs in the API database: %s Run "
                     "'nova-manage db online_data_migrations' on each cell "
                     "to create the missing request specs.") %
                   ', '.join(incomplete_cells))
            return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)
Example #23
    def test_server_group_members_count_by_user(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        mapping1 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell1',
                                       transport_url='none:///')
        mapping2 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell2',
                                       transport_url='none:///')
        mapping1.create()
        mapping2.create()

        # Create a server group the instances will use.
        group = objects.InstanceGroup(context=ctxt)
        group.project_id = ctxt.project_id
        group.user_id = ctxt.user_id
        group.create()
        instance_uuids = []

        # Create an instance in cell1
        with context.target_cell(ctxt, mapping1) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance_uuids.append(instance.uuid)

        # Create an instance in cell2
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance_uuids.append(instance.uuid)

        # Add the uuids to the group
        objects.InstanceGroup.add_members(ctxt, group.uuid, instance_uuids)
        # add_members() doesn't add the members to the object field
        group.members.extend(instance_uuids)

        # Count server group members across cells
        count = quota._server_group_count_members_by_user(ctxt, group,
                                                          'fake-user')

        self.assertEqual(2, count['user']['server_group_members'])
Example #24
    def _check_api_service_version(self):
        """Checks nova-osapi_compute service versions across cells.

        For non-cellsv1 deployments, based on how the [database]/connection
        is configured for the nova-api service, the nova-osapi_compute service
        versions before 15 will only attempt to lookup instances from the
        local database configured for the nova-api service directly.

        This can cause issues if there are newer API service versions in cell1
        after the upgrade to Ocata, but lingering older API service versions
        in an older database.

        This check will scan all cells looking for a minimum nova-osapi_compute
        service version less than 15 and if found, emit a warning that those
        service entries likely need to be cleaned up.
        """
        # If we're using cells v1 then we don't care about this.
        if CONF.cells.enable:
            return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)

        meta = MetaData(bind=db_session.get_api_engine())
        cell_mappings = Table('cell_mappings', meta, autoload=True)
        mappings = cell_mappings.select().execute().fetchall()

        if not mappings:
            # There are no cell mappings so we can't determine this, just
            # return a warning. The cellsv2 check would have already failed
            # on this.
            msg = (_('Unable to determine API service versions without '
                     'cell mappings.'))
            return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)

        ctxt = nova_context.get_admin_context()
        cells_with_old_api_services = []
        for mapping in mappings:
            with nova_context.target_cell(ctxt, mapping) as cctxt:
                # Get the minimum nova-osapi_compute service version in this
                # cell.
                min_version = self._get_min_service_version(
                    cctxt, 'nova-osapi_compute')
                if min_version is not None and min_version < 15:
                    cells_with_old_api_services.append(mapping['uuid'])

        # If there are any cells with older API versions, we report it as a
        # warning since we don't know how the actual nova-api service is
        # configured, but we need to give the operator some indication that
        # they have something to investigate/cleanup.
        if cells_with_old_api_services:
            msg = (_("The following cells have 'nova-osapi_compute' services "
                     "with version < 15 which may cause issues when querying "
                     "instances from the API: %s. Depending on how nova-api "
                     "is configured, this may not be a problem, but is worth "
                     "investigating and potentially cleaning up those older "
                     "records. See "
                     "https://bugs.launchpad.net/nova/+bug/1759316 for "
                     "details.") % ', '.join(cells_with_old_api_services))
            return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)
        return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
Example #25
    def _assert_instance_az(self, server, expected_zone):
        # Check the API.
        self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone'])
        # Check the DB.
        ctxt = context.get_admin_context()
        with context.target_cell(
                ctxt, self.cell_mappings[test.CELL1_NAME]) as cctxt:
            instance = objects.Instance.get_by_uuid(cctxt, server['id'])
            self.assertEqual(expected_zone, instance.availability_zone)
Example #26
    def test_target_cell(self, mock_create_ctxt_mgr):
        mock_create_ctxt_mgr.return_value = mock.sentinel.cm
        ctxt = context.RequestContext("111", "222", roles=["admin", "weasel"])
        # Verify the existing db_connection, if any, is restored
        ctxt.db_connection = mock.sentinel.db_conn
        mapping = objects.CellMapping(database_connection="fake://")
        with context.target_cell(ctxt, mapping):
            self.assertEqual(ctxt.db_connection, mock.sentinel.cm)
        self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
Example #27
    def test_check_success(self):
        """Tests the success scenario where we have cell0 with a current API
        service, cell1 with no API services, and an empty cell2.
        """
        self._setup_cells()
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        with context.target_cell(ctxt, cell0) as cctxt:
            self._create_service(cctxt, host='cell0host1',
                                 binary='nova-osapi_compute', version=15)

        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell1) as cctxt:
            self._create_service(cctxt, host='cell1host1',
                                 binary='nova-compute', version=15)

        result = self.cmd._check_api_service_version()
        self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
Example #28
    def test_check_warning(self):
        """This is a failure scenario where we have the following setup:

        Three cells where:

        1. The first cell has two API services, one with version < 15 and one
           with version >= 15.
        2. The second cell has two services, one with version < 15 but it's
           deleted so it gets filtered out, and one with version >= 15.
        3. The third cell doesn't have any API services, just old compute
           services which should be filtered out.

        In this scenario, the first cell should be reported with a warning.
        """
        self._setup_cells()
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        with context.target_cell(ctxt, cell0) as cctxt:
            self._create_service(cctxt, host='cell0host1',
                                 binary='nova-osapi_compute', version=14)
            self._create_service(cctxt, host='cell0host2',
                                 binary='nova-osapi_compute', version=15)

        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell1) as cctxt:
            svc = self._create_service(
                cctxt, host='cell1host1', binary='nova-osapi_compute',
                version=14)
            # This deleted record with the old version should get filtered out.
            svc.destroy()
            self._create_service(cctxt, host='cell1host2',
                                 binary='nova-osapi_compute', version=16)

        cell2 = self.cell_mappings['cell2']
        with context.target_cell(ctxt, cell2) as cctxt:
            self._create_service(cctxt, host='cell2host1',
                                 binary='nova-compute', version=14)

        result = self.cmd._check_api_service_version()
        self.assertEqual(status.UpgradeCheckCode.WARNING, result.code)
        # The only cell in the message should be cell0.
        self.assertIn(cell0.uuid, result.details)
        self.assertNotIn(cell1.uuid, result.details)
        self.assertNotIn(cell2.uuid, result.details)
Example #29
    def test_service_get_all_cells(self):
        cells = objects.CellMappingList.get_all(self.ctxt)
        for cell in cells:
            with context.target_cell(self.ctxt, cell) as cctxt:
                objects.Service(context=cctxt,
                                binary='nova-compute',
                                host='host-%s' % cell.uuid).create()
        services = self.host_api.service_get_all(self.ctxt, all_cells=True)
        self.assertEqual(sorted(['host-%s' % cell.uuid for cell in cells]),
                         sorted([svc.host for svc in services]))
Example #30
    def _create_services(self, *versions):
        cells = objects.CellMappingList.get_all(self.context)
        index = 0
        for version in versions:
            service = objects.Service(context=self.context,
                                      binary='nova-compute')
            service.version = version
            cell = cells[index % len(cells)]
            with context.target_cell(self.context, cell):
                service.create()
            index += 1
Example #31
    def _create_instance_in_cell(ctxt,
                                 cell,
                                 is_deleted=False,
                                 create_request_spec=False):
        with context.target_cell(ctxt, cell) as cctxt:
            inst = objects.Instance(context=cctxt,
                                    uuid=uuidutils.generate_uuid())
            inst.create()

            if is_deleted:
                inst.destroy()

        if create_request_spec:
            # Fake out some fields in the Instance so we don't lazy-load them.
            inst.flavor = objects.Flavor()
            inst.numa_topology = None
            inst.system_metadata = {}
            inst.pci_requests = None
            inst.project_id = 'fake-project'
            inst.user_id = 'fake-user'
            _create_minimal_request_spec(ctxt, inst)

        return inst
Example #32
    def test_delete_while_in_verify_resize_status(self):
        """Tests that when deleting a server in VERIFY_RESIZE status, the
        data is cleaned from both the source and target cell.
        """
        server = self._resize_and_validate()[0]
        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        # Now list servers to make sure it doesn't show up from the source cell
        servers = self.api.get_servers()
        self.assertEqual(0, len(servers), servers)
        # FIXME(mriedem): Need to cleanup from source cell in API method
        # _confirm_resize_on_deleting(). The above check passes because the
        # instance is still hidden in the source cell so the API filters it
        # out.
        target_host = server['OS-EXT-SRV-ATTR:host']
        source_host = 'host1' if target_host == 'host2' else 'host2'
        source_cell = self.cell_mappings[
            self.host_to_cell_mappings[source_host]]
        ctxt = nova_context.get_admin_context()
        with nova_context.target_cell(ctxt, source_cell) as cctxt:
            # Once the API is fixed this should raise InstanceNotFound.
            instance = objects.Instance.get_by_uuid(cctxt, server['id'])
            self.assertTrue(instance.hidden)
Example #33
def _get_marker_instance(ctx, marker):
    """Get the marker instance from its cell.

    This returns the marker instance from the cell in which it lives
    """

    try:
        im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker)
    except exception.InstanceMappingNotFound:
        raise exception.MarkerNotFound(marker=marker)

    elevated = ctx.elevated(read_deleted='yes')
    with context.target_cell(elevated, im.cell_mapping) as cctx:
        try:
            # NOTE(danms): We query this with no columns_to_join()
            # as we're just getting values for the sort keys from
            # it and none of the valid sort keys are on joined
            # columns.
            db_inst = db.instance_get_by_uuid(cctx, marker,
                                              columns_to_join=[])
        except exception.InstanceNotFound:
            raise exception.MarkerNotFound(marker=marker)
    return db_inst
Example #34
    def delete_server_and_assert_cleanup(self, server):
        """Deletes the server and makes various cleanup checks.

        - makes sure allocations from placement are gone
        - makes sure the instance record is gone from both cells
        - makes sure there are no leaked volume attachments

        :param server: dict of the server resource to delete
        """
        # Determine which cell the instance was in when the server was deleted
        # in the API so we can check hard vs soft delete in the DB.
        current_cell = self.host_to_cell_mappings[
            server['OS-EXT-SRV-ATTR:host']]
        # Delete the server and check that the allocations are gone from
        # the placement service.
        self._delete_and_check_allocations(server)
        # Make sure the instance record is gone from both cell databases.
        ctxt = nova_context.get_admin_context()
        for cell_name in self.host_to_cell_mappings.values():
            cell = self.cell_mappings[cell_name]
            with nova_context.target_cell(ctxt, cell) as cctxt:
                # If this is the current cell the instance was in when it was
                # deleted it should be soft-deleted (instance.deleted!=0),
                # otherwise it should be hard-deleted and getting it with a
                # read_deleted='yes' context should still raise.
                read_deleted = 'no' if current_cell == cell_name else 'yes'
                with utils.temporary_mutation(cctxt,
                                              read_deleted=read_deleted):
                    self.assertRaises(exception.InstanceNotFound,
                                      objects.Instance.get_by_uuid, cctxt,
                                      server['id'])
        # Make sure there are no leaked volume attachments.
        attachment_count = self._count_volume_attachments(server['id'])
        self.assertEqual(
            0, attachment_count,
            'Leaked volume attachments: %s' % self.cinder.volume_to_attachment)
Example #35
    def setUp(self):
        super(ImageCacheTest, self).setUp()

        self.flags(compute_driver='fake.FakeDriverWithCaching')

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
        self.context = context.get_admin_context()

        self.conductor = self.start_service('conductor')
        self.compute1 = self.start_service('compute', host='compute1')
        self.compute2 = self.start_service('compute', host='compute2')
        self.compute3 = self.start_service('compute', host='compute3',
                                           cell='cell2')
        self.compute4 = self.start_service('compute', host='compute4',
                                           cell='cell2')
        self.compute5 = self.start_service('compute', host='compute5',
                                           cell='cell2')

        cell2 = self.cell_mappings['cell2']
        with context.target_cell(self.context, cell2) as cctxt:
            srv = objects.Service.get_by_compute_host(cctxt, 'compute5')
            srv.forced_down = True
            srv.save()
Example #36
def handle_password(req, meta_data):
    ctxt = context.get_admin_context()
    if req.method == 'GET':
        return meta_data.password
    elif req.method == 'POST':
        # NOTE(vish): The conflict will only happen once the metadata cache
        #             updates, but it isn't a huge issue if it can be set for
        #             a short window.
        if meta_data.password:
            raise exc.HTTPConflict()
        if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE):
            msg = _("Request is too large.")
            raise exc.HTTPBadRequest(explanation=msg)

        im = objects.InstanceMapping.get_by_instance_uuid(ctxt, meta_data.uuid)
        with context.target_cell(ctxt, im.cell_mapping) as cctxt:
            try:
                instance = objects.Instance.get_by_uuid(cctxt, meta_data.uuid)
            except exception.InstanceNotFound as e:
                raise exc.HTTPBadRequest(explanation=e.format_message())
        instance.system_metadata.update(convert_password(ctxt, req.body))
        instance.save()
    else:
        raise exc.HTTPBadRequest()
Example #37
def populate_user_id(context, max_count):
    cells = objects.CellMappingList.get_all(context)
    cms_by_id = {cell.id: cell for cell in cells}
    done = 0
    unmigratable_ims = False
    ims = (
        # Get a list of instance mappings which do not have user_id populated.
        # We need to include records with queued_for_delete=True because they
        # include SOFT_DELETED instances, which could be restored at any time
        # in the future. If we don't migrate SOFT_DELETED instances now, we
        # wouldn't be able to retire this migration code later. Also filter
        # out the marker instance created by the virtual interface migration.
        context.session.query(
            api_models.InstanceMapping
        ).filter_by(user_id=None).filter(api_models.InstanceMapping.project_id
                                         != virtual_interface.FAKE_UUID
                                         ).limit(max_count).all())
    found = len(ims)
    ims_by_inst_uuid = {}
    inst_uuids_by_cell_id = collections.defaultdict(set)
    for im in ims:
        ims_by_inst_uuid[im.instance_uuid] = im
        inst_uuids_by_cell_id[im.cell_id].add(im.instance_uuid)
    for cell_id, inst_uuids in inst_uuids_by_cell_id.items():
        # We cannot migrate instance mappings that don't have a cell yet.
        if cell_id is None:
            unmigratable_ims = True
            continue
        with nova_context.target_cell(context, cms_by_id[cell_id]) as cctxt:
            # We need to migrate SOFT_DELETED instances because they could be
            # restored at any time in the future, preventing us from being able
            # to remove any other interim online data migration code we have,
            # if we don't migrate them here.
            # NOTE: it's not possible to query only for SOFT_DELETED instances.
            # We must query for both deleted and SOFT_DELETED instances.
            filters = {'uuid': inst_uuids}
            try:
                instances = objects.InstanceList.get_by_filters(
                    cctxt, filters, expected_attrs=[])
            except Exception as exp:
                LOG.warning(
                    'Encountered exception: "%s" while querying '
                    'instances from cell: %s. Continuing to the next '
                    'cell.', str(exp), cms_by_id[cell_id].identity)
                continue
        # Walk through every instance that has a mapping needing to be updated
        # and update it.
        for instance in instances:
            im = ims_by_inst_uuid.pop(instance.uuid)
            im.user_id = instance.user_id
            context.session.add(im)
            done += 1
        if ims_by_inst_uuid:
            unmigratable_ims = True
        if done >= max_count:
            break

    if unmigratable_ims:
        LOG.warning('Some instance mappings were not migratable. This may '
                    'be transient due to in-flight instance builds, or could '
                    'be due to stale data that will be cleaned up after '
                    'running "nova-manage db archive_deleted_rows --purge".')

    return found, done
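
The (found, done) return convention mirrors what Nova's online data migrations report back to nova-manage, which keeps invoking a migration in batches until nothing more is found. A simplified, hypothetical illustration of that accounting (all names are invented):

    def migrate_batch(candidates, migrate_one, max_count):
        # 'found' counts records picked up this run; 'done' counts records
        # actually migrated. Failures are left behind for a later run.
        batch = candidates[:max_count]
        found = len(batch)
        done = 0
        for record in batch:
            try:
                migrate_one(record)
            except Exception:
                continue
            done += 1
        return found, done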
Example #38
    def _check_resource_providers(self):
        """Checks the status of resource provider reporting.

        This check relies on the cells v2 check passing because it queries the
        cells for compute nodes using cell mappings.

        This check relies on the placement service running because if it's not
        then there won't be any resource providers for the filter scheduler to
        use during instance build and move requests.

        Note that in Ocata, the filter scheduler will only use placement if
        the minimum nova-compute service version in the deployment is >= 16,
        which is the version at which nova-compute fails to start if placement
        is not configured on the compute. Otherwise the scheduler will fall
        back to pulling compute nodes from the database directly, as it has
        always done. That fallback will be removed in Pike.
        """

        # Get the total count of resource providers from the API DB that can
        # host compute resources. This might be 0 so we have to figure out if
        # this is a fresh install and if so we don't consider this an error.
        num_rps = self._count_compute_resource_providers()

        cell_mappings = self._get_non_cell0_mappings()
        ctxt = nova_context.get_admin_context()
        num_computes = 0
        if cell_mappings:
            for cell_mapping in cell_mappings:
                with nova_context.target_cell(ctxt, cell_mapping) as cctxt:
                    num_computes += self._count_compute_nodes(cctxt)
        else:
            # There are no cell mappings, cells v2 was maybe not deployed in
            # Newton, but placement might have been, so let's check the single
            # database for compute nodes.
            num_computes = self._count_compute_nodes()

        if num_rps == 0:

            if num_computes != 0:
                # This is a warning because there are compute nodes in the
                # database but nothing is reporting resource providers to the
                # placement service. This will not result in scheduling
                # failures in Ocata because of the fallback that is in place
                # but we signal it as a warning since there is work to do.
                msg = (_('There are no compute resource providers in the '
                         'Placement service but there are %(num_computes)s '
                         'compute nodes in the deployment. This means no '
                         'compute nodes are reporting into the Placement '
                         'service and need to be upgraded and/or fixed. See '
                         '%(placement_docs_link)s for more details.') % {
                             'num_computes': num_computes,
                             'placement_docs_link': PLACEMENT_DOCS_LINK
                         })
                return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)

            # There are no resource providers and no compute nodes so we
            # assume this is a fresh install and move on. We should return a
            # success code with a message here though.
            msg = (_('There are no compute resource providers in the '
                     'Placement service nor are there compute nodes in the '
                     'database. Remember to configure new compute nodes to '
                     'report into the Placement service. See '
                     '%(placement_docs_link)s for more details.') % {
                         'placement_docs_link': PLACEMENT_DOCS_LINK
                     })
            return UpgradeCheckResult(UpgradeCheckCode.SUCCESS, msg)

        elif num_rps < num_computes:
            # There are fewer resource providers than compute nodes, so return
            # a warning explaining that the deployment might be underutilized.
            # Technically this will not result in scheduling failures in
            # Ocata because of the fallback that is in place if there are
            # still older compute nodes, but it is probably OK to leave the
            # wording as-is to prepare for when the fallback is removed in
            # Pike.
            msg = (_('There are %(num_resource_providers)s compute resource '
                     'providers and %(num_compute_nodes)s compute nodes in '
                     'the deployment. Ideally the number of compute resource '
                     'providers should equal the number of enabled compute '
                     'nodes otherwise the cloud may be underutilized. '
                     'See %(placement_docs_link)s for more details.') % {
                         'num_resource_providers': num_rps,
                         'num_compute_nodes': num_computes,
                         'placement_docs_link': PLACEMENT_DOCS_LINK
                     })
            return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)
        else:
            # We have RPs >= CNs which is what we want to see.
            return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
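
The per-cell counting in this check reduces to targeting each cell mapping in turn. A standalone sketch of that pattern, assuming the same nova.objects and nova.context APIs used throughout these examples (the function name is ours):

from nova import context as nova_context
from nova import objects


def count_compute_nodes_per_cell(ctxt):
    """Return a dict mapping cell uuid to its compute node count."""
    counts = {}
    for cm in objects.CellMappingList.get_all(ctxt):
        # cell0 never contains compute hosts, so skip it.
        if cm.is_cell0():
            continue
        with nova_context.target_cell(ctxt, cm) as cctxt:
            counts[cm.uuid] = len(objects.ComputeNodeList.get_all(cctxt))
    return counts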
Esempio n. 39
0
def fill_virtual_interface_list(context, max_count):
    """This fills missing VirtualInterface Objects in Nova DB"""
    count_hit = 0
    count_all = 0

    def _regenerate_vif_list_base_on_cache(context, instance, old_vif_list,
                                           nw_info):
        # Set old VirtualInterfaces as deleted.
        for vif in old_vif_list:
            vif.destroy()

        # Generate list based on current cache:
        for vif in nw_info:
            vif_obj = objects.VirtualInterface(context)
            vif_obj.uuid = vif['id']
            vif_obj.address = "%s/%s" % (vif['address'], vif['id'])
            vif_obj.instance_uuid = instance['uuid']
            # Find the tag from the previous VirtualInterface object, if it
            # exists.
            old_vif = [x for x in old_vif_list if x.uuid == vif['id']]
            vif_obj.tag = old_vif[0].tag if len(old_vif) > 0 else None
            vif_obj.create()

    cells = objects.CellMappingList.get_all(context)
    for cell in cells:
        if count_all == max_count:
            # We reached the limit of instances to check in this
            # run of the function.
            # Stop; do not move on to the next cell.
            break

        with nova_context.target_cell(context, cell) as cctxt:
            marker = _get_marker_for_migrate_instances(cctxt)
            filters = {'deleted': False}

            # Adjust the limit of instances to migrate.
            # If the user wants to process a total of 100 instances
            # and we already handled 75 in cell1, then we only need to
            # verify 25 more in cell2, no more.
            adjusted_limit = max_count - count_all

            instances = objects.InstanceList.get_by_filters(
                cctxt,
                filters=filters,
                sort_key='created_at',
                sort_dir='asc',
                marker=marker,
                limit=adjusted_limit)

            for instance in instances:
                # We don't want to fill vif for FAKE instance.
                if instance.uuid == FAKE_UUID:
                    continue

                try:
                    info_cache = objects.InstanceInfoCache.\
                        get_by_instance_uuid(cctxt, instance.get('uuid'))
                    if not info_cache.network_info:
                        LOG.info('InstanceInfoCache object has not set '
                                 'NetworkInfo field. '
                                 'Skipping build of VirtualInterfaceList.')
                        continue
                except exception.InstanceInfoCacheNotFound:
                    LOG.info('Instance has no InstanceInfoCache object. '
                             'Skipping build of VirtualInterfaceList for it.')
                    continue

                # By design this filters out deleted VIFs.
                vif_list = VirtualInterfaceList.\
                    get_by_instance_uuid(cctxt, instance.get('uuid'))

                nw_info = info_cache.network_info
                # This should be a list of VIFs in the proper order,
                # but we're not sure about that.
                cached_vif_ids = [vif['id'] for vif in nw_info]
                # This is ordered list of vifs taken from db.
                db_vif_ids = [vif.uuid for vif in vif_list]

                count_all += 1
                if cached_vif_ids == db_vif_ids:
                    # The list of VIFs and their order are the same in the
                    # cache and in virtual_interfaces, so we can stop here.
                    continue
                elif len(db_vif_ids) < len(cached_vif_ids):
                    # This seems to be an instance from a release older than
                    # Newton for which we don't have a full
                    # VirtualInterfaceList. Rewrite the whole list using the
                    # interface order from InstanceInfoCache.
                    count_hit += 1
                    LOG.info(
                        'Got an instance %s with fewer VIFs defined in the '
                        'DB than in the cache. It could be a pre-Newton '
                        'instance. Building a new VirtualInterfaceList for '
                        'it.',
                        instance.uuid)
                    _regenerate_vif_list_base_on_cache(cctxt, instance,
                                                       vif_list, nw_info)
                elif len(db_vif_ids) > len(cached_vif_ids):
                    # The VIF list seems inconsistent with the cache.
                    # It could be a broken cache or an interface attach
                    # in progress. Do nothing.
                    LOG.info(
                        'Got an unexpected number of VIF records in the '
                        'database compared to what was stored in the '
                        'instance_info_caches table for instance %s. '
                        'Perhaps an interface attach is in progress for '
                        'it. Doing nothing.', instance.uuid)
                    continue
                else:
                    # The order differs between the two lists.
                    # We need a source of truth, so rebuild the order
                    # from the cache.
                    count_hit += 1
                    LOG.info(
                        'Got an instance %s with a different order of '
                        'VIFs between the DB and the cache. '
                        'We need a source of truth, so rebuilding the order '
                        'from the cache.', instance.uuid)
                    _regenerate_vif_list_base_on_cache(cctxt, instance,
                                                       vif_list, nw_info)

            # Set the marker to point at the last checked instance.
            if instances:
                marker = instances[-1].uuid
                _set_or_delete_marker_for_migrate_instances(cctxt, marker)

    return count_all, count_hit
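
The decision tree in the loop above can be distilled into a small pure helper; this is only an illustration of the three outcomes and is not part of nova:

def classify_vif_state(db_vif_ids, cached_vif_ids):
    """Mirror the branching in fill_virtual_interface_list above."""
    if cached_vif_ids == db_vif_ids:
        # Same VIFs in the same order: nothing to do.
        return 'in-sync'
    if len(db_vif_ids) > len(cached_vif_ids):
        # More records in the DB than in the cache, e.g. an attach in
        # progress: leave it alone.
        return 'skip'
    # Fewer records than cached, or same length but different order:
    # rebuild the list from the cache.
    return 'rebuild'

# e.g. classify_vif_state(['a'], ['a', 'b']) -> 'rebuild'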
Esempio n. 40
0
    def test_live_migrate_implicit_az(self):
        """Tests live migration of an instance with an implicit AZ.

        Before Pike, a server created without an explicit availability zone
        was assigned a default AZ based on the "default_schedule_zone" config
        option which defaults to None, which allows the instance to move
        freely between availability zones.

        With change I8d426f2635232ffc4b510548a905794ca88d7f99 in Pike, if the
        user does not request an availability zone, the
        instance.availability_zone field is set based on the host chosen by
        the scheduler. The default AZ for all nova-compute services is
        determined by the "default_availability_zone" config option which
        defaults to "nova".

        This test creates two nova-compute services in separate zones, creates
        a server without specifying an explicit zone, and then tries to live
        migrate the instance to the other compute which should succeed because
        the request spec does not include an explicit AZ, so the instance is
        still not restricted to its current zone even if it says it is in one.
        """
        # Start two compute services in separate zones.
        self._start_host_in_zone('host1', 'zone1')
        self._start_host_in_zone('host2', 'zone2')

        # Create a server, it doesn't matter which host it ends up in.
        server_body = self._build_minimal_create_server_request(
            self.api,
            'test_live_migrate_implicit_az_restriction',
            image_uuid=fake_image.get_valid_image_id(),
            networks='none')
        server = self.api.post_server({'server': server_body})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        original_host = server['OS-EXT-SRV-ATTR:host']
        # Assert the server has the AZ set (not None or 'nova').
        expected_zone = 'zone1' if original_host == 'host1' else 'zone2'
        self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone'])

        # Attempt to live migrate the instance; again, we don't specify a host
        # because there are only two hosts so the scheduler would only be able
        # to pick the second host which is in a different zone.
        live_migrate_req = {
            'os-migrateLive': {
                'block_migration': 'auto',
                'host': None
            }
        }
        self.api.post_server_action(server['id'], live_migrate_req)

        # Poll the migration until it is done.
        migration = self._wait_for_migration_status(server, ['completed'])
        self.assertEqual('live-migration', migration['migration_type'])

        # Assert that the server did move. Note that we check both the API and
        # the database because the API will return the AZ from the host
        # aggregate if instance.host is not None.
        server = self.api.get_server(server['id'])
        expected_zone = 'zone2' if original_host == 'host1' else 'zone1'
        self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone'])

        ctxt = context.get_admin_context()
        with context.target_cell(ctxt,
                                 self.cell_mappings[test.CELL1_NAME]) as cctxt:
            instance = objects.Instance.get_by_uuid(cctxt, server['id'])
            self.assertEqual(expected_zone, instance.availability_zone)
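
The `_start_host_in_zone` helper is referenced but not shown in this snippet. A plausible sketch, assuming the usual functional-test base class helpers (`start_service`, the `api_post` test client call) and the os-aggregates API; the helper in nova may differ in detail:

    def _start_host_in_zone(self, host, zone):
        # Start a nova-compute service with the given hostname.
        self.start_service('compute', host=host)
        # Create an aggregate whose availability zone is "zone" and add the
        # host to it so the compute reports that AZ.
        aggregate = self.api.api_post(
            '/os-aggregates',
            {'aggregate': {'name': zone, 'availability_zone': zone}}
        ).body['aggregate']
        self.api.api_post(
            '/os-aggregates/%s/action' % aggregate['id'],
            {'add_host': {'host': host}})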
Esempio n. 41
0
    def _assert_instance_not_in_cell(self, cell_name, server_id):
        cell = self.cell_mappings[cell_name]
        ctxt = nova_context.get_admin_context(read_deleted='yes')
        with nova_context.target_cell(ctxt, cell) as cctxt:
            self.assertRaises(exception.InstanceNotFound,
                              objects.Instance.get_by_uuid, cctxt, server_id)
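
A hedged usage sketch for the helper above: after a cross-cell move is confirmed, a test might check that the instance record only exists in the target cell (the `_resize_and_confirm` helper and the cell names here are hypothetical):

    def test_confirm_removes_instance_from_source_cell(self):
        server = self._resize_and_confirm()  # hypothetical helper
        # The instance should no longer exist in the source cell...
        self._assert_instance_not_in_cell('cell1', server['id'])
        # ...but it should still be retrievable from the target cell.
        target = self.cell_mappings['cell2']
        ctxt = nova_context.get_admin_context()
        with nova_context.target_cell(ctxt, target) as cctxt:
            objects.Instance.get_by_uuid(cctxt, server['id'])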
Esempio n. 42
0
    def _check_resource_providers(self):
        """Checks the status of resource provider reporting.

        This check relies on the cells v2 check passing because it queries the
        cells for compute nodes using cell mappings.

        This check relies on the placement service running because if it's not
        then there won't be any resource providers for the filter scheduler to
        use during instance build and move requests.
        """

        # Get the total count of resource providers from the API DB that can
        # host compute resources. This might be 0 so we have to figure out if
        # this is a fresh install and if so we don't consider this an error.
        num_rps = self._count_compute_resource_providers()

        cell_mappings = self._get_non_cell0_mappings()
        ctxt = nova_context.get_admin_context()
        num_computes = 0
        if cell_mappings:
            for cell_mapping in cell_mappings:
                with nova_context.target_cell(ctxt, cell_mapping):
                    num_computes += self._count_compute_nodes(ctxt)
        else:
            # There are no cell mappings, cells v2 was maybe not deployed in
            # Newton, but placement might have been, so let's check the single
            # database for compute nodes.
            num_computes = self._count_compute_nodes()

        if num_rps == 0:

            if num_computes != 0:
                # This is a failure because there are compute nodes in the
                # database but nothing is reporting resource providers to the
                # placement service.
                msg = (_('There are no compute resource providers in the '
                         'Placement service but there are %(num_computes)s '
                         'compute nodes in the deployment. This means no '
                         'compute nodes are reporting into the Placement '
                         'service and need to be upgraded and/or fixed. See '
                         '%(placement_docs_link)s for more details.') %
                       {'num_computes': num_computes,
                        'placement_docs_link': PLACEMENT_DOCS_LINK})
                return UpgradeCheckResult(UpgradeCheckCode.FAILURE, msg)

            # There are no resource providers and no compute nodes so we
            # assume this is a fresh install and move on. We should return a
            # success code with a message here though.
            msg = (_('There are no compute resource providers in the '
                     'Placement service nor are there compute nodes in the '
                     'database. Remember to configure new compute nodes to '
                     'report into the Placement service. See '
                     '%(placement_docs_link)s for more details.') %
                   {'placement_docs_link': PLACEMENT_DOCS_LINK})
            return UpgradeCheckResult(UpgradeCheckCode.SUCCESS, msg)

        elif num_rps < num_computes:
            # There are fewer resource providers than compute nodes, so return
            # a warning explaining that the deployment might be underutilized.
            msg = (_('There are %(num_resource_providers)s compute resource '
                     'providers and %(num_compute_nodes)s compute nodes in '
                     'the deployment. Ideally the number of compute resource '
                     'providers should equal the number of enabled compute '
                     'nodes otherwise the cloud may be underutilized. '
                     'See %(placement_docs_link)s for more details.') %
                   {'num_resource_providers': num_rps,
                    'num_compute_nodes': num_computes,
                    'placement_docs_link': PLACEMENT_DOCS_LINK})
            return UpgradeCheckResult(UpgradeCheckCode.WARNING, msg)
        else:
            # We have RPs >= CNs which is what we want to see.
            return UpgradeCheckResult(UpgradeCheckCode.SUCCESS)
Esempio n. 43
0
    def _check_console_auths(self):
        """Checks for console usage and warns with info for rolling upgrade.

        Iterates all cells checking to see if the nova-consoleauth service is
        non-deleted/non-disabled and whether there are any console token auths
        in that cell database. If there is a nova-consoleauth service being
        used and no console token auths in the cell database, emit a warning
        telling the user to set [workarounds]enable_consoleauth = True if they
        are performing a rolling upgrade.
        """
        # If we're using cells v1, we don't need to check if the workaround
        # needs to be used because cells v1 always uses nova-consoleauth.
        # If the operator has already enabled the workaround, we don't need
        # to check anything.
        if CONF.cells.enable or CONF.workarounds.enable_consoleauth:
            return upgradecheck.Result(upgradecheck.Code.SUCCESS)

        # We need to check cell0 for nova-consoleauth service records because
        # it's possible a deployment could have services stored in the cell0
        # database, if they've defaulted their [database]connection in
        # nova.conf to cell0.
        meta = MetaData(bind=db_session.get_api_engine())
        cell_mappings = Table('cell_mappings', meta, autoload=True)
        mappings = cell_mappings.select().execute().fetchall()

        if not mappings:
            # There are no cell mappings so we can't determine this, just
            # return a warning. The cellsv2 check would have already failed
            # on this.
            msg = (_('Unable to check consoles without cell mappings.'))
            return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

        ctxt = nova_context.get_admin_context()
        # If we find a non-deleted, non-disabled nova-consoleauth service in
        # any cell, we will assume the deployment is using consoles.
        using_consoles = False
        for mapping in mappings:
            with nova_context.target_cell(ctxt, mapping) as cctxt:
                # Check for any non-deleted, non-disabled nova-consoleauth
                # service.
                meta = MetaData(bind=db_session.get_engine(context=cctxt))
                services = Table('services', meta, autoload=True)
                consoleauth_service_record = (select([
                    services.c.id
                ]).select_from(services).where(
                    and_(services.c.binary == 'nova-consoleauth',
                         services.c.deleted == 0,
                         services.c.disabled == false())).execute().first())
                if consoleauth_service_record:
                    using_consoles = True
                    break

        if using_consoles:
            # If the deployment is using consoles, we can only be certain the
            # upgrade is complete if each compute service is >= Rocky and
            # supports storing console token auths in the database backend.
            for mapping in mappings:
                # Skip cell0 as no compute services should be in it.
                if mapping['uuid'] == cell_mapping_obj.CellMapping.CELL0_UUID:
                    continue
                # Get the minimum nova-compute service version in this
                # cell.
                with nova_context.target_cell(ctxt, mapping) as cctxt:
                    min_version = self._get_min_service_version(
                        cctxt, 'nova-compute')
                    # We could get None for the minimum version in the case of
                    # new install where there are no computes. If there are
                    # compute services, they should all have versions.
                    if min_version is not None and min_version < 35:
                        msg = _("One or more cells were found which have "
                                "nova-compute services older than Rocky. "
                                "Please set the "
                                "'[workarounds]enable_consoleauth' "
                                "configuration option to 'True' on your "
                                "console proxy host if you are performing a "
                                "rolling upgrade to enable consoles to "
                                "function during a partial upgrade.")
                        return upgradecheck.Result(upgradecheck.Code.WARNING,
                                                   msg)

        return upgradecheck.Result(upgradecheck.Code.SUCCESS)
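
The `_get_min_service_version` helper used above is not shown here. A sketch of what it plausibly does, reusing the SQLAlchemy style of the surrounding code (this is an assumption, not necessarily nova's exact implementation; it assumes `from sqlalchemy import func`):

    def _get_min_service_version(self, context, binary):
        # Select the minimum 'version' for the given service binary from the
        # cell database that the context is currently targeted at.
        meta = MetaData(bind=db_session.get_engine(context=context))
        services = Table('services', meta, autoload=True)
        return select([func.min(services.c.version)]).select_from(
            services).where(
                and_(services.c.binary == binary,
                     services.c.deleted == 0)).execute().scalar()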
Esempio n. 44
0
    def test_server_group_members_count_by_user(self, uid_qfd_populated,
                                                mock_uid_qfd_populated,
                                                mock_warn_log):
        mock_uid_qfd_populated.return_value = uid_qfd_populated
        ctxt = context.RequestContext('fake-user', 'fake-project')
        mapping1 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell1',
                                       transport_url='none:///')
        mapping2 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell2',
                                       transport_url='none:///')
        mapping1.create()
        mapping2.create()

        # Create a server group the instances will use.
        group = objects.InstanceGroup(context=ctxt)
        group.project_id = ctxt.project_id
        group.user_id = ctxt.user_id
        group.create()
        instance_uuids = []

        # Create an instance in cell1
        with context.target_cell(ctxt, mapping1) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance_uuids.append(instance.uuid)
        im = objects.InstanceMapping(context=ctxt,
                                     instance_uuid=instance.uuid,
                                     project_id='fake-project',
                                     user_id='fake-user',
                                     cell_id=mapping1.id)
        im.create()

        # Create an instance in cell2
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance_uuids.append(instance.uuid)
        im = objects.InstanceMapping(context=ctxt,
                                     instance_uuid=instance.uuid,
                                     project_id='fake-project',
                                     user_id='fake-user',
                                     cell_id=mapping2.id)
        im.create()

        # Create an instance that is queued for delete in cell2. It should not
        # be counted
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user')
            instance.create()
            instance.destroy()
            instance_uuids.append(instance.uuid)
        im = objects.InstanceMapping(context=ctxt,
                                     instance_uuid=instance.uuid,
                                     project_id='fake-project',
                                     user_id='fake-user',
                                     cell_id=mapping2.id,
                                     queued_for_delete=True)
        im.create()

        # Add the uuids to the group
        objects.InstanceGroup.add_members(ctxt, group.uuid, instance_uuids)
        # add_members() doesn't add the members to the object field
        group.members.extend(instance_uuids)

        # Count server group members from instance mappings or cell databases,
        # depending on whether the user_id/queued_for_delete data migration has
        # been completed.
        count = quota._server_group_count_members_by_user(
            ctxt, group, 'fake-user')

        self.assertEqual(2, count['user']['server_group_members'])

        if uid_qfd_populated:
            # Did not log a warning about falling back to legacy count.
            mock_warn_log.assert_not_called()
        else:
            # Logged a warning about falling back to legacy count.
            mock_warn_log.assert_called_once()
Esempio n. 45
0
    def test_instances_cores_ram_count(self):
        ctxt = context.RequestContext('fake-user', 'fake-project')
        mapping1 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell1',
                                       transport_url='none:///')
        mapping2 = objects.CellMapping(context=ctxt,
                                       uuid=uuidutils.generate_uuid(),
                                       database_connection='cell2',
                                       transport_url='none:///')
        mapping1.create()
        mapping2.create()

        # Create an instance in cell1
        with context.target_cell(ctxt, mapping1) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user',
                                        vcpus=2,
                                        memory_mb=512)
            instance.create()
            # create mapping for the instance since we query only those cells
            # in which the project has instances based on the instance_mappings
            im = objects.InstanceMapping(context=ctxt,
                                         instance_uuid=instance.uuid,
                                         cell_mapping=mapping1,
                                         project_id='fake-project')
            im.create()

        # Create an instance in cell2
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='fake-user',
                                        vcpus=4,
                                        memory_mb=1024)
            instance.create()
            # create mapping for the instance since we query only those cells
            # in which the project has instances based on the instance_mappings
            im = objects.InstanceMapping(context=ctxt,
                                         instance_uuid=instance.uuid,
                                         cell_mapping=mapping2,
                                         project_id='fake-project')
            im.create()

        # Create an instance in cell2 for a different user
        with context.target_cell(ctxt, mapping2) as cctxt:
            instance = objects.Instance(context=cctxt,
                                        project_id='fake-project',
                                        user_id='other-fake-user',
                                        vcpus=4,
                                        memory_mb=1024)
            instance.create()
            # create mapping for the instance since we query only those cells
            # in which the project has instances based on the instance_mappings
            im = objects.InstanceMapping(context=ctxt,
                                         instance_uuid=instance.uuid,
                                         cell_mapping=mapping2,
                                         project_id='fake-project')
            im.create()

        # Count instances, cores, and ram across cells
        count = quota._instances_cores_ram_count(ctxt,
                                                 'fake-project',
                                                 user_id='fake-user')

        self.assertEqual(3, count['project']['instances'])
        self.assertEqual(10, count['project']['cores'])
        self.assertEqual(2560, count['project']['ram'])
        self.assertEqual(2, count['user']['instances'])
        self.assertEqual(6, count['user']['cores'])
        self.assertEqual(1536, count['user']['ram'])
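
The counting helper exercised by this test only looks at the cells in which the project has instance mappings. A simplified, sequential sketch of that idea (nova itself scatters these queries across cells concurrently; everything here except the nova objects is illustrative):

def count_instances_cores_ram(ctxt, project_id):
    totals = {'instances': 0, 'cores': 0, 'ram': 0}
    # Only query the cells where this project actually has instances.
    cells = objects.CellMappingList.get_by_project_id(ctxt, project_id)
    for cell in cells:
        with nova_context.target_cell(ctxt, cell) as cctxt:
            instances = objects.InstanceList.get_by_filters(
                cctxt, {'project_id': project_id, 'deleted': False})
            totals['instances'] += len(instances)
            totals['cores'] += sum(inst.vcpus for inst in instances)
            totals['ram'] += sum(inst.memory_mb for inst in instances)
    return totals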
Esempio n. 46
0
    def create(self, req, body):
        """Creates a new instance event."""
        context = req.environ['nova.context']
        context.can(see_policies.POLICY_ROOT % 'create')

        response_events = []
        accepted_events = []
        accepted_instances = set()
        instances = {}
        mappings = {}
        result = 200

        body_events = body['events']

        for _event in body_events:
            client_event = dict(_event)
            event = objects.InstanceExternalEvent(context)

            event.instance_uuid = client_event.pop('server_uuid')
            event.name = client_event.pop('name')
            event.status = client_event.pop('status', 'completed')
            event.tag = client_event.pop('tag', None)

            instance = instances.get(event.instance_uuid)
            if not instance:
                try:
                    mapping = objects.InstanceMapping.get_by_instance_uuid(
                        context, event.instance_uuid)
                    cell_mapping = mapping.cell_mapping
                    mappings[event.instance_uuid] = cell_mapping

                    # Load migration_context and info_cache here in a single DB
                    # operation because we need them later on
                    with nova_context.target_cell(context,
                                                  cell_mapping) as cctxt:
                        instance = objects.Instance.get_by_uuid(
                            cctxt,
                            event.instance_uuid,
                            expected_attrs=['migration_context', 'info_cache'])
                    instances[event.instance_uuid] = instance
                except (exception.InstanceNotFound,
                        exception.InstanceMappingNotFound):
                    LOG.debug(
                        'Dropping event %(name)s:%(tag)s for unknown '
                        'instance %(instance_uuid)s', {
                            'name': event.name,
                            'tag': event.tag,
                            'instance_uuid': event.instance_uuid
                        })
                    _event['status'] = 'failed'
                    _event['code'] = 404
                    result = 207

            # NOTE: before accepting the event, make sure the instance
            # for which the event is sent is assigned to a host; otherwise
            # it will not be possible to dispatch the event
            if instance:
                if instance.host:
                    accepted_events.append(event)
                    accepted_instances.add(instance)
                    LOG.info(
                        'Creating event %(name)s:%(tag)s for '
                        'instance %(instance_uuid)s on %(host)s', {
                            'name': event.name,
                            'tag': event.tag,
                            'instance_uuid': event.instance_uuid,
                            'host': instance.host
                        })
                    # NOTE: as the event is processed asynchronously verify
                    # whether 202 is a more suitable response code than 200
                    _event['status'] = 'completed'
                    _event['code'] = 200
                else:
                    LOG.debug(
                        "Unable to find a host for instance "
                        "%(instance)s. Dropping event %(event)s", {
                            'instance': event.instance_uuid,
                            'event': event.name
                        })
                    _event['status'] = 'failed'
                    _event['code'] = 422
                    result = 207

            response_events.append(_event)

        if accepted_events:
            self.compute_api.external_instance_event(context,
                                                     accepted_instances,
                                                     mappings, accepted_events)
        else:
            msg = _('No instances found for any event')
            raise webob.exc.HTTPNotFound(explanation=msg)

        # FIXME(cyeoh): This needs some infrastructure support so that
        # we have a general way to do this
        robj = wsgi.ResponseObject({'events': response_events})
        robj._code = result
        return robj
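
For reference, the handler above backs the os-server-external-events API. A typical request body, written as the Python dict the controller receives (the UUIDs and tag below are placeholders):

# POST /os-server-external-events
body = {
    'events': [{
        'server_uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
        'name': 'network-vif-plugged',
        'status': 'completed',
        'tag': '0c1e7a51-6d4f-45ce-8f8b-3a4f1b0bd9c2',
    }]
}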
Esempio n. 47
0
    def test_target_cell(self):
        self.useFixture(fixtures.SingleCellSimple())
        with context.target_cell(mock.sentinel.context, None) as c:
            self.assertIs(mock.sentinel.context, c)
Esempio n. 48
0
def dummy_tester(ctxt, cell_mapping, uuid):
    with context.target_cell(ctxt, cell_mapping) as cctxt:
        return objects.Instance.get_by_uuid(cctxt, uuid)
Esempio n. 49
0
    def _resize_and_validate(self,
                             volume_backed=False,
                             stopped=False,
                             target_host=None):
        """Creates and resizes the server to another cell. Validates various
        aspects of the server and its related records (allocations, migrations,
        actions, VIF tags, etc).

        :param volume_backed: True if the server should be volume-backed, False
            if image-backed.
        :param stopped: True if the server should be stopped prior to resize,
            False if the server should be ACTIVE
        :param target_host: If not None, triggers a cold migration to the
            specified host.
        :returns: tuple of:
            - server response object
            - source compute node resource provider uuid
            - target compute node resource provider uuid
            - old flavor
            - new flavor
        """
        # Create the server.
        flavors = self.api.get_flavors()
        old_flavor = flavors[0]
        server = self._create_server(old_flavor, volume_backed=volume_backed)
        original_host = server['OS-EXT-SRV-ATTR:host']
        image_uuid = None if volume_backed else server['image']['id']

        # Our HostNameWeigher ensures the server starts in cell1, so we expect
        # the server AZ to be cell1 as well.
        self.assertEqual('cell1', server['OS-EXT-AZ:availability_zone'])

        if stopped:
            # Stop the server before resizing it.
            self.api.post_server_action(server['id'], {'os-stop': None})
            self._wait_for_state_change(self.api, server, 'SHUTOFF')

        # Before resizing make sure quota usage is only 1 for total instances.
        self.assert_quota_usage(expected_num_instances=1)

        if target_host:
            # Cold migrate the server to the target host.
            new_flavor = old_flavor  # flavor does not change for cold migrate
            body = {'migrate': {'host': target_host}}
            expected_host = target_host
        else:
            # Resize it which should migrate the server to the host in the
            # other cell.
            new_flavor = flavors[1]
            body = {'resize': {'flavorRef': new_flavor['id']}}
            expected_host = 'host1' if original_host == 'host2' else 'host2'

        self.stub_image_create()

        self.api.post_server_action(server['id'], body)
        # Wait for the server to be resized and then verify the host has
        # changed to be the host in the other cell.
        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
        self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
        # Assert that the instance is only listed one time from the API (to
        # make sure it's not listed out of both cells).
        # Note that we only get one because the DB API excludes hidden
        # instances by default (see instance_get_all_by_filters_sort).
        servers = self.api.get_servers()
        self.assertEqual(1, len(servers),
                         'Unexpected number of servers: %s' % servers)
        self.assertEqual(expected_host, servers[0]['OS-EXT-SRV-ATTR:host'])

        # And that there is only one migration record.
        migrations = self.api.api_get('/os-migrations?instance_uuid=%s' %
                                      server['id']).body['migrations']
        self.assertEqual(
            1, len(migrations),
            'Unexpected number of migrations records: %s' % migrations)
        migration = migrations[0]
        self.assertEqual('finished', migration['status'])

        # There should be at least two actions, one for create and one for the
        # resize. There will be a third action if the server was stopped.
        actions = self.api.api_get('/servers/%s/os-instance-actions' %
                                   server['id']).body['instanceActions']
        expected_num_of_actions = 3 if stopped else 2
        self.assertEqual(expected_num_of_actions, len(actions), actions)
        # Each action should have events (make sure these were copied from
        # the source cell to the target cell).
        for action in actions:
            detail = self.api.api_get(
                '/servers/%s/os-instance-actions/%s' %
                (server['id'], action['request_id'])).body['instanceAction']
            self.assertNotEqual(0, len(detail['events']), detail)

        # The tag should still be present on the server.
        self.assertEqual(1, len(server['tags']),
                         'Server tags not found in target cell.')
        self.assertEqual('test', server['tags'][0])

        # Confirm the source node has allocations for the old flavor and the
        # target node has allocations for the new flavor.
        source_rp_uuid = self._get_provider_uuid_by_host(original_host)
        # The source node allocations should be on the migration record.
        source_allocations = self._get_allocations_by_provider_uuid(
            source_rp_uuid)[migration['uuid']]['resources']
        self.assertFlavorMatchesAllocation(old_flavor,
                                           source_allocations,
                                           volume_backed=volume_backed)

        target_rp_uuid = self._get_provider_uuid_by_host(expected_host)
        # The target node allocations should be on the instance record.
        target_allocations = self._get_allocations_by_provider_uuid(
            target_rp_uuid)[server['id']]['resources']
        self.assertFlavorMatchesAllocation(new_flavor,
                                           target_allocations,
                                           volume_backed=volume_backed)

        # The instance, in the target cell DB, should have the old and new
        # flavor stored with it with the values we expect at this point.
        target_cell_name = self.host_to_cell_mappings[expected_host]
        self.assertEqual(target_cell_name,
                         server['OS-EXT-AZ:availability_zone'])
        target_cell = self.cell_mappings[target_cell_name]
        admin_context = nova_context.get_admin_context()
        with nova_context.target_cell(admin_context, target_cell) as cctxt:
            inst = objects.Instance.get_by_uuid(cctxt,
                                                server['id'],
                                                expected_attrs=['flavor'])
            self.assertIsNotNone(
                inst.old_flavor,
                'instance.old_flavor not saved in target cell')
            self.assertIsNotNone(
                inst.new_flavor,
                'instance.new_flavor not saved in target cell')
            self.assertEqual(inst.flavor.flavorid, inst.new_flavor.flavorid)
            if target_host:  # cold migrate so flavor does not change
                self.assertEqual(inst.flavor.flavorid,
                                 inst.old_flavor.flavorid)
            else:
                self.assertNotEqual(inst.flavor.flavorid,
                                    inst.old_flavor.flavorid)
            self.assertEqual(old_flavor['id'], inst.old_flavor.flavorid)
            self.assertEqual(new_flavor['id'], inst.new_flavor.flavorid)
            # Assert the ComputeManager._set_instance_info fields
            # are correct after the resize.
            self.assert_instance_fields_match_flavor(inst, new_flavor)
            # The availability_zone field in the DB should also be updated.
            self.assertEqual(target_cell_name, inst.availability_zone)

        # Assert the VIF tag was carried through to the target cell DB.
        interface_attachments = self.api.get_port_interfaces(server['id'])
        self.assertEqual(1, len(interface_attachments))
        self.assertEqual('private', interface_attachments[0]['tag'])

        if volume_backed:
            # Assert the BDM tag was carried through to the target cell DB.
            volume_attachments = self.api.get_server_volumes(server['id'])
            self.assertEqual(1, len(volume_attachments))
            self.assertEqual('root', volume_attachments[0]['tag'])

        # Make sure the guest is no longer tracked on the source node.
        source_guest_uuids = (
            self.computes[original_host].manager.driver.list_instance_uuids())
        self.assertNotIn(server['id'], source_guest_uuids)
        # And the guest is on the target node hypervisor.
        target_guest_uuids = (
            self.computes[expected_host].manager.driver.list_instance_uuids())
        self.assertIn(server['id'], target_guest_uuids)

        # The source hypervisor continues to report usage in the hypervisors
        # API because even though the guest was destroyed there, the instance
        # resources are still claimed on that node in case the user reverts.
        self.assert_hypervisor_usage(source_rp_uuid, old_flavor, volume_backed)
        # The new flavor should show up with resource usage on the target host.
        self.assert_hypervisor_usage(target_rp_uuid, new_flavor, volume_backed)

        # While we have a copy of the instance in each cell database make sure
        # that quota usage is only reporting 1 (because one is hidden).
        self.assert_quota_usage(expected_num_instances=1)

        # For a volume-backed server, at this point there should be two volume
        # attachments for the instance: one tracked in the source cell and
        # one in the target cell.
        if volume_backed:
            self.assertEqual(2, self._count_volume_attachments(server['id']),
                             self.cinder.volume_to_attachment)

        # Assert the expected power state.
        expected_power_state = 4 if stopped else 1
        self.assertEqual(expected_power_state,
                         server['OS-EXT-STS:power_state'],
                         "Unexpected power state after resize.")

        # For an image-backed server, a snapshot image should have been created
        # and then deleted during the resize; for a volume-backed server no
        # snapshot image should have been created at all.
        if volume_backed:
            self.assertEqual('', server['image'])
            self.assertEqual(
                0, len(self.created_images),
                "Unexpected image create during volume-backed resize")
        else:
            # The original image for the server shown in the API should not
            # have changed even if a snapshot was used to create the guest
            # on the dest host.
            self.assertEqual(image_uuid, server['image']['id'])
            self.assertEqual(
                1, len(self.created_images),
                "Unexpected number of images created for image-backed resize")
            # Make sure the temporary snapshot image was deleted; we use the
            # compute images proxy API here which is deprecated so we force the
            # microversion to 2.1.
            with utils.temporary_mutation(self.api, microversion='2.1'):
                self.api.api_get('/images/%s' % self.created_images[0],
                                 check_response_status=[404])

        return server, source_rp_uuid, target_rp_uuid, old_flavor, new_flavor
Esempio n. 50
0
    def _check_ironic_flavor_migration(self):
        """In Pike, ironic instances and flavors need to be migrated to use
        custom resource classes. In ironic, the node.resource_class should be
        set to some custom resource class value which should match a
        "resources:<custom resource class name>" flavor extra spec on baremetal
        flavors. Existing ironic instances will have their embedded
        instance.flavor.extra_specs migrated to use the matching ironic
        node.resource_class value in the nova-compute service, or they can
        be forcefully migrated using "nova-manage db ironic_flavor_migration".

        In this check, we look for all ironic compute nodes in all non-cell0
        cells, and from those ironic compute nodes, we look for an instance
        that has a "resources:CUSTOM_*" key in its embedded flavor extra
        specs.
        """
        cell_mappings = self._get_non_cell0_mappings()
        ctxt = nova_context.get_admin_context()
        # dict of cell identifier (name or uuid) to number of unmigrated
        # instances
        unmigrated_instance_count_by_cell = collections.defaultdict(int)
        for cell_mapping in cell_mappings:
            with nova_context.target_cell(ctxt, cell_mapping) as cctxt:
                # Get the (non-deleted) ironic compute nodes in this cell.
                meta = MetaData(bind=db_session.get_engine(context=cctxt))
                compute_nodes = Table('compute_nodes', meta, autoload=True)
                ironic_nodes = (compute_nodes.select().where(
                    and_(compute_nodes.c.hypervisor_type == 'ironic',
                         compute_nodes.c.deleted == 0)).execute().fetchall())

                if ironic_nodes:
                    # We have ironic nodes in this cell, let's iterate over
                    # them looking for instances.
                    instances = Table('instances', meta, autoload=True)
                    extras = Table('instance_extra', meta, autoload=True)
                    for node in ironic_nodes:
                        nodename = node['hypervisor_hostname']
                        # Get any (non-deleted) instances for this node.
                        ironic_instances = (instances.select().where(
                            and_(instances.c.node == nodename,
                                 instances.c.deleted ==
                                 0)).execute().fetchall())
                        # Get the instance_extras for each instance so we can
                        # find the flavors.
                        for inst in ironic_instances:
                            if not self._is_ironic_instance_migrated(
                                    extras, inst):
                                # We didn't find the extra spec key for this
                                # instance so increment the number of
                                # unmigrated instances in this cell.
                                unmigrated_instance_count_by_cell[
                                    cell_mapping.uuid] += 1

        if not cell_mappings:
            # There are no non-cell0 mappings so we can't determine this, just
            # return a warning. The cellsv2 check would have already failed
            # on this.
            msg = (_('Unable to determine ironic flavor migration without '
                     'cell mappings.'))
            return upgradecheck.Result(upgradecheck.Code.WARNING, msg)

        if unmigrated_instance_count_by_cell:
            # There are unmigrated ironic instances, so we need to fail.
            msg = (
                _('There are (cell=x) number of unmigrated instances in '
                  'each cell: %s. Run \'nova-manage db '
                  'ironic_flavor_migration\' on each cell.') %
                ' '.join('(%s=%s)' %
                         (cell_id, unmigrated_instance_count_by_cell[cell_id])
                         for cell_id in sorted(
                             unmigrated_instance_count_by_cell.keys())))
            return upgradecheck.Result(upgradecheck.Code.FAILURE, msg)

        # Either there were no ironic compute nodes or all instances for
        # those nodes are already migrated, so there is nothing to do.
        return upgradecheck.Result(upgradecheck.Code.SUCCESS)
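
The `_is_ironic_instance_migrated` helper referenced above is not shown in this snippet. A sketch of what it plausibly checks, loading the embedded flavor from instance_extra and looking for a "resources:CUSTOM_*" extra spec (assumes `from oslo_serialization import jsonutils`; the exact helper in nova may differ):

    @staticmethod
    def _is_ironic_instance_migrated(extras, inst):
        extra = extras.select().where(
            extras.c.instance_uuid == inst['uuid']).execute().first()
        # The embedded flavor is serialized JSON keyed by cur/old/new; we
        # only care about the current flavor's extra specs.
        flavor = jsonutils.loads(extra['flavor'])['cur']['nova_object.data']
        extra_specs = flavor.get('extra_specs', {})
        return any(spec.startswith('resources:CUSTOM_')
                   for spec in extra_specs)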
Esempio n. 51
0
    def _create_instances(self,
                          pre_newton=2,
                          deleted=0,
                          total=5,
                          target_cell=None):
        if not target_cell:
            target_cell = self.cells[1]

        instances = []
        with context.target_cell(self.context, target_cell) as cctxt:
            flav_dict = objects.Flavor._flavor_get_from_db(cctxt, 1)
            flavor = objects.Flavor(**flav_dict)
            for i in range(0, total):
                inst = objects.Instance(
                    context=cctxt,
                    project_id=self.api.project_id,
                    user_id=FAKE_UUID,
                    vm_state='active',
                    flavor=flavor,
                    created_at=datetime.datetime(1985, 10, 25, 1, 21, 0),
                    launched_at=datetime.datetime(1985, 10, 25, 1, 22, 0),
                    host=self.computes[0].host,
                    hostname='%s-inst%i' % (target_cell.name, i))
                inst.create()

                info_cache = objects.InstanceInfoCache(context=cctxt)
                info_cache.updated_at = timeutils.utcnow()
                info_cache.network_info = network_model.NetworkInfo()
                info_cache.instance_uuid = inst.uuid
                info_cache.save()

                instances.append(inst)

                im = objects.InstanceMapping(context=cctxt,
                                             project_id=inst.project_id,
                                             user_id=inst.user_id,
                                             instance_uuid=inst.uuid,
                                             cell_mapping=target_cell)
                im.create()

        # Attach fake interfaces to instances
        network_id = list(self.neutron._networks.keys())[0]
        for i in range(0, len(instances)):
            for k in range(0, 4):
                self.api.attach_interface(
                    instances[i].uuid,
                    {"interfaceAttachment": {
                        "net_id": network_id
                    }})

        with context.target_cell(self.context, target_cell) as cctxt:
            # Fake the pre-Newton behaviour by removing the
            # VirtualInterfaceList objects.
            if pre_newton:
                for i in range(0, pre_newton):
                    _delete_vif_list(cctxt, instances[i].uuid)

        if deleted:
            # Delete from the end of active instances list
            for i in range(total - deleted, total):
                instances[i].destroy()

        self.instances += instances
Esempio n. 52
0
def discover_hosts(ctxt, cell_uuid=None, status_fn=None):
    # TODO(alaski): If this is not run on a host configured to use the API
    # database most of the lookups below will fail and may not provide a
    # great error message. Add a check which will raise a useful error
    # message about running this from an API host.

    from nova import objects

    if not status_fn:
        status_fn = lambda x: None

    if cell_uuid:
        cell_mappings = [objects.CellMapping.get_by_uuid(ctxt, cell_uuid)]
    else:
        cell_mappings = objects.CellMappingList.get_all(ctxt)
        status_fn(_('Found %s cell mappings.') % len(cell_mappings))

    host_mappings = []
    for cm in cell_mappings:
        if cm.is_cell0():
            status_fn(_('Skipping cell0 since it does not contain hosts.'))
            continue
        if 'name' in cm and cm.name:
            status_fn(
                _("Getting compute nodes from cell '%(name)s': "
                  "%(uuid)s") % {
                      'name': cm.name,
                      'uuid': cm.uuid
                  })
        else:
            status_fn(
                _("Getting compute nodes from cell: %(uuid)s") %
                {'uuid': cm.uuid})
        with context.target_cell(ctxt, cm):
            compute_nodes = objects.ComputeNodeList.get_all(ctxt)
            status_fn(
                _('Found %(num)s computes in cell: %(uuid)s') % {
                    'num': len(compute_nodes),
                    'uuid': cm.uuid
                })
        for compute in compute_nodes:
            status_fn(
                _("Checking host mapping for compute host "
                  "'%(host)s': %(uuid)s") % {
                      'host': compute.host,
                      'uuid': compute.uuid
                  })
            try:
                objects.HostMapping.get_by_host(ctxt, compute.host)
            except exception.HostMappingNotFound:
                status_fn(
                    _("Creating host mapping for compute host "
                      "'%(host)s': %(uuid)s") % {
                          'host': compute.host,
                          'uuid': compute.uuid
                      })
                host_mapping = objects.HostMapping(ctxt,
                                                   host=compute.host,
                                                   cell_mapping=cm)
                host_mapping.create()
                host_mappings.append(host_mapping)
    return host_mappings
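
A brief usage sketch for the function above, roughly what "nova-manage cell_v2 discover_hosts" does when it wants progress output:

from nova import context

ctxt = context.get_admin_context()
# Print each status message as discovery progresses.
new_host_mappings = discover_hosts(ctxt, status_fn=print)
print('Created %d new host mapping(s)' % len(new_host_mappings))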
Esempio n. 53
0
    def test_populate_queued_for_delete(self):
        cells = []
        celldbs = fixtures.CellDatabases()

        # Create two cell databases and map them
        for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
            cm = cell_mapping.CellMapping(context=self.context,
                                          uuid=uuid,
                                          database_connection=uuid,
                                          transport_url='fake://')
            cm.create()
            cells.append(cm)
            celldbs.add_cell_database(uuid)
        self.useFixture(celldbs)

        # Create 5 instances per cell, two deleted, one with matching
        # queued_for_delete in the instance mapping
        for cell in cells:
            for i in range(0, 5):
                # Instance 4 should be SOFT_DELETED
                vm_state = (vm_states.SOFT_DELETED
                            if i == 4 else vm_states.ACTIVE)

                # Instance 2 should already be marked as queued_for_delete
                qfd = True if i == 2 else None

                with context.target_cell(self.context, cell) as cctxt:
                    inst = instance.Instance(
                        cctxt,
                        vm_state=vm_state,
                        project_id=self.context.project_id,
                        user_id=self.context.user_id)
                    inst.create()
                    if i in (2, 3):
                        # Instances 2 and 3 are hard-deleted
                        inst.destroy()

                instance_mapping.InstanceMapping._create_in_db(
                    self.context, {
                        'project_id': self.context.project_id,
                        'cell_id': cell.id,
                        'queued_for_delete': qfd,
                        'instance_uuid': inst.uuid
                    })

        done, total = instance_mapping.populate_queued_for_delete(
            self.context, 2)
        # First two needed fixing, and honored the limit
        self.assertEqual(2, done)
        self.assertEqual(2, total)

        done, total = instance_mapping.populate_queued_for_delete(
            self.context, 1000)

        # Last six included two that were already done, and spanned to the
        # next cell
        self.assertEqual(6, done)
        self.assertEqual(6, total)

        mappings = instance_mapping.InstanceMappingList.get_by_project_id(
            self.context, self.context.project_id)

        # Check that we have only the expected number of records with
        # True/False (which implies no NULL records).

        # Six deleted instances
        self.assertEqual(
            6, len([im for im in mappings if im.queued_for_delete is True]))
        # Four non-deleted instances
        self.assertEqual(
            4, len([im for im in mappings if im.queued_for_delete is False]))

        # Run it again to make sure we don't query the cell database for
        # instances if we didn't get any un-migrated mappings.
        with mock.patch('nova.objects.InstanceList.get_by_filters',
                        new_callable=mock.NonCallableMock):
            done, total = instance_mapping.populate_queued_for_delete(
                self.context, 1000)
        self.assertEqual(0, done)
        self.assertEqual(0, total)
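The test above drives populate_queued_for_delete in batches until nothing is
left; a sketch of the same batched pattern as a standalone driver loop, using
the (done, total) unpacking shown in the test. The batch size and loop shape
are illustrative assumptions, not Nova's actual nova-manage code.

from nova import context
from nova.objects import instance_mapping

ctxt = context.get_admin_context()
batch_size = 50  # illustrative value
migrated = 0
while True:
    done, total = instance_mapping.populate_queued_for_delete(ctxt, batch_size)
    migrated += done
    if not total:
        # The test above shows (0, 0) once every mapping has been processed.
        break
print('Populated queued_for_delete on %d instance mappings' % migrated)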
Example n. 54
    def test_populate_user_id(self, mock_log_warning):
        cells = []
        celldbs = fixtures.CellDatabases()

        # Create two cell databases and map them
        for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
            cm = cell_mapping.CellMapping(context=self.context,
                                          uuid=uuid,
                                          database_connection=uuid,
                                          transport_url='fake://')
            cm.create()
            cells.append(cm)
            celldbs.add_cell_database(uuid)
        self.useFixture(celldbs)

        # Create 5 instances per cell
        for cell in cells:
            for i in range(0, 5):
                with context.target_cell(self.context, cell) as cctxt:
                    inst = instance.Instance(
                        cctxt,
                        project_id=self.context.project_id,
                        user_id=self.context.user_id)
                    inst.create()
                # Make every other mapping have a NULL user_id
                # Will be a total of four mappings with NULL user_id
                user_id = self.context.user_id if i % 2 == 0 else None
                create_mapping(project_id=self.context.project_id,
                               user_id=user_id,
                               cell_id=cell.id,
                               instance_uuid=inst.uuid)

        # Create a SOFT_DELETED instance with a user_id=None instance mapping.
        # This should get migrated.
        with context.target_cell(self.context, cells[0]) as cctxt:
            inst = instance.Instance(cctxt,
                                     project_id=self.context.project_id,
                                     user_id=self.context.user_id,
                                     vm_state=vm_states.SOFT_DELETED)
            inst.create()
        create_mapping(project_id=self.context.project_id,
                       user_id=None,
                       cell_id=cells[0].id,
                       instance_uuid=inst.uuid,
                       queued_for_delete=True)

        # Create a deleted instance with a user_id=None instance mapping.
        # This should get migrated.
        with context.target_cell(self.context, cells[1]) as cctxt:
            inst = instance.Instance(cctxt,
                                     project_id=self.context.project_id,
                                     user_id=self.context.user_id)
            inst.create()
            inst.destroy()
        create_mapping(project_id=self.context.project_id,
                       user_id=None,
                       cell_id=cells[1].id,
                       instance_uuid=inst.uuid,
                       queued_for_delete=True)

        # Create an instance mapping for an instance not yet scheduled. It
        # should not get migrated because we won't know what user_id to use.
        unscheduled = create_mapping(project_id=self.context.project_id,
                                     user_id=None,
                                     cell_id=None)

        # Create two instance mappings for instances that no longer exist.
        # Example: residue from a manual cleanup or after a periodic compute
        # purge and before a database archive. These records should not get
        # migrated.
        nonexistent = []
        for i in range(2):
            nonexistent.append(
                create_mapping(project_id=self.context.project_id,
                               user_id=None,
                               cell_id=cells[i].id,
                               instance_uuid=uuidutils.generate_uuid()))

        # Create an instance mapping simulating a virtual interface migration
        # marker instance which has had map_instances run on it.
        # This should not be found by the migration.
        create_mapping(project_id=virtual_interface.FAKE_UUID, user_id=None)

        found, done = instance_mapping.populate_user_id(self.context, 2)
        # Two needed fixing, and honored the limit.
        self.assertEqual(2, found)
        self.assertEqual(2, done)

        found, done = instance_mapping.populate_user_id(self.context, 1000)
        # Only four of the remaining mappings were fixable. The fifth mapping
        # found has no cell and cannot be migrated yet. The sixth and seventh
        # mappings found have no corresponding instance records and cannot be
        # migrated.
        self.assertEqual(7, found)
        self.assertEqual(4, done)

        # Verify the orphaned instance mappings warning log message was only
        # emitted once.
        mock_log_warning.assert_called_once()

        # Check that we have only the expected number of records with
        # user_id set. We created 10 instances (5 per cell, 2 of which per
        # cell have NULL user_id mappings), 1 SOFT_DELETED instance with a
        # NULL user_id mapping, 1 deleted instance with a NULL user_id
        # mapping, 1 not-yet-scheduled mapping with NULL user_id, and 2
        # orphaned mappings with NULL user_id.
        # We expect 12 of the 15 mappings to have user_id set after the
        # migration; the not-yet-scheduled mapping and the two orphaned
        # mappings are left with NULL user_id.
        ims = instance_mapping.InstanceMappingList.get_by_project_id(
            self.context, self.context.project_id)
        self.assertEqual(12, len([im for im in ims if 'user_id' in im]))

        # Check that one instance mapping record (not yet scheduled) has not
        # been migrated by this script.
        # Check that two other instance mapping records (no longer existing
        # instances) have not been migrated by this script.
        self.assertEqual(15, len(ims))

        # Set the cell and create the instance for the mapping without a cell,
        # then run the migration again.
        unscheduled = instance_mapping.InstanceMapping.get_by_instance_uuid(
            self.context, unscheduled['instance_uuid'])
        unscheduled.cell_mapping = cells[0]
        unscheduled.save()
        with context.target_cell(self.context, cells[0]) as cctxt:
            inst = instance.Instance(cctxt,
                                     uuid=unscheduled.instance_uuid,
                                     project_id=self.context.project_id,
                                     user_id=self.context.user_id)
            inst.create()
        found, done = instance_mapping.populate_user_id(self.context, 1000)
        # Should have found the not-yet-scheduled instance and the orphaned
        # instance mappings.
        self.assertEqual(3, found)
        # Should have only migrated the not-yet-scheduled instance.
        self.assertEqual(1, done)

        # Delete the orphaned instance mappings (simulating manual cleanup by
        # an operator).
        for db_im in nonexistent:
            nonexist = instance_mapping.InstanceMapping.get_by_instance_uuid(
                self.context, db_im['instance_uuid'])
            nonexist.destroy()

        # Run the script one last time to make sure it finds nothing left to
        # migrate.
        found, done = instance_mapping.populate_user_id(self.context, 1000)
        self.assertEqual(0, found)
        self.assertEqual(0, done)
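The create_mapping helper used throughout this test is not shown in the
snippet; a plausible sketch of such a helper, modeled on the
InstanceMapping._create_in_db call from the previous example. The defaulted
instance_uuid and the admin context are assumptions.

from nova import context
from nova.objects import instance_mapping
from oslo_utils import uuidutils


def create_mapping(**kwargs):
    # Hypothetical reconstruction: default the instance_uuid when the caller
    # does not supply one and write the mapping directly to the API database.
    kwargs.setdefault('instance_uuid', uuidutils.generate_uuid())
    ctxt = context.get_admin_context()
    return instance_mapping.InstanceMapping._create_in_db(ctxt, kwargs)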
Example n. 55
    def test_compute_node_get_all_uuid_marker(self):
        """Tests paging over multiple cells with a uuid marker.

        This test is going to set up three compute nodes in each of two cells
        for a total of six compute nodes. Then it will page over them with a
        limit of two, so there should be three pages total.
        """
        # create the compute nodes in the non-cell0 cells
        count = 0
        for cell in self.cell_mappings[1:]:
            for x in range(3):
                compute_node_uuid = getattr(uuids, 'node_%s' % count)
                with context.target_cell(self.ctxt, cell) as cctxt:
                    node = objects.ComputeNode(cctxt,
                                               uuid=compute_node_uuid,
                                               host=compute_node_uuid,
                                               vcpus=2,
                                               memory_mb=2048,
                                               local_gb=128,
                                               vcpus_used=0,
                                               memory_mb_used=0,
                                               local_gb_used=0,
                                               cpu_info='{}',
                                               hypervisor_type='fake',
                                               hypervisor_version=10)
                    node.create()
                    count += 1

                # create a host mapping for the compute to link it to the cell
                host_mapping = objects.HostMapping(self.ctxt,
                                                   host=compute_node_uuid,
                                                   cell_mapping=cell)
                host_mapping.create()

        # now start paging with a limit of two per page; the first page starts
        # with no marker
        compute_nodes = self.host_api.compute_node_get_all(self.ctxt, limit=2)
        # assert that we got two compute nodes from cell1
        self.assertEqual(2, len(compute_nodes))
        for compute_node in compute_nodes:
            host_mapping = objects.HostMapping.get_by_host(
                self.ctxt, compute_node.host)
            self.assertEqual(uuids.cell1, host_mapping.cell_mapping.uuid)

        # now our marker is the last item in the first page
        marker = compute_nodes[-1].uuid
        compute_nodes = self.host_api.compute_node_get_all(self.ctxt,
                                                           limit=2,
                                                           marker=marker)
        # assert that we got the last compute node from cell1 and the first
        # compute node from cell2
        self.assertEqual(2, len(compute_nodes))
        host_mapping = objects.HostMapping.get_by_host(self.ctxt,
                                                       compute_nodes[0].host)
        self.assertEqual(uuids.cell1, host_mapping.cell_mapping.uuid)
        host_mapping = objects.HostMapping.get_by_host(self.ctxt,
                                                       compute_nodes[1].host)
        self.assertEqual(uuids.cell2, host_mapping.cell_mapping.uuid)

        # now our marker is the last item in the second page; make the limit=3
        # so we make sure we've exhausted the pages
        marker = compute_nodes[-1].uuid
        compute_nodes = self.host_api.compute_node_get_all(self.ctxt,
                                                           limit=3,
                                                           marker=marker)
        # assert that we got two compute nodes from cell2
        self.assertEqual(2, len(compute_nodes))
        for compute_node in compute_nodes:
            host_mapping = objects.HostMapping.get_by_host(
                self.ctxt, compute_node.host)
            self.assertEqual(uuids.cell2, host_mapping.cell_mapping.uuid)
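The paging that the test walks through by hand generalizes to a small loop; a
sketch of iterating every compute node page by page with the same limit/marker
API. The helper name and page size are assumptions; host_api is the same
handle the test uses.

def iter_all_compute_nodes(host_api, ctxt, page_size=2):
    # Hypothetical helper: request a page, yield its nodes, and use the uuid
    # of the last node on the page as the marker for the next request.
    marker = None
    while True:
        page = host_api.compute_node_get_all(
            ctxt, limit=page_size, marker=marker)
        for node in page:
            yield node
        if len(page) < page_size:
            # A short (or empty) page means there is nothing left to fetch.
            return
        marker = page[-1].uuid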
Example n. 56
    def test_bfv_quota_race_local_delete(self):
        # Setup a boot-from-volume request where the API will create a
        # volume attachment record for the given pre-existing volume.
        # We also tag the server since tags, like BDMs, should be created in
        # the cell database along with the instance.
        volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
        server = {
            'server': {
                'name': 'test_bfv_quota_race_local_delete',
                'flavorRef': self.api.get_flavors()[0]['id'],
                'imageRef': '',
                'block_device_mapping_v2': [{
                    'boot_index': 0,
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'uuid': volume_id
                }],
                'networks': 'auto',
                'tags': ['bfv']
            }
        }

        # Now we need to stub out the quota check routine so that we can
        # simulate the race where the initial quota check in the API passes
        # but fails in conductor once the instance has been created in cell1.
        original_quota_check = compute_utils.check_num_instances_quota

        def stub_check_num_instances_quota(_self, context, instance_type,
                                           min_count, *args, **kwargs):
            # Determine where we are in the flow based on whether or not the
            # min_count is 0 (API will pass 1, conductor will pass 0).
            if min_count == 0:
                raise exception.TooManyInstances(
                    'test_bfv_quota_race_local_delete')
            # We're checking from the API so perform the original quota check.
            return original_quota_check(
                _self, context, instance_type, min_count, *args, **kwargs)

        self.stub_out('nova.compute.utils.check_num_instances_quota',
                      stub_check_num_instances_quota)

        server = self.api.post_server(server)
        server = self._wait_for_state_change(server, 'ERROR')
        # At this point, the build request should be gone and the instance
        # should have been created in cell1.
        context = nova_context.get_admin_context()
        self.assertRaises(exception.BuildRequestNotFound,
                          objects.BuildRequest.get_by_instance_uuid,
                          context, server['id'])
        # The default cell in the functional tests is cell1 but we want to
        # specifically target cell1 to make sure the instance exists there
        # and we're not just getting lucky somehow due to the fixture.
        cell1 = self.cell_mappings[test.CELL1_NAME]
        with nova_context.target_cell(context, cell1) as cctxt:
            # This would raise InstanceNotFound if the instance isn't in cell1.
            instance = objects.Instance.get_by_uuid(cctxt, server['id'])
            self.assertIsNone(instance.host, 'instance.host should not be set')
            # Make sure the BDMs and tags also exist in cell1.
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                cctxt, instance.uuid)
            self.assertEqual(1, len(bdms), 'BDMs were not created in cell1')
            tags = objects.TagList.get_by_resource_id(cctxt, instance.uuid)
            self.assertEqual(1, len(tags), 'Tags were not created in cell1')

        # Make sure we can still view the tags on the server before it is
        # deleted.
        self.assertEqual(['bfv'], server['tags'])

        # Now delete the server which, since it does not have a host, will be
        # deleted "locally" from the API.
        self._delete_server(server)

        # The volume should have been detached by the API.
        attached_volumes = self.cinder_fixture.volume_ids_for_instance(
            server['id'])
        # volume_ids_for_instance is a generator so listify
        self.assertEqual(0, len(list(attached_volumes)))
Example n. 57
    def _get_instances_by_host(self, context, host_name):
        hm = objects.HostMapping.get_by_host(context, host_name)
        with context_module.target_cell(context, hm.cell_mapping):
            inst_list = objects.InstanceList.get_by_host(context, host_name)
            return {inst.uuid: inst for inst in inst_list}
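The snippet above relies on the older target_cell behavior that mutates the
context passed in. Newer Nova releases have target_cell yield a cell-targeted
copy of the context instead; a sketch of the same lookup under that
assumption, reusing the objects and context_module imports of the original.

    def _get_instances_by_host(self, context, host_name):
        hm = objects.HostMapping.get_by_host(context, host_name)
        with context_module.target_cell(context, hm.cell_mapping) as cctxt:
            # Use the cell-targeted context yielded by target_cell rather
            # than the original request context for the cell-database read.
            inst_list = objects.InstanceList.get_by_host(cctxt, host_name)
            return {inst.uuid: inst for inst in inst_list}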