Code example #1
File: test_aggregates.py Project: mahak/nova
    def test_aggregate_multitenancy_isolation_filter(self):
        """Tests common scenarios with the AggregateMultiTenancyIsolation
        filter:

        * hosts in a tenant-isolated aggregate are only accepted for that
          tenant
        * hosts not in a tenant-isolated aggregate are acceptable for all
          tenants, including tenants with access to the isolated-tenant
          aggregate
        """
        # Create a tenant-isolated aggregate for the non-admin user.
        user_api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1',
                                       project_id=uuids.non_admin)).api
        agg_id = self.admin_api.post_aggregate(
            {'aggregate': {'name': 'non_admin_agg'}})['id']
        meta_req = {'set_metadata': {
            'metadata': {'filter_tenant_id': uuids.non_admin}}}
        self.admin_api.api_post('/os-aggregates/%s/action' % agg_id, meta_req)
        # Add host2 to the aggregate; we'll restrict host2 to the non-admin
        # tenant.
        host_req = {'add_host': {'host': 'host2'}}
        self.admin_api.api_post('/os-aggregates/%s/action' % agg_id, host_req)
        # Spy on HostManager.get_filtered_hosts to record how many host
        # candidates were available per tenant-specific request.
        original_filtered_hosts = (
            nova.scheduler.host_manager.HostManager.get_filtered_hosts)

        def spy_get_filtered_hosts(*args, **kwargs):
            self.filtered_hosts = original_filtered_hosts(*args, **kwargs)
            return self.filtered_hosts
        self.stub_out(
            'nova.scheduler.host_manager.HostManager.get_filtered_hosts',
            spy_get_filtered_hosts)
        # Create a server for the admin - should only have one host candidate.
        server_req = self._build_minimal_create_server_request(
            self.admin_api,
            'test_aggregate_multitenancy_isolation_filter-admin',
            networks='none')  # requires microversion 2.37
        server_req = {'server': server_req}
        with utils.temporary_mutation(self.admin_api, microversion='2.37'):
            server = self.admin_api.post_server(server_req)
        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Assert it's not on host2 which is isolated to the non-admin tenant.
        self.assertNotEqual('host2', server['OS-EXT-SRV-ATTR:host'])
        self.assertEqual(1, len(self.filtered_hosts))
        # Now create a server for the non-admin tenant to which host2 is
        # isolated via the aggregate, but the other compute host is a
        # candidate. We don't assert that the non-admin tenant server shows
        # up on host2 because the other host, which is not isolated to the
        # aggregate, is still a candidate.
        server_req = self._build_minimal_create_server_request(
            user_api,
            'test_aggregate_multitenancy_isolation_filter-user',
            networks='none')  # requires microversion 2.37
        server_req = {'server': server_req}
        with utils.temporary_mutation(user_api, microversion='2.37'):
            server = user_api.post_server(server_req)
        self._wait_for_state_change(user_api, server, 'ACTIVE')
        self.assertEqual(2, len(self.filtered_hosts))
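
Every snippet on this page drives nova's utils.temporary_mutation helper, but none of them shows what it does. As a rough sketch (behaviour inferred from how the examples use it, with illustrative names such as _SENTINEL; not the exact Nova implementation), it is a context manager that records the current value of each named attribute or dict key, applies the override, and restores the saved state in a finally block so the mutation cannot leak past the with statement:

import contextlib

_SENTINEL = object()


@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
    """Temporarily override attributes (or dict keys) on obj.

    Sketch only: Nova's real helper lives in nova/utils.py; the structure
    below is an approximation of its documented behaviour.
    """
    is_dict = isinstance(obj, dict)
    old_values = {}
    for name, new_value in kwargs.items():
        # Remember the current value, or note that it was absent.
        if is_dict:
            old_values[name] = obj.get(name, _SENTINEL)
            obj[name] = new_value
        else:
            old_values[name] = getattr(obj, name, _SENTINEL)
            setattr(obj, name, new_value)
    try:
        yield
    finally:
        # Put everything back exactly as it was, even if the block raised.
        for name, old_value in old_values.items():
            if old_value is _SENTINEL:
                if is_dict:
                    del obj[name]
                else:
                    delattr(obj, name)
            elif is_dict:
                obj[name] = old_value
            else:
                setattr(obj, name, old_value)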
Code example #2
File: api.py Project: gminator/nova
 def quota_rollback(self, context, reservations, project_id=None):
     # FIXME(comstud): bug 1153795: Conductor manager should accept
     # a project_id kwarg to be able to pass to the quota rollback call.
     if project_id is None:
         project_id = context.project_id
     with utils.temporary_mutation(context, project_id=project_id):
         return self.conductor_rpcapi.quota_rollback(context, reservations)
Code example #3
 def test_bury_in_cell0_instance_create_action(self):
     """Tests creating a server which will fail scheduling because there is
     no compute service and result in the instance being created (buried)
     in cell0.
     """
     server = self._build_server(networks='none')
     # Use microversion 2.37 to create a server without any networking.
     with utils.temporary_mutation(self.api, microversion='2.37'):
         server = self.api.post_server({'server': server})
     # The server should go to ERROR status and have a NoValidHost fault.
     server = self._wait_for_state_change(server, 'ERROR')
     self.assertIn('fault', server)
     self.assertIn('No valid host', server['fault']['message'])
     self.assertEqual('', server['hostId'])
     # Assert the "create" instance action exists and is failed.
     actions = self.api.get_instance_actions(server['id'])
     self.assertEqual(1, len(actions), actions)
     action = actions[0]
     self.assertEqual(instance_actions.CREATE, action['action'])
     self.assertEqual('Error', action['message'])
     # Get the events. There should be one with an Error result.
     action = self.api.api_get(
         '/servers/%s/os-instance-actions/%s' %
         (server['id'], action['request_id'])).body['instanceAction']
     events = action['events']
     self.assertEqual(1, len(events), events)
     event = events[0]
     self.assertEqual('conductor_schedule_and_build_instances',
                      event['event'])
     self.assertEqual('Error', event['result'])
     # Normally non-admins cannot see the event traceback but we enabled
     # that via policy in setUp so assert something was recorded.
     self.assertIn('select_destinations', event['traceback'])
Code example #4
    def delete_server_and_assert_cleanup(self, server):
        """Deletes the server and makes various cleanup checks.

        - makes sure allocations from placement are gone
        - makes sure the instance record is gone from both cells
        - makes sure there are no leaked volume attachments

        :param server: dict of the server resource to delete
        """
        # Determine which cell the instance was in when the server was deleted
        # in the API so we can check hard vs soft delete in the DB.
        current_cell = self.host_to_cell_mappings[
            server['OS-EXT-SRV-ATTR:host']]
        # Delete the server and check that the allocations are gone from
        # the placement service.
        self._delete_and_check_allocations(server)
        # Make sure the instance record is gone from both cell databases.
        ctxt = nova_context.get_admin_context()
        for cell_name in self.host_to_cell_mappings.values():
            cell = self.cell_mappings[cell_name]
            with nova_context.target_cell(ctxt, cell) as cctxt:
                # If this is the current cell the instance was in when it was
                # deleted it should be soft-deleted (instance.deleted!=0),
                # otherwise it should be hard-deleted and getting it with a
                # read_deleted='yes' context should still raise.
                read_deleted = 'no' if current_cell == cell_name else 'yes'
                with utils.temporary_mutation(
                        cctxt, read_deleted=read_deleted):
                    self.assertRaises(exception.InstanceNotFound,
                                      objects.Instance.get_by_uuid,
                                      cctxt, server['id'])
        # Make sure there are no leaked volume attachments.
        attachment_count = self._count_volume_attachments(server['id'])
        self.assertEqual(0, attachment_count, 'Leaked volume attachments: %s' %
                         self.cinder.volume_to_attachment)
Code example #5
File: test_bug_1746483.py Project: mahak/nova
 def test_boot_from_volume_with_isolated_image(self):
     # Create our server without networking just to keep things simple.
     image_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
     server_req_body = {
         # There is no imageRef because this is boot from volume.
         'server': {
             'flavorRef': '1',   # m1.tiny from DefaultFlavorsFixture,
             'name': 'test_boot_from_volume_with_isolated_image',
             'networks': 'none',
             'block_device_mapping_v2': [{
                 'boot_index': 0,
                 'uuid': image_id,
                 'source_type': 'volume',
                 'destination_type': 'volume'
             }]
         }
     }
     # Note that we're using v2.1 by default but need v2.37 to use
     # networks='none'.
     with utils.temporary_mutation(self.api, microversion='2.37'):
         server = self.api.post_server(server_req_body)
     server = self._wait_for_state_change(self.api, server, 'ACTIVE')
     # NOTE(mriedem): The instance is successfully scheduled but since
     # the image_id from the volume_image_metadata isn't stored in the
     # RequestSpec.image.id, and restrict_isolated_hosts_to_isolated_images
     # is True, the isolated host (host1) is filtered out because the
     # filter doesn't have enough information to know if the image within
     # the volume can be used on that host.
     self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
Code example #6
 def test_boot_from_volume_with_isolated_image(self):
     # Create our server without networking just to keep things simple.
     image_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
     server_req_body = {
         # There is no imageRef because this is boot from volume.
         'server': {
              'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture,
              'name': 'test_boot_from_volume_with_isolated_image',
              'networks': 'none',
             'block_device_mapping_v2': [{
                 'boot_index': 0,
                 'uuid': image_id,
                 'source_type': 'volume',
                 'destination_type': 'volume'
             }]
         }
     }
     # Note that we're using v2.1 by default but need v2.37 to use
     # networks='none'.
     with utils.temporary_mutation(self.api, microversion='2.37'):
         server = self.api.post_server(server_req_body)
     server = self._wait_for_state_change(self.api, server, 'ACTIVE')
     # NOTE(mriedem): The instance is successfully scheduled but since
     # the image_id from the volume_image_metadata isn't stored in the
     # RequestSpec.image.id, and restrict_isolated_hosts_to_isolated_images
     # is True, the isolated host (host1) is filtered out because the
     # filter doesn't have enough information to know if the image within
     # the volume can be used on that host.
     self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
Code example #7
 def test_update_available_resource_node_recreate(self):
     # First we create a compute service to manage a couple of fake nodes.
     compute = self.start_service('compute', 'node1')
     # When start_service runs, it will create the node1 ComputeNode.
     compute.manager.driver._set_nodes(['node1', 'node2'])
     # Run the update_available_resource periodic to register node2.
     ctxt = context.get_admin_context()
     compute.manager.update_available_resource(ctxt)
     # Make sure no compute nodes were orphaned or deleted.
     self.assertNotIn('Deleting orphan compute node',
                      self.stdlog.logger.output)
     # Now we should have two compute nodes, make sure the hypervisors API
     # shows them.
     hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
     self.assertEqual(2, len(hypervisors), hypervisors)
     self.assertEqual({'node1', 'node2'},
                      set([hyp['hypervisor_hostname']
                           for hyp in hypervisors]))
     # Now stub the driver to only report node1. This is making it look like
     # node2 is no longer available when update_available_resource runs.
     compute.manager.driver._nodes = ['node1']
     ctxt = context.get_admin_context()
     compute.manager.update_available_resource(ctxt)
     # node2 should have been deleted, check the logs and API.
     log = self.stdlog.logger.output
     self.assertIn('Deleting orphan compute node', log)
     self.assertIn('hypervisor host is node2', log)
     hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
     self.assertEqual(1, len(hypervisors), hypervisors)
     self.assertEqual('node1', hypervisors[0]['hypervisor_hostname'])
     # But the node2 ComputeNode is still in the database with deleted!=0.
     with utils.temporary_mutation(ctxt, read_deleted='yes'):
         cn = objects.ComputeNode.get_by_host_and_nodename(
             ctxt, 'node1', 'node2')
         self.assertTrue(cn.deleted)
     # Now stub the driver again to report node2 as being back and run
     # the periodic task.
     compute.manager.driver._nodes = ['node1', 'node2']
     LOG.info('Running update_available_resource which should bring back '
              'node2.')
     compute.manager.update_available_resource(ctxt)
     # The DBDuplicateEntry error should have been handled and resulted in
     # updating the (soft) deleted record to no longer be deleted.
     log = self.stdlog.logger.output
     self.assertNotIn('DBDuplicateEntry', log)
     # Should have two reported hypervisors again.
     hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
     self.assertEqual(2, len(hypervisors), hypervisors)
     # Now that the node2 record was un-soft-deleted, archiving should not
     # remove any compute_nodes.
     LOG.info('Archiving the database.')
     archived = db_api.archive_deleted_rows(1000)[0]
     self.assertNotIn('compute_nodes', archived)
     cn2 = objects.ComputeNode.get_by_host_and_nodename(
         ctxt, 'node1', 'node2')
     self.assertFalse(cn2.deleted)
     self.assertIsNone(cn2.deleted_at)
     # The node2 id and uuid should not have changed in the DB.
     self.assertEqual(cn.id, cn2.id)
     self.assertEqual(cn.uuid, cn2.uuid)
Code example #8
    def assert_hypervisor_usage(
        self,
        compute_node_uuid,
        flavor,
        volume_backed,
    ):
        """Asserts the given hypervisor's resource usage matches the
        given flavor (assumes a single instance on the hypervisor).

        :param compute_node_uuid: UUID of the ComputeNode to check.
        :param flavor: "flavor" entry dict from GET /flavors/{flavor_id}
        :param volume_backed: True if the flavor is used with a volume-backed
            server, False otherwise.
        """
        # GET /os-hypervisors/{uuid} requires at least 2.53
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            hypervisor = self.admin_api.api_get(
                '/os-hypervisors/%s' % compute_node_uuid).body['hypervisor']

        if volume_backed:
            expected_disk_usage = 0
        else:
            expected_disk_usage = flavor['disk']

        # Account for reserved_host_disk_mb.
        expected_disk_usage += compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        self.assertEqual(expected_disk_usage, hypervisor['local_gb_used'])
        # Account for reserved_host_memory_mb.
        expected_ram_usage = CONF.reserved_host_memory_mb + flavor['ram']
        self.assertEqual(expected_ram_usage, hypervisor['memory_mb_used'])
        # Account for reserved_host_cpus.
        expected_vcpu_usage = CONF.reserved_host_cpus + flavor['vcpus']
        self.assertEqual(expected_vcpu_usage, hypervisor['vcpus_used'])
Code example #9
File: integrated_helpers.py Project: openstack/nova
    def assert_hypervisor_usage(self, compute_node_uuid, flavor,
                                volume_backed):
        """Asserts the given hypervisor's resource usage matches the
        given flavor (assumes a single instance on the hypervisor).

        :param compute_node_uuid: UUID of the ComputeNode to check.
        :param flavor: "flavor" entry dict from GET /flavors/{flavor_id}
        :param volume_backed: True if the flavor is used with a volume-backed
            server, False otherwise.
        """
        # GET /os-hypervisors/{uuid} requires at least 2.53
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            hypervisor = self.admin_api.api_get(
                '/os-hypervisors/%s' % compute_node_uuid).body['hypervisor']
        if volume_backed:
            expected_disk_usage = 0
        else:
            expected_disk_usage = flavor['disk']
        # Account for reserved_host_disk_mb.
        expected_disk_usage += compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        self.assertEqual(expected_disk_usage, hypervisor['local_gb_used'])
        # Account for reserved_host_memory_mb.
        expected_ram_usage = CONF.reserved_host_memory_mb + flavor['ram']
        self.assertEqual(expected_ram_usage, hypervisor['memory_mb_used'])
        # Account for reserved_host_cpus.
        expected_vcpu_usage = CONF.reserved_host_cpus + flavor['vcpus']
        self.assertEqual(expected_vcpu_usage, hypervisor['vcpus_used'])
Code example #10
    def _delete_and_check_allocations(self, server):
        """Delete the instance and asserts that the allocations are cleaned

        If the server was moved (resized or live migrated), also checks that
        migration-based allocations are also cleaned up.

        :param server: The API representation of the instance to be deleted
        :returns: The uuid of the migration record associated with the resize
            or cold migrate operation
        """
        # First check to see if there is a related migration record so we can
        # assert its allocations (if any) are not leaked.
        with utils.temporary_mutation(self.admin_api, microversion='2.59'):
            migrations = self.admin_api.api_get(
                '/os-migrations?instance_uuid=%s' %
                server['id']).body['migrations']

        if migrations:
            # If there is more than one migration, they are sorted by
            # created_at in descending order so we'll get the last one
            # which is probably what we'd always want anyway.
            migration_uuid = migrations[0]['uuid']
        else:
            migration_uuid = None

        self._delete_server(server)

        # NOTE(gibi): The resource allocation is deleted after the instance is
        # destroyed in the db, so wait_until_deleted might return before the
        # resources are deleted in placement. So we need to wait for the
        # instance.delete.end notification as that is emitted after the
        # resources are freed.

        fake_notifier.wait_for_versioned_notifications('instance.delete.end')

        for rp_uuid in [
                self._get_provider_uuid_by_host(hostname)
                for hostname in self.computes.keys()
        ]:
            self.assertRequestMatchesUsage(
                {
                    'VCPU': 0,
                    'MEMORY_MB': 0,
                    'DISK_GB': 0
                }, rp_uuid)

        # and no allocations for the deleted server
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertEqual(0, len(allocations))

        if migration_uuid:
            # and no allocations for the deleted migration
            allocations = self._get_allocations_by_server_uuid(migration_uuid)
            self.assertEqual(0, len(allocations))

        return migration_uuid
Code example #11
File: test_wsgi.py Project: y00187570/nova
    def test_init_application_passes_sys_argv_to_config(self):

        with utils.temporary_mutation(sys, argv=mock.sentinel.argv):
            with mock.patch('nova.config.parse_args') as mock_parse_args:
                wsgi_app.init_application('test-app')
                mock_parse_args.assert_called_once_with(
                    mock.sentinel.argv,
                    default_config_files=[
                        '/etc/nova/api-paste.ini', '/etc/nova/nova.conf'
                    ])
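
The test above swaps out a module attribute (sys.argv) rather than a request context; the helper only needs to get and set attributes on its target, so the same pattern applies to any object. A small, hypothetical usage sketch (the fake argv values, the config path and the SimpleNamespace stand-in are invented for illustration):

import sys
from types import SimpleNamespace

from nova import utils

# Replace sys.argv only for the duration of the block, e.g. when exercising
# code that parses command line arguments.
fake_argv = ['nova-api', '--config-file', '/tmp/nova.conf']
with utils.temporary_mutation(sys, argv=fake_argv):
    assert sys.argv == fake_argv
# sys.argv is back to its original value here.

# The same helper works on any plain object, not just contexts or modules.
conf_like = SimpleNamespace(debug=False)
with utils.temporary_mutation(conf_like, debug=True):
    assert conf_like.debug is True
assert conf_like.debug is False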
Code example #12
File: messaging.py Project: bopopescu/zknova
    def instance_update_at_top(self, message, instance, **kwargs):
        """Update an instance in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        instance_uuid = instance['uuid']

        # Remove things that we can't update in the top level cells.
        # 'metadata' is only updated in the API cell, so don't overwrite
        # it based on what child cells say.  Make sure to update
        # 'cell_name' based on the routing path.
        items_to_remove = [
            'id', 'security_groups', 'instance_type', 'volumes', 'cell_name',
            'name', 'metadata'
        ]
        for key in items_to_remove:
            instance.pop(key, None)
        instance['cell_name'] = _reverse_path(message.routing_path)

        # Fixup info_cache.  We'll have to update this separately if
        # it exists.
        info_cache = instance.pop('info_cache', None)
        if info_cache is not None:
            info_cache.pop('id', None)
            info_cache.pop('instance', None)

        # Fixup system_metadata (should be a dict for update, not a list)
        if ('system_metadata' in instance
                and isinstance(instance['system_metadata'], list)):
            sys_metadata = dict([(md['key'], md['value'])
                                 for md in instance['system_metadata']])
            instance['system_metadata'] = sys_metadata

        LOG.debug(
            _("Got update for instance %(instance_uuid)s: "
              "%(instance)s") % locals())

        # It's possible due to some weird condition that the instance
        # was already set as deleted... so we'll attempt to update
        # it with permissions that allows us to read deleted.
        with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
            try:
                self.db.instance_update(message.ctxt,
                                        instance_uuid,
                                        instance,
                                        update_cells=False)
            except exception.NotFound:
                # FIXME(comstud): Strange.  Need to handle quotas here,
                # if we actually want this code to remain..
                self.db.instance_create(message.ctxt, instance)
        if info_cache:
            self.db.instance_info_cache_update(message.ctxt,
                                               instance_uuid,
                                               info_cache,
                                               update_cells=False)
Code example #13
File: test_bug_1896463.py Project: y00187570/nova
    def _get_compute_node_id_by_host(self, host):
        # We specifically need the integer id of the node, not the UUID, so
        # we need to use the old microversion.
        with utils.temporary_mutation(self.admin_api, microversion='2.52'):
            hypers = self.admin_api.api_get(
                'os-hypervisors').body['hypervisors']
            for hyper in hypers:
                if hyper['hypervisor_hostname'] == host:
                    return hyper['id']

            self.fail('Hypervisor with hostname=%s not found' % host)
Code example #14
File: test_policy.py Project: vwangyanweida/nova
 def _test_host_status_unknown_only(self, func_name, *args):
     admin_func = getattr(self.admin_api, func_name)
     func = getattr(self.api, func_name)
     # Run the operation as admin and extract the server from the response.
     server = self._get_server(admin_func(*args))
     # We need to wait for ACTIVE if this was a post rebuild server action,
     # else a subsequent rebuild request will fail with a 409 in the API.
     self._wait_for_state_change(server, 'ACTIVE')
     # Verify admin can see the host status UP.
     self.assertEqual('UP', server['host_status'])
     # Get server as normal non-admin user.
     server = self._get_server(func(*args))
     self._wait_for_state_change(server, 'ACTIVE')
      # Verify the non-admin does not receive the host_status field because
      # it is not UNKNOWN.
     self.assertNotIn('host_status', server)
     # Stop the compute service to trigger UNKNOWN host_status.
     self.compute.stop()
     # Advance time by 30 minutes so nova considers service as down.
     minutes_from_now = timeutils.utcnow() + datetime.timedelta(minutes=30)
     timeutils.set_time_override(override_time=minutes_from_now)
     self.addCleanup(timeutils.clear_time_override)
     # Run the operation as admin and extract the server from the response.
     server = self._get_server(admin_func(*args))
     # Verify admin can see the host status UNKNOWN.
     self.assertEqual('UNKNOWN', server['host_status'])
     # Now that the compute service is down, the rebuild will not ever
     # complete. But we're only interested in what would be returned from
     # the API post rebuild action, so reset the state to ACTIVE to allow
     # the next rebuild request to go through without a 409 error.
     self._set_server_state_active(server)
     # Run the operation as a normal non-admin user and extract the server
     # from the response.
     server = self._get_server(func(*args))
     # Verify non-admin can see the host status UNKNOWN too.
     self.assertEqual('UNKNOWN', server['host_status'])
     self._set_server_state_active(server)
      # Now, adjust the policy so that only admins are allowed to see the
      # UNKNOWN host status.
     self.policy.set_rules(
         {self.host_status_unknown_only_rule: 'rule:admin_api'},
         overwrite=False)
     # Run the operation as a normal non-admin user and extract the server
     # from the response.
     server = self._get_server(func(*args))
      # Verify the non-admin does not receive the host_status field.
     self.assertNotIn('host_status', server)
     self._set_server_state_active(server)
      # Verify that the admin will not receive the host_status field if the
     # API microversion < 2.16.
     with utils.temporary_mutation(self.admin_api, microversion='2.15'):
         server = self._get_server(admin_func(*args))
         self.assertNotIn('host_status', server)
Code example #15
    def _build_server(self,
                      name=None,
                      image_uuid=None,
                      flavor_id=None,
                      networks=None,
                      az=None,
                      host=None):
        """Build a request for the server create API.

        :param name: A name for the server.
        :param image_uuid: The ID of an existing image.
        :param flavor_id: The ID of an existing flavor.
        :param networks: A dict of networks to attach or a string of 'none' or
            'auto'.
        :param az: The name of the availability zone the instance should
            request.
        :param host: The host to boot the instance on. Requires API
            microversion 2.74 or greater.
        :returns: The generated request body.
        """
        if not name:
            name = ''.join(
                random.choice(string.ascii_lowercase) for i in range(20))

        if image_uuid is None:  # we need to handle ''
            # NOTE(takashin): In API version 2.36, image APIs were deprecated.
            # In API version 2.36 or greater, self.api.get_images() returns
            # a 404 error. In that case, 'image_uuid' should be specified.
            with utils.temporary_mutation(self.api, microversion='2.35'):
                image_uuid = self.api.get_images()[0]['id']

        if not flavor_id:
            # Set a valid flavorId
            flavor_id = self.api.get_flavors()[0]['id']

        server = {
            'name': name,
            'imageRef': image_uuid,
            'flavorRef': 'http://fake.server/%s' % flavor_id,
        }

        if networks is not None:
            server['networks'] = networks

        if az is not None:
            server['availability_zone'] = az

        # This requires at least microversion 2.74 to work
        if host is not None:
            server['host'] = host

        return server
Code example #16
File: utils.py Project: grwl/nova
def notify_usage_exists(context, instance_ref, current_period=False,
                        ignore_missing_network_data=True,
                        system_metadata=None, extra_usage_info=None):
    """Generates 'exists' notification for an instance for usage auditing
    purposes.

    :param current_period: if True, this will generate a usage for the
        current usage period; if False, this will generate a usage for the
        previous audit period.

    :param ignore_missing_network_data: if True, log any exceptions generated
        while getting network info; if False, raise the exception.
    :param system_metadata: system_metadata DB entries for the instance,
        if not None.  *NOTE*: Currently unused here in trunk, but needed for
        potential custom modifications.
    :param extra_usage_info: Dictionary containing extra values to add or
        override in the notification if not None.
    """

    audit_start, audit_end = notifications.audit_period_bounds(current_period)

    bw = notifications.bandwidth_usage(instance_ref, audit_start,
            ignore_missing_network_data)

    if system_metadata is None:
        try:
            if instance_ref.get('deleted'):
                with utils.temporary_mutation(context, read_deleted='yes'):
                    system_metadata = db.instance_system_metadata_get(
                            context, instance_ref.uuid)
            else:
                system_metadata = db.instance_system_metadata_get(
                        context, instance_ref.uuid)
        except exception.NotFound:
            system_metadata = {}

    # add image metadata to the notification:
    image_meta = notifications.image_meta(system_metadata)

    extra_info = dict(audit_period_beginning=str(audit_start),
                      audit_period_ending=str(audit_end),
                      bandwidth=bw, image_meta=image_meta)

    if extra_usage_info:
        extra_info.update(extra_usage_info)

    notify_about_instance_usage(context, instance_ref, 'exists',
            system_metadata=system_metadata, extra_usage_info=extra_info)
Code example #17
File: messaging.py Project: dscannell/nova
    def instance_update_at_top(self, message, instance, **kwargs):
        """Update an instance in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        instance_uuid = instance['uuid']

        # Remove things that we can't update in the top level cells.
        # 'metadata' is only updated in the API cell, so don't overwrite
        # it based on what child cells say.  Make sure to update
        # 'cell_name' based on the routing path.
        items_to_remove = ['id', 'security_groups', 'instance_type',
                'volumes', 'cell_name', 'name', 'metadata']
        for key in items_to_remove:
            instance.pop(key, None)
        instance['cell_name'] = _reverse_path(message.routing_path)

        # Fixup info_cache.  We'll have to update this separately if
        # it exists.
        info_cache = instance.pop('info_cache', None)
        if info_cache is not None:
            info_cache.pop('id', None)
            info_cache.pop('instance', None)

        # Fixup system_metadata (should be a dict for update, not a list)
        if ('system_metadata' in instance and
                isinstance(instance['system_metadata'], list)):
            sys_metadata = dict([(md['key'], md['value'])
                    for md in instance['system_metadata']])
            instance['system_metadata'] = sys_metadata

        LOG.debug(_("Got update for instance %(instance_uuid)s: "
                "%(instance)s") % locals())

        # It's possible due to some weird condition that the instance
        # was already set as deleted... so we'll attempt to update
        # it with permissions that allows us to read deleted.
        with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
            try:
                self.db.instance_update(message.ctxt, instance_uuid,
                        instance, update_cells=False)
            except exception.NotFound:
                # FIXME(comstud): Strange.  Need to handle quotas here,
                # if we actually want this code to remain..
                self.db.instance_create(message.ctxt, instance)
        if info_cache:
            self.db.instance_info_cache_update(message.ctxt, instance_uuid,
                    info_cache, update_cells=False)
Code example #18
    def test_rebuild_instance_with_image_traits_on_shared_rp(self):
        shared_rp_uuid = self.create_shared_storage_rp()
        # add both cn_rp and shared_rp under one aggregate
        self._set_aggregate(shared_rp_uuid, uuids.shr_disk_agg)
        self._set_aggregate(self.host_uuid, uuids.shr_disk_agg)

        self.assertIn("DISK_GB", self._get_provider_inventory(self.host_uuid))

        # run update_available_resource periodic task after configuring shared
        # resource provider to update compute node resources
        self._run_periodics()

        # we expect that the virt driver stops reporting DISK_GB on the compute
        # RP as soon as a shared RP with DISK_GB is created in the compute tree
        self.assertNotIn("DISK_GB",
                         self._get_provider_inventory(self.host_uuid))

        server = self._create_server(
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=1,
            networks='none')

        rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']

        with utils.temporary_mutation(self.api, microversion='2.35'):
            self.api.api_put(
                '/images/%s/metadata' % rebuild_image_ref,
                {'metadata': {
                    'trait:STORAGE_DISK_SSD': 'required'
                }})
        rebuild_req_body = {'rebuild': {'imageRef': rebuild_image_ref}}
        self.api.api_post('/servers/%s/action' % server['id'],
                          rebuild_req_body)
        self._wait_for_server_parameter(server,
                                        {'OS-EXT-STS:task_state': None})

        # get shared_rp and cn_rp usages
        shared_rp_usages = self._get_provider_usages(shared_rp_uuid)
        cn_rp_usages = self._get_provider_usages(self.host_uuid)
        # Check if DISK_GB resource is allocated from shared_RP and the
        # remaining resources are allocated from host_uuid.
        self.assertEqual({'DISK_GB': 1}, shared_rp_usages)
        self.assertEqual({'MEMORY_MB': 512, 'VCPU': 1}, cn_rp_usages)
        allocs = self._get_allocations_by_server_uuid(server['id'])
        self.assertIn(self.host_uuid, allocs)
        server = self.api.get_server(server['id'])
        self.assertEqual(rebuild_image_ref, server['image']['id'])
Code example #19
File: integrated_helpers.py Project: openstack/nova
    def _delete_and_check_allocations(self, server):
        """Delete the instance and asserts that the allocations are cleaned

        If the server was moved (resized or live migrated), also checks that
        migration-based allocations are also cleaned up.

        :param server: The API representation of the instance to be deleted
        """

        # First check to see if there is a related migration record so we can
        # assert its allocations (if any) are not leaked.
        with utils.temporary_mutation(self.admin_api, microversion='2.59'):
            migrations = self.admin_api.api_get(
                '/os-migrations?instance_uuid=%s' %
                server['id']).body['migrations']
        if migrations:
            # If there is more than one migration, they are sorted by
            # created_at in descending order so we'll get the last one
            # which is probably what we'd always want anyway.
            migration_uuid = migrations[0]['uuid']
        else:
            migration_uuid = None

        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        # NOTE(gibi): The resource allocation is deleted after the instance is
        # destroyed in the db, so wait_until_deleted might return before the
        # resources are deleted in placement. So we need to wait for the
        # instance.delete.end notification as that is emitted after the
        # resources are freed.

        fake_notifier.wait_for_versioned_notifications('instance.delete.end')

        for rp_uuid in [self._get_provider_uuid_by_host(hostname)
                        for hostname in self.computes.keys()]:
            self.assertRequestMatchesUsage({'VCPU': 0,
                                            'MEMORY_MB': 0,
                                            'DISK_GB': 0}, rp_uuid)

        # and no allocations for the deleted server
        allocations = self._get_allocations_by_server_uuid(server['id'])
        self.assertEqual(0, len(allocations))

        if migration_uuid:
            # and no allocations for the deleted migration
            allocations = self._get_allocations_by_server_uuid(migration_uuid)
            self.assertEqual(0, len(allocations))
Code example #20
    def test_rebuild_instance_with_image_traits_on_shared_rp_no_valid_host(
            self):
        shared_rp_uuid = self.create_shared_storage_rp()
        # add both cn_rp and shared_rp under one aggregate
        self._set_aggregate(shared_rp_uuid, uuids.shr_disk_agg)
        self._set_aggregate(self.host_uuid, uuids.shr_disk_agg)

        self.assertIn("DISK_GB", self._get_provider_inventory(self.host_uuid))

        # run update_available_resource periodic task after configuring shared
        # resource provider to update compute node resources
        self._run_periodics()

        # we expect that the virt driver stops reporting DISK_GB on the compute
        # RP as soon as a shared RP with DISK_GB is created in the compute tree
        self.assertNotIn("DISK_GB",
                         self._get_provider_inventory(self.host_uuid))

        # create server
        org_image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
        server = self._create_server(
            image_uuid=org_image_id,
            flavor_id=1,
            networks='none',
        )

        rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']

        with utils.temporary_mutation(self.api, microversion='2.35'):
            self.api.api_put('/images/%s/metadata' % rebuild_image_ref,
                             {'metadata': {
                                 'trait:CUSTOM_FOO': 'required'
                             }})
        rebuild_req_body = {'rebuild': {'imageRef': rebuild_image_ref}}
        self.api.api_post('/servers/%s/action' % server['id'],
                          rebuild_req_body)
        # Look for the failed rebuild action.
        self._wait_for_action_fail_completion(server, instance_actions.REBUILD,
                                              'rebuild_server')
        # Assert the server image_ref was rolled back on failure.
        server = self.api.get_server(server['id'])
        self.assertEqual(org_image_id, server['image']['id'])

        # The server should be in ERROR state
        self.assertEqual('ERROR', server['status'])
        self.assertIn('No valid host', server['fault']['message'])
Code example #21
File: messaging.py Project: ameade/nova
    def instance_update_at_top(self, message, instance, **kwargs):
        """Update an instance in the DB if we're a top level cell."""
        if not self._at_the_top():
            return
        instance_uuid = instance["uuid"]
        routing_path = message.routing_path
        instance["cell_name"] = _reverse_path(routing_path)
        # Remove things that we can't update in the top level cells.
        # 'cell_name' is included in this list.. because we'll set it
        # ourselves based on the reverse of the routing path.  metadata
        # is only updated in the API cell, so we don't listen to what
        # the child cell tells us.
        items_to_remove = ["id", "security_groups", "instance_type", "volumes", "cell_name", "name", "metadata"]
        for key in items_to_remove:
            instance.pop(key, None)

        # Fixup info_cache.  We'll have to update this separately if
        # it exists.
        info_cache = instance.pop("info_cache", None)
        if info_cache is not None:
            info_cache.pop("id", None)
            info_cache.pop("instance", None)

        # Fixup system_metadata (should be a dict for update, not a list)
        if "system_metadata" in instance and isinstance(instance["system_metadata"], list):
            sys_metadata = dict([(md["key"], md["value"]) for md in instance["system_metadata"]])
            instance["system_metadata"] = sys_metadata

        LOG.debug(_("Got update for instance %(instance_uuid)s: " "%(instance)s") % locals())

        # It's possible due to some weird condition that the instance
        # was already set as deleted... so we'll attempt to update
        # it with permissions that allows us to read deleted.
        with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
            try:
                self.db.instance_update(message.ctxt, instance_uuid, instance, update_cells=False)
            except exception.NotFound:
                # FIXME(comstud): Strange.  Need to handle quotas here,
                # if we actually want this code to remain..
                self.db.instance_create(message.ctxt, instance)
        if info_cache:
            self.db.instance_info_cache_update(message.ctxt, instance_uuid, info_cache, update_cells=False)
Code example #22
File: integrated_helpers.py Project: yangbo7907/nova
    def _build_server(self, flavor_id, image=None):
        server = {}
        if image is None:
            # TODO(stephenfin): We need to stop relying on this API
            with utils.temporary_mutation(self.api, microversion='2.35'):
                image = self.api.get_images()[0]
            LOG.debug("Image: %s", image)

            # We now have a valid imageId
            server[self._image_ref_parameter] = image['id']
        else:
            server[self._image_ref_parameter] = image

        # Set a valid flavorId
        flavor = self.api.get_flavor(flavor_id)
        LOG.debug("Using flavor: %s", flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])

        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server
Code example #23
File: test_server_group.py Project: zhaofeidl/nova
    def test_serial_no_valid_host_then_pass_with_third_host(self):
        """Creates 2 servers in order (not a multi-create request) in an
        anti-affinity group so there will be 1 server on each host. Then
        attempts to live migrate the first server which will fail because the
        only other available host will be full. Then starts up a 3rd compute
        service and retries the live migration which should then pass.
        """
        # Create the anti-affinity group used for the servers.
        group = self.api.post_server_groups(
            {'name': 'test_serial_no_valid_host_then_pass_with_third_host',
             'policies': ['anti-affinity']})
        servers = []
        for x in range(2):
            server = self._build_minimal_create_server_request(
                self.api,
                'test_serial_no_valid_host_then_pass_with_third_host-%d' % x,
                networks='none')
            # Add the group hint so the server is created in our group.
            server_req = {
                'server': server,
                'os:scheduler_hints': {'group': group['id']}
            }
            # Use microversion 2.37 for passing networks='none'.
            with utils.temporary_mutation(self.api, microversion='2.37'):
                server = self.api.post_server(server_req)
                servers.append(
                    self._wait_for_state_change(
                        self.admin_api, server, 'ACTIVE'))

        # Make sure each server is on a unique host.
        hosts = set([svr['OS-EXT-SRV-ATTR:host'] for svr in servers])
        self.assertEqual(2, len(hosts))

        # And make sure the group has 2 members.
        members = self.api.get_server_group(group['id'])['members']
        self.assertEqual(2, len(members))

        # Now attempt to live migrate one of the servers which should fail
        # because we don't have a free host. Since we're using microversion 2.1
        # the scheduling will be synchronous and we should get back a 400
        # response for the NoValidHost error.
        body = {
            'os-migrateLive': {
                'host': None,
                'block_migration': False,
                'disk_over_commit': False
            }
        }
        # Specifically use the first server since that was the first member
        # added to the group.
        server = servers[0]
        ex = self.assertRaises(client.OpenStackApiException,
                               self.admin_api.post_server_action,
                               server['id'], body)
        self.assertEqual(400, ex.response.status_code)
        self.assertIn('No valid host', six.text_type(ex))

        # Now start up a 3rd compute service and retry the live migration which
        # should work this time.
        self.start_service('compute', host='host3')
        self.admin_api.post_server_action(server['id'], body)
        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now the server should be on host3 since that was the only available
        # host for the live migration.
        self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
Code example #24
File: test_server_group.py Project: zhaofeidl/nova
 def _set_forced_down(self, service, forced_down):
     # Use microversion 2.53 for PUT /os-services/{service_id} force down.
     with utils.temporary_mutation(self.admin_api, microversion='2.53'):
         self.admin_api.put_service_force_down(service.service_ref.uuid,
                                               forced_down)
Code example #25
File: test_server_group.py Project: mahak/nova
    def test_serial_no_valid_host_then_pass_with_third_host(self):
        """Creates 2 servers in order (not a multi-create request) in an
        anti-affinity group so there will be 1 server on each host. Then
        attempts to live migrate the first server which will fail because the
        only other available host will be full. Then starts up a 3rd compute
        service and retries the live migration which should then pass.
        """
        # Create the anti-affinity group used for the servers.
        group = self.api.post_server_groups(
            {'name': 'test_serial_no_valid_host_then_pass_with_third_host',
             'policies': ['anti-affinity']})
        servers = []
        for x in range(2):
            server = self._build_minimal_create_server_request(
                self.api,
                'test_serial_no_valid_host_then_pass_with_third_host-%d' % x,
                networks='none')
            # Add the group hint so the server is created in our group.
            server_req = {
                'server': server,
                'os:scheduler_hints': {'group': group['id']}
            }
            # Use microversion 2.37 for passing networks='none'.
            with utils.temporary_mutation(self.api, microversion='2.37'):
                server = self.api.post_server(server_req)
                servers.append(
                    self._wait_for_state_change(
                        self.admin_api, server, 'ACTIVE'))

        # Make sure each server is on a unique host.
        hosts = set([svr['OS-EXT-SRV-ATTR:host'] for svr in servers])
        self.assertEqual(2, len(hosts))

        # And make sure the group has 2 members.
        members = self.api.get_server_group(group['id'])['members']
        self.assertEqual(2, len(members))

        # Now attempt to live migrate one of the servers which should fail
        # because we don't have a free host. Since we're using microversion 2.1
        # the scheduling will be synchronous and we should get back a 400
        # response for the NoValidHost error.
        body = {
            'os-migrateLive': {
                'host': None,
                'block_migration': False,
                'disk_over_commit': False
            }
        }
        # Specifically use the first server since that was the first member
        # added to the group.
        server = servers[0]
        ex = self.assertRaises(client.OpenStackApiException,
                               self.admin_api.post_server_action,
                               server['id'], body)
        self.assertEqual(400, ex.response.status_code)
        self.assertIn('No valid host', six.text_type(ex))

        # Now start up a 3rd compute service and retry the live migration which
        # should work this time.
        fake.set_nodes(['host3'])
        self.start_service('compute', host='host3')
        self.admin_api.post_server_action(server['id'], body)
        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now the server should be on host3 since that was the only available
        # host for the live migration.
        self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
Code example #26
File: notifications.py Project: grwl/nova
def usage_from_instance(context, instance_ref, network_info,
                system_metadata, **kw):
    """Get usage information for an instance which is common to all
    notifications.

    :param network_info: network_info provided if not None
    :param system_metadata: system_metadata DB entries for the instance,
    if not None.  *NOTE*: Currently unused here in trunk, but needed for
    potential custom modifications.
    """

    def null_safe_str(s):
        return str(s) if s else ''

    image_ref_url = utils.generate_image_url(instance_ref['image_ref'])

    instance_type_name = instance_ref.get('instance_type', {}).get('name', '')

    if system_metadata is None:
        try:
            if instance_ref.get('deleted'):
                with utils.temporary_mutation(context, read_deleted='yes'):
                    system_metadata = db.instance_system_metadata_get(
                            context, instance_ref['uuid'])
            else:
                system_metadata = db.instance_system_metadata_get(
                        context, instance_ref['uuid'])

        except exception.NotFound:
            system_metadata = {}

    usage_info = dict(
        # Owner properties
        tenant_id=instance_ref['project_id'],
        user_id=instance_ref['user_id'],

        # Identity properties
        instance_id=instance_ref['uuid'],
        display_name=instance_ref['display_name'],
        reservation_id=instance_ref['reservation_id'],

        # Type properties
        instance_type=instance_type_name,
        instance_type_id=instance_ref['instance_type_id'],
        architecture=instance_ref['architecture'],

        # Capacity properties
        memory_mb=instance_ref['memory_mb'],
        disk_gb=instance_ref['root_gb'] + instance_ref['ephemeral_gb'],
        vcpus=instance_ref['vcpus'],
        # Note(dhellmann): This makes the disk_gb value redundant, but
        # we are keeping it for backwards-compatibility with existing
        # users of notifications.
        root_gb=instance_ref['root_gb'],
        ephemeral_gb=instance_ref['ephemeral_gb'],

        # Location properties
        host=instance_ref['host'],
        availability_zone=instance_ref['availability_zone'],

        # Date properties
        created_at=str(instance_ref['created_at']),
        # Nova's deleted vs terminated instance terminology is confusing,
        # this should be when the instance was deleted (i.e. terminated_at),
        # not when the db record was deleted. (mdragon)
        deleted_at=null_safe_str(instance_ref.get('terminated_at')),
        launched_at=null_safe_str(instance_ref.get('launched_at')),

        # Image properties
        image_ref_url=image_ref_url,
        os_type=instance_ref['os_type'],
        kernel_id=instance_ref['kernel_id'],
        ramdisk_id=instance_ref['ramdisk_id'],

        # Status properties
        state=instance_ref['vm_state'],
        state_description=null_safe_str(instance_ref.get('task_state')),
        )

    if network_info is not None:
        usage_info['fixed_ips'] = network_info.fixed_ips()

    # add image metadata
    image_meta_props = image_meta(system_metadata)
    usage_info["image_meta"] = image_meta_props

    usage_info.update(kw)
    return usage_info
Code example #27
    def _resize_and_validate(self,
                             volume_backed=False,
                             stopped=False,
                             target_host=None):
        """Creates and resizes the server to another cell. Validates various
        aspects of the server and its related records (allocations, migrations,
        actions, VIF tags, etc).

        :param volume_backed: True if the server should be volume-backed, False
            if image-backed.
        :param stopped: True if the server should be stopped prior to resize,
            False if the server should be ACTIVE
        :param target_host: If not None, triggers a cold migration to the
            specified host.
        :returns: tuple of:
            - server response object
            - source compute node resource provider uuid
            - target compute node resource provider uuid
            - old flavor
            - new flavor
        """
        # Create the server.
        flavors = self.api.get_flavors()
        old_flavor = flavors[0]
        server = self._create_server(old_flavor, volume_backed=volume_backed)
        original_host = server['OS-EXT-SRV-ATTR:host']
        image_uuid = None if volume_backed else server['image']['id']

        # Our HostNameWeigher ensures the server starts in cell1, so we expect
        # the server AZ to be cell1 as well.
        self.assertEqual('cell1', server['OS-EXT-AZ:availability_zone'])

        if stopped:
            # Stop the server before resizing it.
            self.api.post_server_action(server['id'], {'os-stop': None})
            self._wait_for_state_change(self.api, server, 'SHUTOFF')

        # Before resizing make sure quota usage is only 1 for total instances.
        self.assert_quota_usage(expected_num_instances=1)

        if target_host:
            # Cold migrate the server to the target host.
            new_flavor = old_flavor  # flavor does not change for cold migrate
            body = {'migrate': {'host': target_host}}
            expected_host = target_host
        else:
            # Resize it which should migrate the server to the host in the
            # other cell.
            new_flavor = flavors[1]
            body = {'resize': {'flavorRef': new_flavor['id']}}
            expected_host = 'host1' if original_host == 'host2' else 'host2'

        self.stub_image_create()

        self.api.post_server_action(server['id'], body)
        # Wait for the server to be resized and then verify the host has
        # changed to be the host in the other cell.
        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
        self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
        # Assert that the instance is only listed one time from the API (to
        # make sure it's not listed out of both cells).
        # Note that we only get one because the DB API excludes hidden
        # instances by default (see instance_get_all_by_filters_sort).
        servers = self.api.get_servers()
        self.assertEqual(1, len(servers),
                         'Unexpected number of servers: %s' % servers)
        self.assertEqual(expected_host, servers[0]['OS-EXT-SRV-ATTR:host'])

        # And that there is only one migration record.
        migrations = self.api.api_get('/os-migrations?instance_uuid=%s' %
                                      server['id']).body['migrations']
        self.assertEqual(
            1, len(migrations),
            'Unexpected number of migrations records: %s' % migrations)
        migration = migrations[0]
        self.assertEqual('finished', migration['status'])

        # There should be at least two actions, one for create and one for the
        # resize. There will be a third action if the server was stopped.
        actions = self.api.api_get('/servers/%s/os-instance-actions' %
                                   server['id']).body['instanceActions']
        expected_num_of_actions = 3 if stopped else 2
        self.assertEqual(expected_num_of_actions, len(actions), actions)
        # Each action should have events (make sure these were copied from
        # the source cell to the target cell).
        for action in actions:
            detail = self.api.api_get(
                '/servers/%s/os-instance-actions/%s' %
                (server['id'], action['request_id'])).body['instanceAction']
            self.assertNotEqual(0, len(detail['events']), detail)

        # The tag should still be present on the server.
        self.assertEqual(1, len(server['tags']),
                         'Server tags not found in target cell.')
        self.assertEqual('test', server['tags'][0])

        # Confirm the source node has allocations for the old flavor and the
        # target node has allocations for the new flavor.
        source_rp_uuid = self._get_provider_uuid_by_host(original_host)
        # The source node allocations should be on the migration record.
        source_allocations = self._get_allocations_by_provider_uuid(
            source_rp_uuid)[migration['uuid']]['resources']
        self.assertFlavorMatchesAllocation(old_flavor,
                                           source_allocations,
                                           volume_backed=volume_backed)

        target_rp_uuid = self._get_provider_uuid_by_host(expected_host)
        # The target node allocations should be on the instance record.
        target_allocations = self._get_allocations_by_provider_uuid(
            target_rp_uuid)[server['id']]['resources']
        self.assertFlavorMatchesAllocation(new_flavor,
                                           target_allocations,
                                           volume_backed=volume_backed)

        # The instance, in the target cell DB, should have the old and new
        # flavor stored with it with the values we expect at this point.
        target_cell_name = self.host_to_cell_mappings[expected_host]
        self.assertEqual(target_cell_name,
                         server['OS-EXT-AZ:availability_zone'])
        target_cell = self.cell_mappings[target_cell_name]
        admin_context = nova_context.get_admin_context()
        with nova_context.target_cell(admin_context, target_cell) as cctxt:
            inst = objects.Instance.get_by_uuid(cctxt,
                                                server['id'],
                                                expected_attrs=['flavor'])
            self.assertIsNotNone(
                inst.old_flavor,
                'instance.old_flavor not saved in target cell')
            self.assertIsNotNone(
                inst.new_flavor,
                'instance.new_flavor not saved in target cell')
            self.assertEqual(inst.flavor.flavorid, inst.new_flavor.flavorid)
            if target_host:  # cold migrate so flavor does not change
                self.assertEqual(inst.flavor.flavorid,
                                 inst.old_flavor.flavorid)
            else:
                self.assertNotEqual(inst.flavor.flavorid,
                                    inst.old_flavor.flavorid)
            self.assertEqual(old_flavor['id'], inst.old_flavor.flavorid)
            self.assertEqual(new_flavor['id'], inst.new_flavor.flavorid)
            # Assert the ComputeManager._set_instance_info fields
            # are correct after the resize.
            self.assert_instance_fields_match_flavor(inst, new_flavor)
            # The availability_zone field in the DB should also be updated.
            self.assertEqual(target_cell_name, inst.availability_zone)

        # Assert the VIF tag was carried through to the target cell DB.
        interface_attachments = self.api.get_port_interfaces(server['id'])
        self.assertEqual(1, len(interface_attachments))
        self.assertEqual('private', interface_attachments[0]['tag'])

        if volume_backed:
            # Assert the BDM tag was carried through to the target cell DB.
            volume_attachments = self.api.get_server_volumes(server['id'])
            self.assertEqual(1, len(volume_attachments))
            self.assertEqual('root', volume_attachments[0]['tag'])

        # Make sure the guest is no longer tracked on the source node.
        source_guest_uuids = (
            self.computes[original_host].manager.driver.list_instance_uuids())
        self.assertNotIn(server['id'], source_guest_uuids)
        # And the guest is on the target node hypervisor.
        target_guest_uuids = (
            self.computes[expected_host].manager.driver.list_instance_uuids())
        self.assertIn(server['id'], target_guest_uuids)

        # The source hypervisor continues to report usage in the hypervisors
        # API because even though the guest was destroyed there, the instance
        # resources are still claimed on that node in case the user reverts.
        self.assert_hypervisor_usage(source_rp_uuid, old_flavor, volume_backed)
        # The new flavor should show up with resource usage on the target host.
        self.assert_hypervisor_usage(target_rp_uuid, new_flavor, volume_backed)

        # While we have a copy of the instance in each cell database make sure
        # that quota usage is only reporting 1 (because one is hidden).
        self.assert_quota_usage(expected_num_instances=1)

        # For a volume-backed server, at this point there should be two volume
        # attachments for the instance: one tracked in the source cell and
        # one in the target cell.
        if volume_backed:
            self.assertEqual(2, self._count_volume_attachments(server['id']),
                             self.cinder.volume_to_attachment)

        # Assert the expected power state.
        expected_power_state = 4 if stopped else 1
        self.assertEqual(expected_power_state,
                         server['OS-EXT-STS:power_state'],
                         "Unexpected power state after resize.")

        # For an image-backed server, a snapshot image should have been created
        # and then deleted during the resize.
        if volume_backed:
            self.assertEqual('', server['image'])
            self.assertEqual(
                0, len(self.created_images),
                "Unexpected image create during volume-backed resize")
        else:
            # The original image for the server shown in the API should not
            # have changed even if a snapshot was used to create the guest
            # on the dest host.
            self.assertEqual(image_uuid, server['image']['id'])
            self.assertEqual(
                1, len(self.created_images),
                "Unexpected number of images created for image-backed resize")
            # Make sure the temporary snapshot image was deleted; we use the
            # compute images proxy API here which is deprecated so we force the
            # microversion to 2.1.
            with utils.temporary_mutation(self.api, microversion='2.1'):
                self.api.api_get('/images/%s' % self.created_images[0],
                                 check_response_status=[404])

        return server, source_rp_uuid, target_rp_uuid, old_flavor, new_flavor
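In the surrounding functional tests this helper is typically exercised through thin wrapper test methods. A hypothetical wrapper (the method name and the confirm step are illustrative and not taken from the original test file) could look like:

    def test_resize_confirm_image_backed(self):
        # Hypothetical wrapper: resize an image-backed, ACTIVE server across
        # cells and then confirm the resize on the target host.
        server = self._resize_and_validate()[0]
        self.api.post_server_action(server['id'], {'confirmResize': None})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        # Quota usage should still only report the single instance.
        self.assert_quota_usage(expected_num_instances=1)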
Code example #29
0
File: instance_actions.py  Project: arbrandes/nova
    def _get_instance(self, req, context, server_id):
        with utils.temporary_mutation(context, read_deleted='yes'):
            return common.get_instance(self.compute_api, context, server_id)
Code example #31
0
    def _get_provider_uuid_by_host(self, host):
        # We have to temporarily mutate to 2.53 to get the hypervisor UUID.
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            return super(ComputeStatusFilterTest211,
                         self)._get_provider_uuid_by_host(host)
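The same trick works for any per-request microversion bump on the test API client. For instance (an illustrative sketch; the helper name is made up, but at microversion 2.53 the os-hypervisors API does report hypervisor ids as UUIDs):

    def _list_hypervisor_uuids(self):
        # Temporarily raise the admin client's microversion so the hypervisor
        # ids in the response are UUIDs, then restore the previous value.
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            hypervisors = self.admin_api.api_get(
                '/os-hypervisors').body['hypervisors']
        return [hyp['id'] for hyp in hypervisors]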
Code example #32
0
File: test_bug_1764556.py  Project: sapcc/nova
    def test_instance_list_deleted_service_with_no_uuid(self):
        """This test covers the following scenario:

        1. create an instance on a host which we'll simulate to be old
           by not having a uuid set
        2. migrate the instance to the "newer" host that has a service uuid
        3. delete the old service/compute node
        4. start a new service with the old hostname (still host1); this will
           also create a new compute_nodes table record for that host/node
        5. migrate the instance back to the host1 service
        6. list instances which will try to online migrate the old service uuid
        """
        host1 = self.start_service('compute', host='host1')

        # Create an instance which will be on host1 since it's the only host.
        server_req = self._build_server(networks='none')
        server = self.api.post_server({'server': server_req})
        self._wait_for_state_change(server, 'ACTIVE')

        # Now we start a 2nd compute which is "upgraded" (has a uuid) and
        # we'll migrate the instance to that host.
        host2 = self.start_service('compute', host='host2')
        self.assertIsNotNone(host2.service_ref.uuid)

        server = self._migrate_server(server, 'host2')

        # Delete the host1 service (which implicitly deletes the host1 compute
        # node record).
        host1.stop()
        self.admin_api.api_delete('/os-services/%s' % host1.service_ref.uuid)
        # We should now only have 1 compute service (host2).
        compute_services = self.admin_api.api_get(
            '/os-services?binary=nova-compute').body['services']
        self.assertEqual(1, len(compute_services))
        # Make sure the compute node is also gone.
        self.admin_api.api_get(
            '/os-hypervisors?hypervisor_hostname_pattern=host1',
            check_response_status=[404])

        # Now recreate the host1 service and compute node by restarting the
        # service.
        self.restart_compute_service(host1)
        # At this point, host1's service should have a uuid.
        self.assertIsNotNone(host1.service_ref.uuid)

        # Sanity check that there are 3 services in the database, but only 1
        # is deleted.
        ctxt = nova_context.get_admin_context()
        with utils.temporary_mutation(ctxt, read_deleted='yes'):
            services = db.service_get_all_by_binary(ctxt, 'nova-compute')
            self.assertEqual(3, len(services))
            deleted_services = [svc for svc in services if svc['deleted']]
            self.assertEqual(1, len(deleted_services))
            deleted_service = deleted_services[0]
            self.assertEqual('host1', deleted_service['host'])

        # Now migrate the instance back to host1.
        self._migrate_server(server, 'host1')

        # Now null out the service uuid to simulate that the deleted host1
        # is old. We have to do this through the DB API directly since the
        # Service object won't allow a null uuid field. We also have to do
        # this *after* deleting the service via the REST API and migrating the
        # server because otherwise that will set a uuid when looking up the
        # service.
        with utils.temporary_mutation(ctxt, read_deleted='yes'):
            service_ref = db.service_update(ctxt, deleted_service['id'],
                                            {'uuid': None})
            self.assertIsNone(service_ref['uuid'])

        # Finally, list servers as an admin so it joins on services to get host
        # information.
        servers = self.admin_api.get_servers(detail=True)
        for server in servers:
            self.assertEqual('UP', server['host_status'])