Example 1
    def test_global_request_id(self, mock_endpoint):
        global_request_id = 'req-%s' % uuids.global_request_id

        def assert_app(environ, start_response):
            # Assert the 'X-Openstack-Request-Id' header in the request.
            self.assertIn('HTTP_X_OPENSTACK_REQUEST_ID', environ)
            self.assertEqual(global_request_id,
                             environ['HTTP_X_OPENSTACK_REQUEST_ID'])
            start_response('204 No Content', [])
            return []

        with interceptor.RequestsInterceptor(
                app=lambda: assert_app, url=self.url):
            self.client._delete_provider(self.compute_uuid,
                                         global_request_id=global_request_id)
            payload = {
                'name': 'test-resource-provider'
            }
            self.client.post('/resource_providers', payload,
                             global_request_id=global_request_id)
            self.client.put('/resource_providers/%s' % self.compute_uuid,
                            payload,
                            global_request_id=global_request_id)
            self.client.get('/resource_providers/%s' % self.compute_uuid,
                            global_request_id=global_request_id)
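
For context, a minimal, self-contained sketch of the mechanism the test above relies on: wsgi_intercept's RequestsInterceptor routes requests-library traffic for a given URL to an in-process WSGI app, which is why assert_app can inspect the WSGI environ directly. The app and URL below are invented for illustration.

import requests
from wsgi_intercept import interceptor

def simple_app(environ, start_response):
    # Trivial WSGI app standing in for the real service.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'intercepted']

# The app argument is a factory: a callable that returns the WSGI app.
with interceptor.RequestsInterceptor(app=lambda: simple_app,
                                     url='http://example.test/api'):
    # No real network I/O happens; the call is served by simple_app.
    resp = requests.get('http://example.test/api')
    assert resp.text == 'intercepted'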
Example 2
    def setUp(self):
        super(PlacementFixture, self).setUp()
        if not self.conf_fixture:
            config = cfg.ConfigOpts()
            self.conf_fixture = self.useFixture(config_fixture.Config(config))
        if self.register_opts:
            conf.register_opts(self.conf_fixture.conf)

        if self.db:
            self.useFixture(
                db_fixture.Database(self.conf_fixture, set_config=True))
        policy_opts.set_defaults(self.conf_fixture.conf)
        self.conf_fixture.config(group='api', auth_strategy='noauth2')

        self.conf_fixture.conf([], default_config_files=[])

        self.useFixture(policy_fixture.PolicyFixture(self.conf_fixture))

        if self.use_intercept:
            loader = deploy.loadapp(self.conf_fixture.conf)

            def app():
                return loader

            self.endpoint = 'http://%s/placement' % uuidutils.generate_uuid()
            intercept = interceptor.RequestsInterceptor(app, url=self.endpoint)
            intercept.install_intercept()
            self.addCleanup(intercept.uninstall_intercept)
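
A consuming test would typically just enable the fixture and talk to the randomized endpoint through the requests library; a hedged sketch (the test class and the fixture's import location are assumed, not part of the example above):

import requests
import testtools

class TestWithPlacement(testtools.TestCase):
    def setUp(self):
        super(TestWithPlacement, self).setUp()
        # PlacementFixture as defined above; import path assumed.
        self.placement = self.useFixture(PlacementFixture())

    def test_version_document(self):
        # The installed interceptor answers this requests call with the
        # in-process placement WSGI app; no server is started.
        resp = requests.get(self.placement.endpoint)
        self.assertEqual(200, resp.status_code)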
Example 3
    def setUp(self):
        super(PlacementFixture, self).setUp()

        self.useFixture(ConfPatcher(group='api', auth_strategy='noauth2'))
        loader = deploy.loadapp(CONF)
        def app():
            return loader
        self.endpoint = 'http://%s/placement' % uuidutils.generate_uuid()
        intercept = interceptor.RequestsInterceptor(app, url=self.endpoint)
        intercept.install_intercept()
        self.addCleanup(intercept.uninstall_intercept)
Example 4
    def test_client_report_smoke(self, mock_vbi, mock_endpoint, mock_auth,
                                 mock_cn):
        """Check things go as expected when doing the right things."""
        # TODO(cdent): We should probably also have a test that
        # tests that when allocation or inventory errors happen, we
        # are resilient.
        res_class = fields.ResourceClass.VCPU
        with interceptor.RequestsInterceptor(app=self.app, url=self.url):
            # When we start out there are no resource providers.
            rp = self.client._get_resource_provider(self.compute_uuid)
            self.assertIsNone(rp)

            # Now let's update status for our compute node.
            self.client.update_resource_stats(self.compute_node)

            # So now we have a resource provider
            rp = self.client._get_resource_provider(self.compute_uuid)
            self.assertIsNotNone(rp)

            # TODO(cdent): change this to use the methods built in
            # to the report client to retrieve inventory?
            inventory_url = ('/resource_providers/%s/inventories' %
                             self.compute_uuid)
            resp = self.client.get(inventory_url)
            inventory_data = resp.json()['inventories']
            self.assertEqual(self.compute_node.vcpus,
                             inventory_data[res_class]['total'])

            # Update allocations with our instance
            self.client.update_instance_allocation(self.compute_node,
                                                   self.instance, 1)

            # Check that allocations were made
            resp = self.client.get('/allocations/%s' % self.instance_uuid)
            alloc_data = resp.json()['allocations']
            vcpu_data = alloc_data[self.compute_uuid]['resources'][res_class]
            self.assertEqual(2, vcpu_data)

            # Check that usages are up to date
            resp = self.client.get('/resource_providers/%s/usages' %
                                   self.compute_uuid)
            usage_data = resp.json()['usages']
            vcpu_data = usage_data[res_class]
            self.assertEqual(2, vcpu_data)

            # Delete allocations with our instance
            self.client.update_instance_allocation(self.compute_node,
                                                   self.instance, -1)

            # No usage
            resp = self.client.get('/resource_providers/%s/usages' %
                                   self.compute_uuid)
            usage_data = resp.json()['usages']
            vcpu_data = usage_data[res_class]
            self.assertEqual(0, vcpu_data)
Example 5
    def test_ensure_standard_resource_class(self):
        """Test case for bug #1746615: If placement is running a newer version
        of code than compute, it may have new standard resource classes we
        don't know about.  Make sure this scenario doesn't cause errors in
        set_inventory_for_provider.
        """
        inv = {
            'VCPU': {
                'total': 10,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 2,
                'step_size': 1,
                'allocation_ratio': 10.0,
            },
            'MEMORY_MB': {
                'total': 1048576,
                'reserved': 2048,
                'min_unit': 1024,
                'max_unit': 131072,
                'step_size': 1024,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 1,
                'min_unit': 1,
                'max_unit': 10,
                'step_size': 2,
                'allocation_ratio': 10.0,
            },
            # A standard resource class known by placement, but not locally
            'PCI_DEVICE': {
                'total': 4,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 4,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'CUSTOM_BANDWIDTH': {
                'total': 1250000,
                'reserved': 10000,
                'min_unit': 5000,
                'max_unit': 250000,
                'step_size': 5000,
                'allocation_ratio': 8.0,
            },
        }
        with interceptor.RequestsInterceptor(app=self.app, url=self.url):
            self.client.update_compute_node(self.context, self.compute_node)
            self.client.set_inventory_for_provider(self.context,
                                                   self.compute_uuid,
                                                   self.compute_name, inv)
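
For orientation, set_inventory_for_provider ultimately replaces the provider's whole inventory in a single placement API request; a simplified sketch of the body it sends (the generation value is a placeholder, and the exact client internals may differ):

# Simplified sketch: the report client PUTs the full inventory map to
# /resource_providers/<compute_uuid>/inventories in one request.
payload = {
    # Placement uses the provider generation to detect concurrent
    # updates; 1 here is only a placeholder.
    'resource_provider_generation': 1,
    'inventories': inv,
}

Standard classes known to placement but not locally (PCI_DEVICE here) are accepted by the server as-is, while CUSTOM_* classes must exist in placement before they can appear in inventory.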
Example 6
    def test_ironic_ocata_to_pike(self, mock_vbi, mock_endpoint, mock_auth,
                                  mock_cn):
        """Check that when going from an Ocata installation with Ironic having
        node's resource class attributes set, that we properly "auto-heal" the
        inventory and allocation records in the placement API to account for
        both the old-style VCPU/MEMORY_MB/DISK_GB resources as well as the new
        custom resource class from Ironic's node.resource_class attribute.
        """
        with interceptor.RequestsInterceptor(app=self.app, url=self.url):
            # Before the resource tracker is "initialized", we shouldn't have
            # any compute nodes in the RT's cache...
            self.assertEqual(0, len(self.rt.compute_nodes))

            # There should not be any records in the placement API since we
            # haven't yet run update_available_resource() in the RT.
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                self.assertEqual(404, self.placement_get_inventory(cn.uuid))

            for inst in self.INSTANCE_FIXTURES.keys():
                self.assertEqual({}, self.placement_get_allocations(inst))

            # Nor should there be any custom resource classes in the placement
            # API, since we haven't had an Ironic node's resource class set yet
            self.assertEqual(0, len(self.placement_get_custom_rcs()))

            # Now "initialize" the resource tracker as if the compute host is a
            # Ocata host, with Ironic virt driver, but the admin has not yet
            # added a resource_class attribute to the Ironic baremetal nodes in
            # her system.
            # NOTE(jaypipes): This is what nova.compute.manager.ComputeManager
            # does when "initializing" the service...
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                nodename = cn.hypervisor_hostname
                self.driver_mock.get_available_resource.return_value = {
                    'hypervisor_hostname': nodename,
                    'hypervisor_type': 'ironic',
                    'hypervisor_version': 0,
                    'vcpus': cn.vcpus,
                    'vcpus_used': cn.vcpus_used,
                    'memory_mb': cn.memory_mb,
                    'memory_mb_used': cn.memory_mb_used,
                    'local_gb': cn.local_gb,
                    'local_gb_used': cn.local_gb_used,
                    'numa_topology': None,
                    'resource_class': None,  # Act like admin hasn't set yet...
                }
                self.driver_mock.get_inventory.return_value = {
                    VCPU: {
                        'total': cn.vcpus,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.vcpus,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                    MEMORY_MB: {
                        'total': cn.memory_mb,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.memory_mb,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                    DISK_GB: {
                        'total': cn.local_gb,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': cn.local_gb,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                }
                self.rt.update_available_resource(self.ctx, nodename)

            self.assertEqual(3, len(self.rt.compute_nodes))
            # A canary just to make sure the custom resource class we assert
            # on below wasn't somehow already added...
            crcs = self.placement_get_custom_rcs()
            self.assertNotIn('CUSTOM_SMALL_IRON', crcs)

            # Verify that the placement API has the "old-style" resources in
            # inventory and allocations
            for cn in self.COMPUTE_NODE_FIXTURES.values():
                inv = self.placement_get_inventory(cn.uuid)
                self.assertEqual(3, len(inv))

            # Now "spawn" an instance to the first compute node by calling the
            # RT's instance_claim().
            cn1_obj = self.COMPUTE_NODE_FIXTURES[uuids.cn1]
            cn1_nodename = cn1_obj.hypervisor_hostname
            inst = self.INSTANCE_FIXTURES[uuids.instance1]
            # Since we're Pike, the scheduler would have created our
            # allocation for us. So, we can use our old update routine
            # here to mimic that before we go do the compute RT claim,
            # and then the checks below.
            self.rt.reportclient.update_instance_allocation(
                self.ctx, cn1_obj, inst, 1)
            with self.rt.instance_claim(self.ctx, inst, cn1_nodename):
                pass

            allocs = self.placement_get_allocations(inst.uuid)
            self.assertEqual(1, len(allocs))
            self.assertIn(uuids.cn1, allocs)

            resources = allocs[uuids.cn1]['resources']
            self.assertEqual(3, len(resources))
            for rc in (VCPU, MEMORY_MB, DISK_GB):
                self.assertIn(rc, resources)

            # Now we emulate the operator setting ONE of the Ironic node's
            # resource class attribute to the value of a custom resource class
            # and re-run update_available_resource(). We will expect to see the
            # inventory and allocations reset for the first compute node that
            # had an instance on it. The new inventory and allocation records
            # will be for VCPU, MEMORY_MB, DISK_GB, and also a new record for
            # the custom resource class of the Ironic node.
            self.driver_mock.get_available_resource.return_value = {
                'hypervisor_hostname': cn1_obj.hypervisor_hostname,
                'hypervisor_type': 'ironic',
                'hypervisor_version': 0,
                'vcpus': cn1_obj.vcpus,
                'vcpus_used': cn1_obj.vcpus_used,
                'memory_mb': cn1_obj.memory_mb,
                'memory_mb_used': cn1_obj.memory_mb_used,
                'local_gb': cn1_obj.local_gb,
                'local_gb_used': cn1_obj.local_gb_used,
                'numa_topology': None,
                'resource_class': 'small-iron',
            }
            self.driver_mock.get_inventory.return_value = {
                VCPU: {
                    'total': cn1_obj.vcpus,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.vcpus,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                MEMORY_MB: {
                    'total': cn1_obj.memory_mb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.memory_mb,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                DISK_GB: {
                    'total': cn1_obj.local_gb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn1_obj.local_gb,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                'CUSTOM_SMALL_IRON': {
                    'total': 1,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 1,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.rt.update_available_resource(self.ctx, cn1_nodename)

            # Verify the auto-creation of the custom resource class, normalized
            # to what the placement API expects
            self.assertIn('CUSTOM_SMALL_IRON', self.placement_get_custom_rcs())

            allocs = self.placement_get_allocations(inst.uuid)
            self.assertEqual(1, len(allocs))
            self.assertIn(uuids.cn1, allocs)

            resources = allocs[uuids.cn1]['resources']
            self.assertEqual(3, len(resources))
            for rc in (VCPU, MEMORY_MB, DISK_GB):
                self.assertIn(rc, resources)
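
The "normalized" custom resource class asserted above comes from transforming Ironic's free-form node.resource_class value into a name placement will accept; an illustrative reimplementation (nova exposes this as fields.ResourceClass.normalize_name, and the regex below is a sketch rather than the exact production code):

import re

def normalize_rc_name(name):
    # Upper-case the name, collapse characters placement rejects into
    # underscores, and add the mandatory CUSTOM_ prefix.
    norm = re.sub(r'[^A-Z0-9_]+', '_', name.upper())
    return 'CUSTOM_' + norm

assert normalize_rc_name('small-iron') == 'CUSTOM_SMALL_IRON'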
Example 7
    def test_client_report_smoke(self, mock_vbi, mock_endpoint, mock_auth,
                                 mock_cn):
        """Check things go as expected when doing the right things."""
        # TODO(cdent): We should probably also have a test that
        # tests that when allocation or inventory errors happen, we
        # are resilient.
        res_class = fields.ResourceClass.VCPU
        with interceptor.RequestsInterceptor(app=self.app, url=self.url):
            # When we start out there are no resource providers.
            rp = self.client._get_resource_provider(self.compute_uuid)
            self.assertIsNone(rp)

            # Now let's update status for our compute node.
            self.client.update_compute_node(self.compute_node)

            # So now we have a resource provider
            rp = self.client._get_resource_provider(self.compute_uuid)
            self.assertIsNotNone(rp)

            # We should also have an empty set of aggregate UUID
            # associations
            pam = self.client._provider_aggregate_map
            self.assertIn(self.compute_uuid, pam)
            self.assertEqual(set(), pam[self.compute_uuid])

            # TODO(cdent): change this to use the methods built in
            # to the report client to retrieve inventory?
            inventory_url = ('/resource_providers/%s/inventories' %
                             self.compute_uuid)
            resp = self.client.get(inventory_url)
            inventory_data = resp.json()['inventories']
            self.assertEqual(self.compute_node.vcpus,
                             inventory_data[res_class]['total'])

            # Update allocations with our instance
            self.client.update_instance_allocation(self.compute_node,
                                                   self.instance, 1)

            # Check that allocations were made
            resp = self.client.get('/allocations/%s' % self.instance_uuid)
            alloc_data = resp.json()['allocations']
            vcpu_data = alloc_data[self.compute_uuid]['resources'][res_class]
            self.assertEqual(2, vcpu_data)

            # Check that usages are up to date
            resp = self.client.get('/resource_providers/%s/usages' %
                                   self.compute_uuid)
            usage_data = resp.json()['usages']
            vcpu_data = usage_data[res_class]
            self.assertEqual(2, vcpu_data)

            # Delete allocations with our instance
            self.client.update_instance_allocation(self.compute_node,
                                                   self.instance, -1)

            # No usage
            resp = self.client.get('/resource_providers/%s/usages' %
                                   self.compute_uuid)
            usage_data = resp.json()['usages']
            vcpu_data = usage_data[res_class]
            self.assertEqual(0, vcpu_data)

            # Trigger the reporting client deleting all inventory by setting
            # the compute node's CPU, RAM and disk amounts to 0.
            self.compute_node.vcpus = 0
            self.compute_node.memory_mb = 0
            self.compute_node.local_gb = 0
            self.client.update_compute_node(self.compute_node)

            # Check there are no more inventory records
            resp = self.client.get(inventory_url)
            inventory_data = resp.json()['inventories']
            self.assertEqual({}, inventory_data)

            # Try setting some invalid inventory and make sure the report
            # client raises the expected error.
            inv_data = {
                'BAD_FOO': {
                    'total': 100,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 100,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.assertRaises(exception.InvalidResourceClass,
                              self.client.set_inventory_for_provider,
                              self.compute_uuid, self.compute_name, inv_data)
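
The final assertion passes because placement only accepts standard resource class names or custom names carrying the CUSTOM_ prefix; an illustrative check (the standard list is truncated here, and the exact server-side validation may differ):

import re

STANDARD_SAMPLE = ('VCPU', 'MEMORY_MB', 'DISK_GB')  # truncated for brevity

def is_acceptable_rc_name(name):
    return (name in STANDARD_SAMPLE
            or re.match(r'^CUSTOM_[A-Z0-9_]+$', name) is not None)

assert not is_acceptable_rc_name('BAD_FOO')
assert is_acceptable_rc_name('CUSTOM_BANDWIDTH')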
Example 8
    def _interceptor(self):
        # Isolate this initialization for maintainability.
        return interceptor.RequestsInterceptor(app=self.app, url=self.url)
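
A hedged, self-contained sketch of how such a helper pays off (the app, URL, and test names below are invented): every test wraps its client calls in "with self._interceptor():" and the app/url wiring stays in exactly one place.

import requests
from wsgi_intercept import interceptor

class _SketchTest:
    url = 'http://sketch.test/svc'

    @staticmethod
    def app():
        def wsgi_app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [b'ok']
        return wsgi_app

    def _interceptor(self):
        # Same pattern as above: construction isolated in one helper.
        return interceptor.RequestsInterceptor(app=self.app, url=self.url)

    def test_roundtrip(self):
        with self._interceptor():
            assert requests.get(self.url).text == 'ok'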
Example 9
    def setup(self):
        """Set up a server for testing."""
        self.intercept = interceptor.RequestsInterceptor(
            lambda: app, host='127.0.0.1', port=7373)
        self.intercept.__enter__()
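
Because setup enters the interceptor by hand instead of using a with block, a matching teardown (assumed here, not shown in the original) must exit it so the real requests transport is restored:

    def teardown(self):
        """Tear down the intercepted server."""
        self.intercept.__exit__(None, None, None)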
Example 10
    def test_ensure_standard_resource_class(self):
        """Test case for bug #1746615: If placement is running a newer version
        of code than compute, it may have new standard resource classes we
        don't know about.  Make sure this scenario doesn't cause errors in
        set_inventory_for_provider.
        """
        inv = {
            'VCPU': {
                'total': 10,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 2,
                'step_size': 1,
                'allocation_ratio': 10.0,
            },
            'MEMORY_MB': {
                'total': 1048576,
                'reserved': 2048,
                'min_unit': 1024,
                'max_unit': 131072,
                'step_size': 1024,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 1,
                'min_unit': 1,
                'max_unit': 10,
                'step_size': 2,
                'allocation_ratio': 10.0,
            },
            # A standard resource class known by placement, but not locally
            'PCI_DEVICE': {
                'total': 4,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 4,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'CUSTOM_BANDWIDTH': {
                'total': 1250000,
                'reserved': 10000,
                'min_unit': 5000,
                'max_unit': 250000,
                'step_size': 5000,
                'allocation_ratio': 8.0,
            },
        }
        with interceptor.RequestsInterceptor(app=self.app, url=self.url):
            self.client.update_compute_node(self.context, self.compute_node)
            # Simulate that our locally-running code has an outdated notion of
            # standard resource classes.
            with mock.patch.object(fields.ResourceClass, 'STANDARD',
                                   ('VCPU', 'MEMORY_MB', 'DISK_GB')):
                # TODO(efried): Once bug #1746615 is fixed, this will no longer
                # raise, and can be replaced with:
                # self.client.set_inventory_for_provider(
                #     self.context, self.compute_uuid, self.compute_name, inv)
                self.assertRaises(exception.InvalidResourceClass,
                                  self.client.set_inventory_for_provider,
                                  self.context, self.compute_uuid,
                                  self.compute_name, inv)