Example #1
    def setUp(self):
        # Default to enabling the filter
        self.flags(enable_isolated_aggregate_filtering=True,
                   group='scheduler')

        # Use a custom weigher that would prefer host1 if the isolate
        # aggregate filter were not in place; otherwise it's not deterministic
        # whether we're landing on host2 because of the filter or just by
        # chance.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())

        super(IsolateAggregateFilterTest, self).setUp()
        self.image_service = nova.tests.unit.image.fake.FakeImageService()
        # Set required traits on the flavors.
        flavor_body = {'flavor': {'name': 'test_flavor',
                                  'ram': 512,
                                  'vcpus': 1,
                                  'disk': 1
                                  }}
        self.flavor_with_trait_dxva = self.api.post_flavor(flavor_body)
        self.admin_api.post_extra_spec(
            self.flavor_with_trait_dxva['id'],
            {'extra_specs': {'trait:HW_GPU_API_DXVA': 'required'}})
        flavor_body['flavor']['name'] = 'test_flavor1'
        self.flavor_with_trait_sgx = self.api.post_flavor(flavor_body)
        self.admin_api.post_extra_spec(
            self.flavor_with_trait_sgx['id'],
            {'extra_specs': {'trait:HW_CPU_X86_SGX': 'required'}})
        self.flavor_without_trait = self.flavors[0]

        with nova.utils.temporary_mutation(self.api, microversion='2.35'):
            images = self.api.get_images()
            self.image_id_without_trait = images[0]['id']
Example #2
    def setUp(self):
        super(AntiAffinityMultiCreateRequest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        self.useFixture(nova_fixtures.HostNameWeigherFixture())
        # Disable the late check on the compute node to mimic devstack.
        self.flags(disable_group_policy_check_upcall=True,
                   group='workarounds')
        self.start_service('scheduler')

        self.start_service('compute', host='host1')
        self.start_service('compute', host='host2')
Example #3
    def setUp(self):
        super(MissingReqSpecInstanceGroupUUIDTestCase, self).setUp()
        # Stub out external dependencies.
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        fake_image.stub_out_image_service(self)
        self.addCleanup(fake_image.FakeImageService_reset)
        # Configure the API to allow resizing to the same host so we can keep
        # the number of computes down to two in the test.
        self.flags(allow_resize_to_same_host=True)
        # Start nova controller services.
        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api
        self.start_service('conductor')
        # Use a custom weigher to make sure that we have a predictable
        # scheduling sort order.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())
        self.start_service('scheduler')
        # Start two computes, one where the server will be created and another
        # where we'll cold migrate it.
        # Keep track of the compute services per host name.
        self.computes = {}
        for host in ('host1', 'host2'):
            compute_service = self.start_service('compute', host=host)
            self.computes[host] = compute_service
Example #4
    def setUp(self):
        # Default to enabling the filter
        self.flags(query_placement_for_availability_zone=True,
                   group='scheduler')

        # Use a custom weigher to make sure that we have a predictable
        # scheduling sort order.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())
        super(AvailabilityZoneFilterTest, self).setUp()
Example #5
    def setUp(self):
        # Use a custom weigher to make sure that we have a predictable host
        # selection order during scheduling
        self.useFixture(nova_fixtures.HostNameWeigherFixture())

        super(PinnedComputeRpcTests, self).setUp()

        self.compute1 = self._start_compute(host='host1')
        self.compute2 = self._start_compute(host='host2')
        self.compute3 = self._start_compute(host='host3')
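
For context, the weigher such a fixture registers is conceptually very simple. Below is a minimal sketch assuming nova's BaseHostWeigher interface and a hypothetical static name-to-weight table; it is not the fixture's actual implementation.

# Minimal sketch only -- not the real HostNameWeigherFixture internals.
# Assumes nova's scheduler weigher interface (BaseHostWeigher with
# _weigh_object) and a hypothetical static name-to-weight table.
from nova.scheduler import weights


class HostNameWeigher(weights.BaseHostWeigher):
    """Prefer hosts by name so the scheduling order is predictable."""

    # Hypothetical defaults: host1 beats host2, which beats host3.
    _name_weights = {'host1': 100, 'host2': 50, 'host3': 10}

    def _weigh_object(self, host_state, weight_properties):
        # Hosts not in the table get 0, so any named host wins over them.
        return self._name_weights.get(host_state.host, 0)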
Example #6
    def setUp(self):
        super(SchedulerOnlyChecksTargetTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.flags(use_neutron=True)
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # The fake image backend is needed for image discovery.
        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # We have to get the image before we use 2.latest; otherwise we'll get
        # a 404 on the /images proxy API because of 2.36.
        self.image_id = self.api.get_images()[0]['id']

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # Define a very basic scheduler that only verifies whether the host
        # is down.
        self.flags(enabled_filters=['ComputeFilter'],
                   group='filter_scheduler')
        # NOTE(sbauza): Use the HostNameWeigherFixture so we are sure that
        # we prefer first host1 for the boot request and forget about any
        # other weigher.
        # Host2 should be preferred over host3 only when that's the only host
        # we verify (as requested_destination does).
        self.useFixture(
            nova_fixtures.HostNameWeigherFixture(weights={
                'host1': 100,
                'host2': 1,
                'host3': 50
            }))
        self.start_service('scheduler')

        # Let's now start three compute nodes as we said above.
        self.start_service('compute', host='host1')
        self.start_service('compute', host='host2')
        self.start_service('compute', host='host3')
        self.useFixture(cast_as_call.CastAsCall(self))
Example #7
    def setUp(self):
        self.flags(query_placement_for_routed_network_aggregates=True,
                   group='scheduler')

        # We will create 5 hosts; let's make sure we order them by their index.
        weights = {
            'host1': 500,
            'host2': 400,
            'host3': 300,
            'host4': 200,
            'host5': 100
        }
        self.useFixture(nova_fixtures.HostNameWeigherFixture(weights=weights))
        super().setUp()

        # Amend the usual neutron fixture with specific routed networks
        self.neutron = self.useFixture(NeutronRoutedNetworksFixture(self))

        # Let's create 5 computes with their respective records.
        for i in range(1, 6):
            setattr(self, 'compute%s' % i, self._start_compute('host%s' % i))
            setattr(self, 'compute%s_rp_uuid' % i,
                    self._get_provider_uuid_by_host('host%s' % i))
            setattr(
                self, 'compute%s_service_id' % i,
                self.admin_api.get_services(host='host%s' % i,
                                            binary='nova-compute')[0]['id'])

        # Simulate the placement setup neutron does for multi-segment networks.
        segment_ids = [segment["id"] for segment in self.neutron.segments]
        self.assertEqual(2, len(segment_ids))

        # We have 5 computes and the network has two segments. Let's create a
        # setup where the network has segments on host2 to host5 but not on
        # host1. The HostNameWeigherFixture prefers host1 over host2 over
        # host3 over host4 over host5, so this way we can check whether the
        # scheduler selects a host with an available network segment.
        # The segments for this network are:
        #  * segment 0 is for host2, host4 and host5
        #  * segment 1 is for host3 and host5
        self.segment_id_to_compute_rp_uuid = {
            segment_ids[0]: [
                self.compute2_rp_uuid, self.compute4_rp_uuid,
                self.compute5_rp_uuid
            ],
            segment_ids[1]: [self.compute3_rp_uuid, self.compute5_rp_uuid],
        }

        self._create_multisegment_placement_setup(
            self.segment_id_to_compute_rp_uuid)
Example #8
    def setUp(self):
        # Use a custom weigher to make sure that we have a predictable host
        # selection order during scheduling
        self.useFixture(nova_fixtures.HostNameWeigherFixture())

        super(PinnedComputeRpcTests, self).setUp()
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.compute1 = self._start_compute(host='host1')
        self.compute2 = self._start_compute(host='host2')
        self.compute3 = self._start_compute(host='host3')

        flavors = self.api.get_flavors()
        self.flavor1 = flavors[0]
Example #9
    def setUp(self):
        # Default to enabling the filter
        self.flags(query_placement_for_availability_zone=True,
                   group='scheduler')

        # Use a custom weigher to make sure that we have a predictable
        # scheduling sort order.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())

        # NOTE(danms): Do this before calling setUp() so that
        # the scheduler service that is started sees the new value
        filters = CONF.filter_scheduler.enabled_filters
        filters.remove('AvailabilityZoneFilter')
        self.flags(enabled_filters=filters, group='filter_scheduler')

        super(AvailabilityZoneFilterTest, self).setUp()
Example #10
    def setUp(self):
        # Use a custom weigher that would prefer host1 if the forbidden
        # aggregate filter were not in place; otherwise it's not deterministic
        # whether we're landing on host2 because of the filter or just by
        # chance.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())

        # NOTE(danms): Do this before calling setUp() so that
        # the scheduler service that is started sees the new value
        filters = CONF.filter_scheduler.enabled_filters
        filters.remove('AvailabilityZoneFilter')

        # NOTE(shilpasd): To test the `isolate_aggregates` request filter,
        # remove the following filters, which also filter hosts based on
        # aggregate metadata.
        if 'AggregateImagePropertiesIsolation' in filters:
            filters.remove('AggregateImagePropertiesIsolation')
        if 'AggregateInstanceExtraSpecsFilter' in filters:
            filters.remove('AggregateInstanceExtraSpecsFilter')
        self.flags(enabled_filters=filters, group='filter_scheduler')

        super(TestAggregateFiltersTogether, self).setUp()

        # Default to enabling all filters
        self.flags(limit_tenants_to_placement_aggregate=True,
                   group='scheduler')
        self.flags(placement_aggregate_required_for_tenants=True,
                   group='scheduler')
        self.flags(query_placement_for_availability_zone=True,
                   group='scheduler')
        self.flags(enable_isolated_aggregate_filtering=True,
                   group='scheduler')
        # Set required traits on the flavors.
        flavor_body = {
            'flavor': {
                'name': 'test_flavor',
                'ram': 512,
                'vcpus': 1,
                'disk': 1
            }
        }
        self.flavor_with_trait_dxva = self.api.post_flavor(flavor_body)
        self.admin_api.post_extra_spec(
            self.flavor_with_trait_dxva['id'],
            {'extra_specs': {
                'trait:HW_GPU_API_DXVA': 'required'
            }})
Example #11
    def setUp(self):
        # Need to enable the JsonFilter before starting the scheduler service
        # in the parent class.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        if 'JsonFilter' not in enabled_filters:
            enabled_filters.append('JsonFilter')
            self.flags(enabled_filters=enabled_filters,
                       group='filter_scheduler')

        # Use our custom weigher to make sure that we have a predictable
        # scheduling sort order during server create.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())

        super(JsonFilterTestCase, self).setUp()

        # Now create two compute services which will have unique host and
        # node names.
        self._start_compute('host1')
        self._start_compute('host2')
Example #12
    def setUp(self):
        super(TestRequestSpecRetryReschedule, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # The fake image backend is needed for image discovery.
        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # We have to get the image before we use 2.latest; otherwise we'll get
        # a 404 on the /images proxy API because of 2.36.
        self.image_id = self.api.get_images()[0]['id']

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # Use a custom weigher to make sure that we have a predictable
        # scheduling sort order.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())
        self.start_service('scheduler')

        # Let's now start three compute nodes as we said above.
        for host in ['host1', 'host2', 'host3']:
            self.start_service('compute', host=host)
Example #13
    def setUp(self):
        # Register a custom weigher for predictable scheduling results.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())
        super(MultiCellEvacuateTestCase, self).setUp()
Example #14
    def setUp(self):
        super(AggregateMultiTenancyIsolationColdMigrateTest, self).setUp()
        self.useFixture(nova_fixtures.RealPolicyFixture())
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        # Intentionally keep these separate since we want to create the
        # server with the non-admin user in a different project.
        admin_api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1',
                                       project_id=uuids.admin_project))
        self.admin_api = admin_api_fixture.admin_api
        self.admin_api.microversion = 'latest'
        user_api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1',
                                       project_id=uuids.user_project))
        self.api = user_api_fixture.api
        self.api.microversion = 'latest'

        self.start_service('conductor')
        # Enable the AggregateMultiTenancyIsolation filter before starting the
        # scheduler service.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        if 'AggregateMultiTenancyIsolation' not in enabled_filters:
            enabled_filters.append('AggregateMultiTenancyIsolation')
            self.flags(enabled_filters=enabled_filters,
                       group='filter_scheduler')
        # Add a custom weigher which will weigh host1, which will be in the
        # admin project aggregate, higher than the other hosts which are in
        # the non-admin project aggregate.
        self.useFixture(nova_fixtures.HostNameWeigherFixture())
        self.start_service('scheduler')

        for host in ('host1', 'host2', 'host3'):
            self.start_service('compute', host=host)

        # Create an admin-only aggregate for the admin project. This is needed
        # because if host1 is not in an aggregate with the filter_tenant_id
        # metadata key, the filter will accept that host even for the non-admin
        # project.
        admin_aggregate = self._create_aggregate(self.admin_api,
                                                 'admin-aggregate')
        self._add_host_to_aggregate(self.admin_api, admin_aggregate, 'host1')

        # Restrict the admin project to the admin aggregate.
        self._isolate_aggregate(self.admin_api, admin_aggregate,
                                uuids.admin_project)

        # Create the tenant aggregate for the non-admin project.
        tenant_aggregate = self._create_aggregate(self.admin_api,
                                                  'tenant-aggregate')

        # Add two compute hosts to the tenant aggregate. We exclude host1
        # since that is weighed higher due to HostNameWeigherFixture and we
        # want to ensure the scheduler properly filters out host1 before we
        # even get to weighing the selected hosts.
        for host in ('host2', 'host3'):
            self._add_host_to_aggregate(self.admin_api, tenant_aggregate, host)

        # Restrict the non-admin project to the tenant aggregate.
        self._isolate_aggregate(self.admin_api, tenant_aggregate,
                                uuids.user_project)
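
As a closing note on Example #14, restricting an aggregate to a project comes down to setting the filter_tenant_id metadata key that the AggregateMultiTenancyIsolation filter reads. The sketch below shows the os-aggregates set_metadata action body; the wrapper function is hypothetical and only illustrates what a helper like _isolate_aggregate presumably does, not the helper used in the test above.

# Illustrative sketch: restrict an aggregate to a single project by setting
# the filter_tenant_id metadata key read by AggregateMultiTenancyIsolation.
# The wrapper function is hypothetical; the test above uses its own helper.
def isolate_aggregate_to_project(admin_api, aggregate_id, project_id):
    body = {'set_metadata': {'metadata': {'filter_tenant_id': project_id}}}
    # POST /os-aggregates/{id}/action carries the set_metadata action.
    return admin_api.api_post(
        '/os-aggregates/%s/action' % aggregate_id, body)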