    def setUp(self):
        super(TestParallelEvacuationWithServerGroup, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.admin_api
        # 2.11 is needed for force_down
        # 2.14 is needed for evacuate without onSharedStorage flag
        self.api.microversion = '2.14'

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        # the image fake backend needed for image discovery
        self.useFixture(nova_fixtures.GlanceFixture(self))

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we need to boot two
        # instances with an anti-affinity server group policy.
        self.compute1 = self.start_service('compute', host='host1')
        self.compute2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        manager_class = manager.ComputeManager
        original_rebuild = manager_class._do_rebuild_instance

        def fake_rebuild(self_, context, instance, *args, **kwargs):
            # Simulate that the rebuild request of one of the instances
            # reaches the target compute manager significantly later so the
            # rebuild of the other instance can finish before the late
            # validation of the first rebuild.
            # We cannot simply delay the virt driver's rebuild or the
            # manager's _rebuild_default_impl as those run after the late
            # validation.
            if instance.host == 'host1':
                # wait for the other instance rebuild to start
                fake_notifier.wait_for_versioned_notifications(
                    'instance.rebuild.start', n_events=1)

            original_rebuild(self_, context, instance, *args, **kwargs)

        self.stub_out(
            'nova.compute.manager.ComputeManager.'
            '_do_rebuild_instance', fake_rebuild)
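
A test built on this setUp would typically create an anti-affinity server group and boot one server per host before triggering the parallel evacuations. A minimal sketch, assuming the usual Nova functional-test helpers (_wait_for_state_change) and the standard os:scheduler_hints request format; the test and server names are illustrative:

    def test_boot_anti_affinity_pair(self):
        # Illustrative sketch only.
        group = self.api.post_server_groups(
            {'name': 'evac-group', 'policies': ['anti-affinity']})
        servers = []
        for name in ('server1', 'server2'):
            server = self.api.post_server({
                'server': {
                    'name': name,
                    'imageRef': self.image_id,
                    'flavorRef': self.flavor_id,
                },
                'os:scheduler_hints': {'group': group['id']},
            })
            servers.append(self._wait_for_state_change(server, 'ACTIVE'))
        # The anti-affinity policy spreads the two servers across host1 and
        # host2, ready for the parallel evacuation.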
Example #2
    def setUp(self):
        super(TestResizeWithNoAllocationScheduler, self).setUp()

        self.useFixture(nova_fixtures.RealPolicyFixture())
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.admin_api
        self.api.microversion = self.microversion

        self.start_service('conductor')

        # Create two compute nodes/services.
        for host in ('host1', 'host2'):
            self.start_service('compute', host=host)

        scheduler_service = self.start_service('scheduler')
        # We need to mock the FilterScheduler to not use Placement so that
        # allocations won't be created during scheduling.
        scheduler_service.manager.driver.USES_ALLOCATION_CANDIDATES = False

        flavors = self.api.get_flavors()
        self.old_flavor = flavors[0]
        self.new_flavor = flavors[1]
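
Because the scheduler is patched to not create placement allocations, a resize exercised through this setUp hits the code paths that must cope with missing allocations. A rough sketch of the resize flow, assuming the _wait_for_state_change helper and the functional API client's post_server_action method are available:

    def test_resize_sketch(self):
        # Illustrative sketch only.
        server = self.api.post_server({'server': {
            'name': 'resize-me',
            'imageRef': self.api.get_images()[0]['id'],
            'flavorRef': self.old_flavor['id'],
            # (depending on self.microversion, a networks value may also be
            # required here)
        }})
        server = self._wait_for_state_change(server, 'ACTIVE')
        # Resize to the new flavor, then confirm it.
        self.api.post_server_action(
            server['id'], {'resize': {'flavorRef': self.new_flavor['id']}})
        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
        self.api.post_server_action(server['id'], {'confirmResize': None})
        self._wait_for_state_change(server, 'ACTIVE')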
Example #3
    def setUp(self):
        super(ServerGroupTestBase, self).setUp()
        self.flags(enabled_filters=self._enabled_filters,
                   group='filter_scheduler')
        # NOTE(sbauza): Don't verify VCPUS and disks given the current nodes.
        self.flags(cpu_allocation_ratio=9999.0)
        self.flags(disk_allocation_ratio=9999.0)
        self.flags(weight_classes=self._get_weight_classes(),
                   group='filter_scheduler')

        self.useFixture(nova_fixtures.RealPolicyFixture())
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.NeutronFixture(self))

        self.useFixture(func_fixtures.PlacementFixture())
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.api.microversion = self.microversion
        self.admin_api = api_fixture.admin_api
        self.admin_api.microversion = self.microversion

        self.start_service('conductor')
        self.start_service('scheduler')
Example #4
    def setUp(self):
        super(_FakeDriverBackendTestCase, self).setUp()
        # TODO(sdague): it would be nice to do this in a way that only
        # the relevant backends were replaced for tests, though this
        # should not harm anything by doing it for all backends
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self._setup_fakelibvirt()
Example #5
    def setUp(self):
        super(TestDeleteWhileBooting, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.GlanceFixture(self))

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.api

        self.ctxt = nova_context.get_context()

        # We intentionally do not start a conductor or scheduler service, since
        # our goal is to simulate an instance that has not been scheduled yet.

        # Kick off a server create request and move on once it's in the BUILD
        # state. Since we have no conductor or scheduler service running, the
        # server will "hang" in an unscheduled state for testing.
        self.server = self._create_server(expected_state='BUILD')
        # Simulate that a different request has deleted the build request
        # record after this delete request has begun processing. (The first
        # lookup of the build request occurs in the servers API to get the
        # instance object in order to delete it).
        # We need to get the build request now before we mock the method.
        self.br = objects.BuildRequest.get_by_instance_uuid(
            self.ctxt, self.server['id'])
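
The comments above describe the race being simulated: the build request disappears while the delete is in flight. A test can reproduce it by letting the first build-request lookup succeed (using self.br saved above) and making the next one raise BuildRequestNotFound before the delete is issued. A sketch, assuming mock and nova.exception (imported as exception) are available in this module:

    def test_delete_with_vanished_build_request(self):
        # Illustrative sketch only: the first lookup still finds the build
        # request, but it is gone by the time the delete tries to use it,
        # simulating a concurrent delete of the build request.
        with mock.patch(
                'nova.objects.BuildRequest.get_by_instance_uuid',
                side_effect=[
                    self.br,
                    exception.BuildRequestNotFound(uuid=self.server['id'])]):
            self.api.delete_server(self.server['id'])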
Example #6
    def setUp(self):
        super(MetadataTest, self).setUp()

        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        self.start_service('conductor')
        self.start_service('scheduler')
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).api
        self.start_service('compute')

        # create a server for the tests
        server = self._build_server(name='test')
        server = self.api.post_server({'server': server})
        self.server = self._wait_for_state_change(server, 'ACTIVE')

        self.api_fixture = self.useFixture(nova_fixtures.OSMetadataServer())
        self.md_url = self.api_fixture.md_url

        # make sure that the metadata service returns information about the
        # server we created above
        def fake_get_fixed_ip_by_address(self, ctxt, address):
            return {'instance_uuid': server['id']}

        self.useFixture(
            fixtures.MonkeyPatch(
                'nova.network.neutron.API.get_fixed_ip_by_address',
                fake_get_fixed_ip_by_address))
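
With the OSMetadataServer fixture running and the fixed-IP lookup patched to point at the test server, the metadata API can be hit over plain HTTP. A small sketch; the exact joining of md_url with the standard openstack/latest/meta_data.json path is an assumption, and requests is assumed to be importable:

    def test_metadata_sketch(self):
        import requests

        res = requests.get(
            self.md_url + 'openstack/latest/meta_data.json', timeout=5)
        self.assertEqual(200, res.status_code)
        # The metadata document should describe the server booted in setUp.
        self.assertEqual(self.server['id'], res.json()['uuid'])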
Example #7
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        context_patcher = mock.patch(
            'oslo_context.context.generate_request_id',
            return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
        self.mock_gen_request_id = context_patcher.start()
        self.addCleanup(context_patcher.stop)

        self.start_service('conductor')
        self.start_service('scheduler')
        self.compute = self.start_service('compute')
        # Reset the service create notifications
        fake_notifier.reset()
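
Since the notifier is stubbed and reset at the end of setUp, tests built on this base boot a server and then compare the captured versioned notifications against stored samples. A bare-bones sketch of the capture side only; _build_server and _wait_for_state_change are assumed to be provided by the functional-test helper mixins, and the sample-comparison helpers are omitted:

    def test_notification_capture_sketch(self):
        # Illustrative sketch only.
        server = self.api.post_server(
            {'server': self._build_server(name='notif-test')})
        self._wait_for_state_change(server, 'ACTIVE')
        # instance.create.end is emitted once the boot completes.
        fake_notifier.wait_for_versioned_notifications(
            'instance.create.end', n_events=1)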
Example #8
    def setUp(self):
        super(SecgroupsFullstack, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.GlanceFixture(self))
        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture())

        self.api = api_fixture.api
Example #9
    def setUp(self):
        super(TestLocalDeleteAttachedVolumes, self).setUp()
        self.useFixture(nova_fixtures.RealPolicyFixture())
        # We need the CinderFixture to stub out the volume API.
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))
        # Use the PlacementFixture to avoid annoying warnings in the logs.
        self.useFixture(func_fixtures.PlacementFixture())
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.api
        # Use the latest microversion, which covers both passing
        # networks='none' on server create (2.37+) and attaching a volume
        # to a shelved-offloaded server (2.20+).
        self.api.microversion = 'latest'

        # the image fake backend needed for image discovery
        self.useFixture(nova_fixtures.GlanceFixture(self))

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('compute')

        self.useFixture(nova_fixtures.CastAsCallFixture(self))

        self.flavor_id = self.api.get_flavors()[0]['id']
Example #10
    def setUp(self):
        super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.CinderFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.admin_api

        self.start_service('conductor')

        # Add the IsolatedHostsFilter to the list of enabled filters since it
        # is not enabled by default.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        enabled_filters.append('IsolatedHostsFilter')
        self.flags(
            enabled_filters=enabled_filters,
            isolated_images=[self.glance.auto_disk_config_enabled_image['id']],
            isolated_hosts=['host1'],
            restrict_isolated_hosts_to_isolated_images=True,
            group='filter_scheduler')
        self.start_service('scheduler')

        # Create two compute nodes/services so we can restrict the image
        # we'll use to one of the hosts.
        for host in ('host1', 'host2'):
            self.start_service('compute', host=host)
Example #11
    def setUp(self):
        super(CrossAZAttachTestCase, self).setUp()
        # Use the standard fixtures.
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.CinderFixture(self, az=self.az))
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        # Start nova controller services.
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
        self.start_service('conductor')
        self.start_service('scheduler')
        # Start one compute service and add it to the AZ. This allows us to
        # get past the AvailabilityZoneFilter and build a server.
        self.start_service('compute', host='host1')
        agg_id = self.api.post_aggregate(
            {'aggregate': {
                'name': self.az,
                'availability_zone': self.az
            }})['id']
        self.api.api_post('/os-aggregates/%s/action' % agg_id,
                          {'add_host': {
                              'host': 'host1'
                          }})
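
Having added host1 to an aggregate that defines the availability zone, a test can request a server directly in that zone and then attach volumes from the CinderFixture, which was created with the same AZ. A sketch of the boot step only; availability_zone is the standard server-create field, while _wait_for_state_change is an assumed helper:

    def test_boot_in_az_sketch(self):
        # Illustrative sketch only: boot a server pinned to the test AZ.
        server = self.api.post_server({'server': {
            'name': 'az-server',
            'imageRef': self.api.get_images()[0]['id'],
            'flavorRef': self.api.get_flavors()[0]['id'],
            'availability_zone': self.az,
        }})
        self._wait_for_state_change(server, 'ACTIVE')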
Example #12
    def setUp(self):
        super(TestServerGet, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api

        # NOTE(mriedem): This image has an invalid architecture metadata value
        # and is used for negative testing in the functional stack.
        timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
        image = {
            'id': 'c456eb30-91d7-4f43-8f46-2efd9eccd744',
            'name': 'fake-image-invalid-arch',
            'created_at': timestamp,
            'updated_at': timestamp,
            'deleted_at': None,
            'deleted': False,
            'status': 'active',
            'is_public': False,
            'container_format': 'raw',
            'disk_format': 'raw',
            'size': '25165824',
            'properties': {
                'kernel_id': 'nokernel',
                'ramdisk_id': 'nokernel',
                'architecture': 'x64'
            }
        }
        self.image_id = self.glance.create(None, image)['id']
        self.flavor_id = self.api.get_flavors()[0]['id']
Example #13
    def setUp(self):
        super(TestRescheduleWithVolumesAttached, self).setUp()

        # Use the new attach flow fixture for cinder
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
        self.useFixture(nova_fixtures.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.GlanceFixture(self))

        fake_network.set_stub_network_methods(self)

        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.admin_api

        self.flags(compute_driver='fake.FakeRescheduleDriver')

        self.start_service('conductor')
        self.start_service('scheduler')

        # Start two computes to allow the instance to be rescheduled
        self.host1 = self.start_service('compute', host='host1')

        self.host2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']
Example #14
    def setUp(self):
        super(AntiAffinityMultiCreateRequest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        self.start_service('conductor')

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        self.useFixture(nova_fixtures.HostNameWeigherFixture())
        # Disable the late group policy check on compute to mimic devstack.
        self.flags(disable_group_policy_check_upcall=True, group='workarounds')
        self.start_service('scheduler')

        self.start_service('compute', host='host1')
        self.start_service('compute', host='host2')
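
With host1 weighted first and the late policy check disabled, a test on this setUp multi-creates two servers in an anti-affinity group and verifies they end up on different hosts. A rough sketch; min_count and OS-EXT-SRV-ATTR:host are standard API fields, the single 'policy' key is the newer (2.64+) server-group format implied by microversion 'latest', and the helper names and group-hint format are assumptions:

    def test_anti_affinity_multi_create_sketch(self):
        # Illustrative sketch only.
        group = self.api.post_server_groups(
            {'name': 'a-a-group', 'policy': 'anti-affinity'})
        self.api.post_server({
            'server': {
                'name': 'multi',
                'imageRef': self.api.get_images()[0]['id'],
                'flavorRef': self.api.get_flavors()[0]['id'],
                'networks': 'none',
                'min_count': 2,
            },
            'os:scheduler_hints': {'group': group['id']},
        })
        hosts = set()
        for server in self.api.get_servers():
            self._wait_for_state_change(server, 'ACTIVE')
            hosts.add(self.admin_api.get_server(
                server['id'])['OS-EXT-SRV-ATTR:host'])
        # Anti-affinity should have spread the two members across the hosts.
        self.assertEqual({'host1', 'host2'}, hosts)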
Example #15
    def setUp(self):
        super(BootFromVolumeLargeRequestTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.CinderFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
Example #16
    def setUp(self):
        super(FillVirtualInterfaceListMigration, self).setUp()
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.admin_api
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('compute')
Example #17
    def setUp(self):
        super(TestServerValidation, self).setUp()
        self.useFixture(nova_fixtures.RealPolicyFixture())
        self.useFixture(nova_fixtures.GlanceFixture(self))

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.api

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']
Example #18
    def setUp(self):
        super(TestEvacuationWithSourceReturningDuringRebuild, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.admin_api
        # 2.11 is needed for force_down
        # 2.14 is needed for evacuate without onSharedStorage flag
        self.api.microversion = '2.14'

        # the image fake backend needed for image discovery
        self.useFixture(nova_fixtures.GlanceFixture(self))

        self.start_service('conductor')
        self.start_service('scheduler')

        # Start two computes
        self._start_compute('host1')
        self._start_compute('host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        self.addCleanup(fake_notifier.reset)

        # Stub out rebuild with a slower method allowing the src compute to be
        # restarted once the migration hits pre-migrating after claiming
        # resources on the dest.
        manager_class = manager.ComputeManager
        original_rebuild = manager_class._do_rebuild_instance

        def start_src_rebuild(self_, context, instance, *args, **kwargs):
            server = self.api.get_server(instance.uuid)
            # Start the src compute once the migration is pre-migrating.
            self._wait_for_migration_status(server, ['pre-migrating'])
            self.computes.get(self.source_compute).start()
            original_rebuild(self_, context, instance, *args, **kwargs)

        self.stub_out(
            'nova.compute.manager.ComputeManager.'
            '_do_rebuild_instance', start_src_rebuild)
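
The stub above restarts the source compute as soon as the evacuation migration reaches pre-migrating, so the test itself only has to boot a server, take the source compute down, and issue the evacuate action. A sketch of that trigger side; the evacuate body shown is valid for microversion 2.14, and the stop/force-down steps are left as a comment because the exact helper used for them is not shown in this snippet:

    def test_evacuate_sketch(self):
        # Illustrative sketch only.
        server = self.api.post_server({'server': {
            'name': 'evacuate-me',
            'imageRef': self.image_id,
            'flavorRef': self.flavor_id,
        }})
        server = self._wait_for_state_change(server, 'ACTIVE')
        self.source_compute = server['OS-EXT-SRV-ATTR:host']
        # ... stop the source compute service and mark it forced-down via
        # the os-services API (microversion 2.11) before evacuating ...
        self.api.post_server_action(server['id'], {'evacuate': {}})
        # The stubbed rebuild restarts the source once the migration is
        # pre-migrating; wait for the evacuation migration to complete.
        self._wait_for_migration_status(server, ['done'])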
Example #19
    def setUp(self):
        super(TestRescheduleWithServerGroup, self).setUp()

        self.useFixture(nova_fixtures.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.api
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api

        # the image fake backend needed for image discovery
        self.useFixture(nova_fixtures.GlanceFixture(self))

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we're going to fake one raising
        # RescheduledException to trigger a retry to the other compute host.
        self.start_service('compute', host='host1')
        self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        # This is our flag that we set when we hit the first host and
        # made it fail.
        self.failed_host = None
        self.attempts = 0

        def fake_validate_instance_group_policy(_self, *args, **kwargs):
            self.attempts += 1
            if self.failed_host is None:
                # Set the failed_host value to the ComputeManager.host value.
                self.failed_host = _self.host
                raise exception.RescheduledException(instance_uuid='fake',
                                                     reason='Policy violated')

        self.stub_out(
            'nova.compute.manager.ComputeManager.'
            '_validate_instance_group_policy',
            fake_validate_instance_group_policy)
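
Because the stubbed policy check raises RescheduledException exactly once, a test built on this setUp boots a single server with a group hint and then asserts it was retried onto the other host. A compact sketch, with the same caveats as above about assumed helper names and the scheduler-hint format:

    def test_reschedule_sketch(self):
        # Illustrative sketch only.
        group = self.api.post_server_groups(
            {'name': 'reschedule-group', 'policies': ['anti-affinity']})
        server = self.api.post_server({
            'server': {
                'name': 'retry-me',
                'imageRef': self.image_id,
                'flavorRef': self.flavor_id,
            },
            'os:scheduler_hints': {'group': group['id']},
        })
        server = self._wait_for_state_change(server, 'ACTIVE')
        # The first host failed the stubbed policy check, so the build must
        # have been retried exactly once, landing on the other host.
        self.assertEqual(2, self.attempts)
        self.assertNotEqual(
            self.failed_host,
            self.admin_api.get_server(server['id'])['OS-EXT-SRV-ATTR:host'])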
Example #20
    def setUp(self):
        super(ListDeletedServersWithMarker, self).setUp()
        # Start standard fixtures.
        self.useFixture(func_fixtures.PlacementFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.GlanceFixture(self))

        # Start nova services.
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('compute')
Example #21
    def setUp(self):
        super(ServersPreSchedulingTestCase, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NoopConductorFixture())
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.api.microversion = 'latest'
        self.useFixture(
            nova_fixtures.SingleCellSimple(instances_created=False))
Example #22
    def setUp(self):
        super(TestNeutronExternalNetworks, self).setUp()
        # Use the standard fixtures.
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(func_fixtures.PlacementFixture())
        self.useFixture(nova_fixtures.GlanceFixture(self))
        neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
        self._setup_external_network(neutron)
        # Start nova controller services.
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.api
        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('compute', host='host1')
Example #23
    def setUp(self):
        super(ServerFaultTestCase, self).setUp()
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(nova_fixtures.RealPolicyFixture())

        # Start the compute services.
        self.start_service('conductor')
        self.start_service('scheduler')
        self.compute = self.start_service('compute')
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api
Example #24
    def setUp(self):
        super(_IntegratedTestBase, self).setUp()

        self.useFixture(cast_as_call.CastAsCall(self))

        self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
        self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self._setup_services()
Example #25
    def setUp(self):
        super(TestMultiCreateServerGroupMemberOverQuota, self).setUp()
        self.flags(server_group_members=2, group='quota')
        self.useFixture(nova_fixtures.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.api
        self.api.microversion = '2.37'  # so we can specify networks='none'

        group = {'name': 'test group', 'policies': ['soft-anti-affinity']}
        self.created_group = self.api.post_server_groups(group)
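
The quota for server group members is capped at 2 above, so the natural test is to multi-create three members of the group and expect a 403. A sketch, assuming the functional API client module is imported as client and that its exception carries the HTTP response:

    def test_multi_create_over_quota_sketch(self):
        # Illustrative sketch only: 3 members > quota of 2.
        server_req = {
            'server': {
                'name': 'over-quota',
                'imageRef': self.api.get_images()[0]['id'],
                'flavorRef': self.api.get_flavors()[0]['id'],
                'networks': 'none',
                'min_count': 3,
            },
            'os:scheduler_hints': {'group': self.created_group['id']},
        }
        ex = self.assertRaises(
            client.OpenStackApiException, self.api.post_server, server_req)
        self.assertEqual(403, ex.response.status_code)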
Example #26
    def setUp(self):
        super(SchedulerOnlyChecksTargetTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # the image fake backend needed for image discovery
        self.useFixture(nova_fixtures.GlanceFixture(self))

        self.start_service('conductor')

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # Define a very basic scheduler that only verifies if host is down.
        self.flags(enabled_filters=['ComputeFilter'], group='filter_scheduler')
        # NOTE(sbauza): Use the HostNameWeigherFixture so we are sure that
        # we prefer host1 first for the boot request and ignore any other
        # weigher.
        # Host2 should be preferred over host3 only when it is the only host
        # we verify (as requested_destination does).
        self.useFixture(
            nova_fixtures.HostNameWeigherFixture(weights={
                'host1': 100,
                'host2': 1,
                'host3': 50
            }))
        self.start_service('scheduler')

        # Let's now start three compute nodes as we said above.
        self.start_service('compute', host='host1')
        self.start_service('compute', host='host2')
        self.start_service('compute', host='host3')
        self.useFixture(cast_as_call.CastAsCall(self))
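
Given the weigher values above (host1 ahead of host3 ahead of host2), the initial boot through this setUp should always land on host1; the later migration steps are what actually exercise the requested_destination behaviour. A sketch of the boot-and-assert step only; helper names are assumed as before:

    def test_boot_lands_on_host1_sketch(self):
        # Illustrative sketch only.
        server = self.api.post_server({'server': {
            'name': 'weighed-server',
            'imageRef': self.api.get_images()[0]['id'],
            'flavorRef': self.api.get_flavors()[0]['id'],
            'networks': 'none',
        }})
        server = self._wait_for_state_change(server, 'ACTIVE')
        self.assertEqual(
            'host1',
            self.admin_api.get_server(server['id'])['OS-EXT-SRV-ATTR:host'])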
Example #27
    def test_end_to_end(self):
        """This test emulates a full end to end test showing that without this
        feature a vm cannot be spawning using a custom trait and then start a
        compute service that provides that trait.
        """

        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.GlanceFixture(self))

        # Start nova services.
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
        self.api.microversion = 'latest'
        self.start_service('conductor')
        # start nova-compute that will not have the additional trait.
        self._start_compute("fake-host-1")

        node_name = "fake-host-2"

        # create a config file with explicit name
        provider_config = self._create_config_entry(node_name,
                                                    id_method="name")
        self._place_config_file("provider_config.yaml", provider_config)

        self._create_flavor(name='CUSTOM_Flavor',
                            id=42,
                            vcpu=4,
                            memory_mb=4096,
                            disk=1024,
                            swap=0,
                            extra_spec={
                                f"trait:{os_traits.normalize_name(node_name)}":
                                "required"
                            })

        self._create_server(flavor_id=42,
                            expected_state='ERROR',
                            networks=[{
                                'port': self.neutron.port_1['id']
                            }])

        # start compute node that will report the custom trait.
        self._start_compute("fake-host-2")
        self._create_server(flavor_id=42,
                            expected_state='ACTIVE',
                            networks=[{
                                'port': self.neutron.port_1['id']
                            }])
Example #28
    def setUp(self):
        super(TestInstanceActionBuryInCell0, self).setUp()
        # Setup common fixtures.
        self.useFixture(func_fixtures.PlacementFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.GlanceFixture(self))
        policy = self.useFixture(nova_fixtures.RealPolicyFixture())
        # Allow non-admins to see instance action events.
        policy.set_rules({
            'os_compute_api:os-instance-actions:events': 'rule:admin_or_owner'
        }, overwrite=False)
        # Setup controller services.
        self.start_service('conductor')
        self.start_service('scheduler')
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).api
Example #29
    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTestV21, self).setUp()

        self.instance_cache_num = 0
        fakes.stub_out_nw_api(self)
        self._set_up_controller()

        def create_db_entry_for_new_instance(*args, **kwargs):
            instance = args[4]
            instance.uuid = FAKE_UUID
            return instance

        self.useFixture(fixtures.GlanceFixture(self))
        self.stub_out('nova.compute.api.API.create_db_entry_for_new_instance',
                      create_db_entry_for_new_instance)
Example #30
    def setUp(self):
        super(TestLocalDeleteAllocations, self).setUp()
        self.useFixture(nova_fixtures.RealPolicyFixture())
        # The NeutronFixture is needed to show security groups for a server.
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.GlanceFixture(self))
        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api
        # We need the latest microversion to force-down the compute service
        self.admin_api.microversion = 'latest'

        self.start_service('conductor')

        self.start_service('scheduler')