Example #1
    def setUp(self):
        super(TestLiveMigrateOneOfConcurrentlyCreatedInstances, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.admin_api
        self.api.microversion = self.microversion

        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # set_nodes() is needed to have each compute service return a
        # different nodename, so we get two hosts in the list of candidates
        # for scheduling. Otherwise both hosts will have the same default
        # nodename "fake-mini". The host passed to start_service controls the
        # "host" attribute and set_nodes() sets the "nodename" attribute.
        # We set_nodes() to make host and nodename the same for each compute.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.start_service('compute', host='host2')

        fake_network.set_stub_network_methods(self)

        flavors = self.api.get_flavors()
        self.flavor1 = flavors[0]
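The setUp above only prepares the two compute hosts. A test built on it would typically boot a server with self.flavor1 and then trigger a live migration through the admin API. The following is a rough, hypothetical sketch; the test name, request bodies and the exact 'os-migrateLive' parameters are assumptions based on the other examples on this page, not part of the original test:

    def test_live_migrate_sketch(self):
        # Hypothetical sketch: boot a server on one of the two hosts set up
        # above, then ask nova to live-migrate it and let the scheduler pick
        # the other host.
        server_req = {'name': 'test-server',
                      'imageRef': self.api.get_images()[0]['id'],
                      'flavorRef': self.flavor1['id']}
        created = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(self.api, created, 'ACTIVE')

        # The exact request body depends on the API microversion in use;
        # with 2.25 or later the target host can be omitted (None) and
        # block_migration set to 'auto'.
        self.api.post_server_action(
            server['id'],
            {'os-migrateLive': {'host': None, 'block_migration': 'auto'}})
        self._wait_for_server_parameter(
            self.api, server, {'OS-EXT-STS:task_state': None})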
Example #2
    def setUp(self):
        super(TestResizeWithNoAllocationScheduler, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.admin_api
        self.api.microversion = self.microversion

        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')

        # Create two compute nodes/services.
        for host in ('host1', 'host2'):
            fake.set_nodes([host])
            self.addCleanup(fake.restore_nodes)
            self.start_service('compute', host=host)

        scheduler_service = self.start_service('scheduler')
        # We need to mock the FilterScheduler to not use Placement so that
        # allocations won't be created during scheduling.
        scheduler_service.manager.driver.USES_ALLOCATION_CANDIDATES = False

        flavors = self.api.get_flavors()
        self.old_flavor = flavors[0]
        self.new_flavor = flavors[1]
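Because the scheduler above is told not to use allocation candidates, no allocations exist when the resize starts; the test body then exercises the resize flow between the two flavors captured in setUp. A minimal, hypothetical sketch (the test name and request shapes are assumptions):

    def test_resize_sketch(self):
        # Hypothetical sketch: boot with the old flavor and resize to the
        # new one. The scheduler created no allocations, so the resize path
        # has to cope with that on the compute side.
        server_req = {'name': 'test-server',
                      'imageRef': self.api.get_images()[0]['id'],
                      'flavorRef': self.old_flavor['id']}
        created = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(self.api, created, 'ACTIVE')

        self.api.post_server_action(
            server['id'], {'resize': {'flavorRef': self.new_flavor['id']}})
        server = self._wait_for_state_change(self.api, created,
                                              'VERIFY_RESIZE')
        # From here the test could confirm or revert the resize and assert
        # against the flavor reported by the API.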
Example #3
    def setUp(self):
        super(TestRescheduleWithVolumesAttached, self).setUp()

        # Use the new attach flow fixture for cinder
        cinder_fixture = nova_fixtures.CinderFixtureNewAttachFlow(self)
        self.cinder = self.useFixture(cinder_fixture)
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))

        fake_network.set_stub_network_methods(self)

        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.admin_api

        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.flags(compute_driver='fake.FakeRescheduleDriver')

        self.start_service('conductor')
        self.start_service('scheduler')

        # Start two computes to allow the instance to be rescheduled
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.host1 = self.start_service('compute', host='host1')

        fake.set_nodes(['host2'])
        self.host2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']
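The FakeRescheduleDriver configured above fails the first build attempt, so a test built on this setUp boots a volume-backed server and checks that the attachment survives the reschedule to the second host. A rough, hypothetical sketch, assuming the Cinder fixture accepts an arbitrary volume UUID (the UUID below is a placeholder):

    def test_reschedule_with_volume_sketch(self):
        # Hypothetical sketch: boot from a volume so the reschedule path has
        # a volume attachment to clean up on host1 and recreate on host2.
        volume_id = '9a695496-44aa-4404-b2cc-ccab2501f87e'  # placeholder
        server_req = {
            'name': 'test-server',
            'flavorRef': self.flavor_id,
            # No image; the root disk comes from the mapped volume.
            'imageRef': '',
            'block_device_mapping_v2': [{
                'boot_index': 0,
                'uuid': volume_id,
                'source_type': 'volume',
                'destination_type': 'volume'}],
        }
        created = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(self.api, created, 'ACTIVE')

        # The rescheduled server should still report the volume as attached.
        attached = [v['id'] for v in
                    server['os-extended-volumes:volumes_attached']]
        self.assertIn(volume_id, attached)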
Example #4
    def setUp(self):
        super(TestEvacuationWithSourceReturningDuringRebuild, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api
        # 2.11 is needed for force_down
        # 2.14 is needed for evacuate without onSharedStorage flag
        self.api.microversion = '2.14'

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # Start two computes
        self.computes = {}

        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.computes['host1'] = self.start_service('compute', host='host1')

        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.computes['host2'] = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        self.addCleanup(fake_notifier.reset)

        # Stub out rebuild with a slower method allowing the src compute to be
        # restarted once the migration hits pre-migrating after claiming
        # resources on the dest.
        manager_class = nova.compute.manager.ComputeManager
        original_rebuild = manager_class._do_rebuild_instance

        def start_src_rebuild(self_, context, instance, *args, **kwargs):
            server = self.api.get_server(instance.uuid)
            # Start the src compute once the migration is pre-migrating.
            self._wait_for_migration_status(server, ['pre-migrating'])
            self.computes.get(self.source_compute).start()
            original_rebuild(self_, context, instance, *args, **kwargs)

        self.stub_out('nova.compute.manager.ComputeManager.'
                      '_do_rebuild_instance', start_src_rebuild)
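With the rebuild stub in place, a test built on this setUp stops and forces down the compute hosting the instance, evacuates it, and relies on the stub to restart the source service once the migration reaches pre-migrating. A rough, hypothetical sketch of that flow, reusing the evacuate and force-down calls that appear in the later examples on this page:

    def test_evacuation_sketch(self):
        # Hypothetical sketch: boot a server, take its host down, evacuate,
        # and let the stubbed rebuild bring the source host back while the
        # claim on the destination already exists.
        server_req = {'name': 'test-server',
                      'imageRef': self.image_id,
                      'flavorRef': self.flavor_id}
        created = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(self.api, created, 'ACTIVE')

        self.source_compute = server['OS-EXT-SRV-ATTR:host']
        dest = 'host2' if self.source_compute == 'host1' else 'host1'

        # Take the source compute down so the instance can be evacuated.
        self.computes[self.source_compute].stop()
        self.api.force_down_service(
            self.source_compute, 'nova-compute', True)

        self.api.post_server_action(server['id'], {'evacuate': {}})
        self._wait_for_migration_status(server, ['done'])
        server = self.api.get_server(server['id'])
        self.assertEqual(dest, server['OS-EXT-SRV-ATTR:host'])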
Example #5
    def setUp(self):
        super(TestResizeWithCachingScheduler, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))

        self.api = api_fixture.admin_api
        self.api.microversion = self.microversion

        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        # Configure the CachingScheduler.
        self.flags(driver='caching_scheduler', group='scheduler')
        self.start_service('scheduler')

        # Create two compute nodes/services.
        for host in ('host1', 'host2'):
            fake.set_nodes([host])
            self.addCleanup(fake.restore_nodes)
            self.start_service('compute', host=host)

        flavors = self.api.get_flavors()
        self.old_flavor = flavors[0]
        self.new_flavor = flavors[1]
Example #6
    def setUp(self):
        super(TestRescheduleWithVolumesAttached, self).setUp()

        # Use the new attach flow fixture for cinder
        cinder_fixture = nova_fixtures.CinderFixtureNewAttachFlow(self)
        self.cinder = self.useFixture(cinder_fixture)
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))

        fake_network.set_stub_network_methods(self)

        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api

        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.flags(compute_driver='fake.FakeRescheduleDriver')

        self.start_service('conductor')
        self.start_service('scheduler')

        # Start two computes to allow the instance to be rescheduled
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.host1 = self.start_service('compute', host='host1')

        fake.set_nodes(['host2'])
        self.host2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']
Example #7
    def setUp(self):
        super(TestResizeWithNoAllocationScheduler, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))

        self.api = api_fixture.admin_api
        self.api.microversion = self.microversion

        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')

        # Create two compute nodes/services.
        for host in ('host1', 'host2'):
            fake.set_nodes([host])
            self.addCleanup(fake.restore_nodes)
            self.start_service('compute', host=host)

        scheduler_service = self.start_service('scheduler')
        # We need to mock the FilterScheduler to not use Placement so that
        # allocations won't be created during scheduling.
        scheduler_service.manager.driver.USES_ALLOCATION_CANDIDATES = False

        flavors = self.api.get_flavors()
        self.old_flavor = flavors[0]
        self.new_flavor = flavors[1]
Example #8
    def setUp(self):
        super(TestLiveMigrateOneOfConcurrentlyCreatedInstances, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))

        self.api = api_fixture.admin_api
        self.api.microversion = self.microversion

        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # set_nodes() is needed to have each compute service return a
        # different nodename, so we get two hosts in the list of candidates
        # for scheduling. Otherwise both hosts will have the same default
        # nodename "fake-mini". The host passed to start_service controls the
        # "host" attribute and set_nodes() sets the "nodename" attribute.
        # We set_nodes() to make host and nodename the same for each compute.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.start_service('compute', host='host2')

        fake_network.set_stub_network_methods(self)

        flavors = self.api.get_flavors()
        self.flavor1 = flavors[0]
Example #9
    def setUp(self):
        super(TestRescheduleWithServerGroup, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.api
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we're going to fake one raising
        # RescheduledException to trigger a retry to the other compute host.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        # This is our flag that we set when we hit the first host and
        # made it fail.
        self.failed_host = None
        self.attempts = 0

        def fake_validate_instance_group_policy(_self, *args, **kwargs):
            self.attempts += 1
            if self.failed_host is None:
                # Set the failed_host value to the ComputeManager.host value.
                self.failed_host = _self.host
                raise exception.RescheduledException(instance_uuid='fake',
                                                     reason='Policy violated')

        self.stub_out(
            'nova.compute.manager.ComputeManager.'
            '_validate_instance_group_policy',
            fake_validate_instance_group_policy)
Example #10
    def setUp(self):
        super(TestRescheduleWithServerGroup, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.api
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we're going to fake one raising
        # RescheduledException to trigger a retry to the other compute host.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        # This is our flag that we set when we hit the first host and
        # made it fail.
        self.failed_host = None
        self.attempts = 0

        def fake_validate_instance_group_policy(_self, *args, **kwargs):
            self.attempts += 1
            if self.failed_host is None:
                # Set the failed_host value to the ComputeManager.host value.
                self.failed_host = _self.host
                raise exception.RescheduledException(instance_uuid='fake',
                                                     reason='Policy violated')

        self.stub_out('nova.compute.manager.ComputeManager.'
                      '_validate_instance_group_policy',
                      fake_validate_instance_group_policy)
Example #11
    def _start_compute(self, host):
        """Start a nova compute service on the given host

        :param host: the name of the host that will be associated to the
                     compute service.
        :return: the nova compute service object
        """
        fake.set_nodes([host])
        self.addCleanup(fake.restore_nodes)
        compute = self.start_service('compute', host=host)
        self.computes[host] = compute
        return compute
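A helper like this is normally called from setUp once per host, with self.computes initialised first so the returned services can be looked up later (for example to stop one of them during an evacuation). A minimal, hypothetical usage sketch; the test class name is made up:

    def setUp(self):
        super(MyMultiHostTest, self).setUp()
        # ... fixtures, API setup and control services elided ...
        self.computes = {}
        for host in ('host1', 'host2'):
            self._start_compute(host)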
Example #12
    def _start_compute(self, host):
        """Start a nova compute service on the given host

        :param host: the name of the host that will be associated to the
                     compute service.
        :return: the nova compute service object
        """
        fake.set_nodes([host])
        self.addCleanup(fake.restore_nodes)
        compute = self.start_service('compute', host=host)
        self.computes[host] = compute
        return compute
Example #13
    def _start_compute(self, host, cell_name=None):
        """Start a nova compute service on the given host

        :param host: the name of the host that will be associated to the
                     compute service.
        :param cell_name: optional name of the cell in which to start the
                          compute service (defaults to cell1)
        :return: the nova compute service object
        """
        fake.set_nodes([host])
        self.addCleanup(fake.restore_nodes)
        compute = self.start_service('compute', host=host, cell=cell_name)
        self.computes[host] = compute
        return compute
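The cell_name argument makes the same helper usable in multi-cell tests; a hypothetical call placing one compute in the default cell and one in a second cell could look like this (the cell names are assumptions):

        # Hypothetical: one compute in the default cell1, one in cell2.
        self.computes = {}
        self._start_compute('host1')
        self._start_compute('host2', cell_name='cell2')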
Example #14
    def _start_compute(self, host, cell_name=None):
        """Start a nova compute service on the given host

        :param host: the name of the host that will be associated to the
                     compute service.
        :param cell_name: optional name of the cell in which to start the
                          compute service (defaults to cell1)
        :return: the nova compute service object
        """
        fake.set_nodes([host])
        self.addCleanup(fake.restore_nodes)
        compute = self.start_service('compute', host=host, cell=cell_name)
        self.computes[host] = compute
        return compute
Example #15
    def setUp(self):
        super(TestRetryBetweenComputeNodeBuilds, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)

        self.start_service('conductor')
        self.start_service('consoleauth')

        # We start two compute services because we're going to fake one
        # of them to fail the build so we can trigger the retry code.
        # set_nodes() is needed to have each compute service return a
        # different nodename, so we get two hosts in the list of candidates
        # for scheduling. Otherwise both hosts will have the same default
        # nodename "fake-mini". The host passed to start_service controls the
        # "host" attribute and set_nodes() sets the "nodename" attribute.
        # We set_nodes() to make host and nodename the same for each compute.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.start_service('compute', host='host2')

        self.scheduler_service = self.start_service('scheduler')

        self.useFixture(cast_as_call.CastAsCall(self))

        self.image_id = self.admin_api.get_images()[0]['id']
        self.flavor_id = self.admin_api.get_flavors()[0]['id']

        # This is our flag that we set when we hit the first host and
        # made it fail.
        self.failed_host = None
        self.attempts = 0

        # We can't stub nova.compute.claims.Claims.__init__ because there is
        # a race where nova.compute.claims.NopClaim will be used instead,
        # see for details:
        #   https://github.com/openstack/nova/blob/bb02d11/nova/compute/
        #   resource_tracker.py#L121-L130
        real_instance_claim = \
            nova.compute.resource_tracker.ResourceTracker.instance_claim

        def fake_instance_claim(_self, *args, **kwargs):
            self.attempts += 1
            if self.failed_host is None:
                # Set the failed_host value to the ResourceTracker.host value.
                self.failed_host = _self.host
                raise exception.ComputeResourcesUnavailable(
                    reason='failure on host %s' % _self.host)
            return real_instance_claim(_self, *args, **kwargs)

        self.stub_out(
            'nova.compute.resource_tracker.ResourceTracker.instance_claim',
            fake_instance_claim)
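Given fake_instance_claim above, the matching test body boots a single server and asserts that the build was retried away from the host whose claim failed. A rough, hypothetical sketch of those assertions:

    def test_retry_sketch(self):
        # Hypothetical sketch: the first claim fails, so the build should be
        # retried and complete on the other host.
        server_req = {'name': 'test-server',
                      'imageRef': self.image_id,
                      'flavorRef': self.flavor_id}
        created = self.admin_api.post_server({'server': server_req})
        server = self._wait_for_state_change(
            self.admin_api, created, 'ACTIVE')

        # Two claim attempts: the one that failed and the successful retry.
        self.assertEqual(2, self.attempts)
        self.assertNotEqual(self.failed_host,
                            server['OS-EXT-SRV-ATTR:host'])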
Example #16
    def test_parallel_evacuate_with_server_group(self):
        self.skipTest('Skipped until bug 1763181 is fixed')
        group_req = {'name': 'a-name', 'policies': ['anti-affinity']}
        group = self.api.post_server_groups(group_req)

        # boot two instances with anti-affinity
        server = {'name': 'server',
                  'imageRef': self.image_id,
                  'flavorRef': self.flavor_id}
        hints = {'group': group['id']}
        created_server1 = self.api.post_server({'server': server,
                                                'os:scheduler_hints': hints})
        server1 = self._wait_for_state_change(self.api,
                                              created_server1, 'ACTIVE')

        created_server2 = self.api.post_server({'server': server,
                                                'os:scheduler_hints': hints})
        server2 = self._wait_for_state_change(self.api,
                                              created_server2, 'ACTIVE')

        # assert that the anti-affinity policy is enforced during the boot
        self.assertNotEqual(server1['OS-EXT-SRV-ATTR:host'],
                            server2['OS-EXT-SRV-ATTR:host'])

        # simulate compute failure on both compute hosts to allow evacuation
        self.compute1.stop()
        # force it down to avoid waiting for the service group to time out
        self.api.force_down_service('host1', 'nova-compute', True)

        self.compute2.stop()
        self.api.force_down_service('host2', 'nova-compute', True)

        # start a third compute to have room for one of the instances
        fake.set_nodes(['host3'])
        self.compute3 = self.start_service('compute', host='host3')

        # evacuate both instances
        post = {'evacuate': {}}
        self.api.post_server_action(server1['id'], post)
        self.api.post_server_action(server2['id'], post)

        # make sure that the rebuild is started and then finished
        # NOTE(mdbooth): We only get 1 rebuild.start notification here because
        # we validate server group policy (and therefore fail) before emitting
        # rebuild.start.
        fake_notifier.wait_for_versioned_notifications(
            'instance.rebuild.start', n_events=1)
        server1 = self._wait_for_server_parameter(
            self.api, server1, {'OS-EXT-STS:task_state': None})
        server2 = self._wait_for_server_parameter(
            self.api, server2, {'OS-EXT-STS:task_state': None})

        # NOTE(gibi): The instance.host is set _after_ the instance state and
        # task_state are set back to normal, so it is not enough to wait for
        # that. The only thing that happens after the instance.host is set to
        # the target host is the migration status being set to done. So we
        # have to wait for that to avoid asserting the wrong host below.
        self._wait_for_migration_status(server1, ['done', 'failed'])
        self._wait_for_migration_status(server2, ['done', 'failed'])

        # get the servers again to have the latest information about their
        # hosts
        server1 = self.api.get_server(server1['id'])
        server2 = self.api.get_server(server2['id'])

        # assert that the anti-affinity policy is enforced during the
        # evacuation
        self.assertNotEqual(server1['OS-EXT-SRV-ATTR:host'],
                            server2['OS-EXT-SRV-ATTR:host'])

        # assert that one of the evacuations was successful and that its
        # server was moved to another host, while the evacuation of the other
        # server failed
        if server1['status'] == 'ERROR':
            failed_server = server1
            evacuated_server = server2
        else:
            failed_server = server2
            evacuated_server = server1
        self.assertEqual('ERROR', failed_server['status'])
        self.assertNotEqual('host3', failed_server['OS-EXT-SRV-ATTR:host'])
        self.assertEqual('ACTIVE', evacuated_server['status'])
        self.assertEqual('host3', evacuated_server['OS-EXT-SRV-ATTR:host'])
Example #17
    def setUp(self):
        super(TestParallelEvacuationWithServerGroup, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api
        # 2.11 is needed for force_down
        # 2.14 is needed for evacuate without onSharedStorage flag
        self.api.microversion = '2.14'

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we need two instances with
        # anti-affinity server group policy to be booted
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.compute1 = self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.compute2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        manager_class = nova.compute.manager.ComputeManager
        original_rebuild = manager_class._do_rebuild_instance

        def fake_rebuild(self_, context, instance, *args, **kwargs):
            # Simulate that the rebuild request of one of the instances
            # reaches the target compute manager significantly later so the
            # rebuild of the other instance can finish before the late
            # validation of the first rebuild.
            # We cannot simply delay the virt driver's rebuild or the
            # manager's _rebuild_default_impl as those run after the late
            # validation
            if instance.host == 'host1':
                # wait for the other instance rebuild to start
                fake_notifier.wait_for_versioned_notifications(
                    'instance.rebuild.start', n_events=1)

            original_rebuild(self_, context, instance, *args, **kwargs)

        self.stub_out('nova.compute.manager.ComputeManager.'
                      '_do_rebuild_instance', fake_rebuild)
Example #18
    def setUp(self):
        super(TestRetryBetweenComputeNodeBuilds, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)

        self.start_service('conductor')
        self.start_service('consoleauth')

        # Configure a minimal filter scheduler setup.
        self.flags(enabled_filters=['ComputeFilter', 'RetryFilter'],
                   group='filter_scheduler')
        self.start_service('scheduler')

        # We start two compute services because we're going to fake one
        # of them to fail the build so we can trigger the retry code.
        # set_nodes() is needed to have each compute service return a
        # different nodename, so we get two hosts in the list of candidates
        # for scheduling. Otherwise both hosts will have the same default
        # nodename "fake-mini". The host passed to start_service controls the
        # "host" attribute and set_nodes() sets the "nodename" attribute.
        # We set_nodes() to make host and nodename the same for each compute.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host2')

        self.useFixture(cast_as_call.CastAsCall(self))

        self.image_id = self.admin_api.get_images()[0]['id']
        self.flavor_id = self.admin_api.get_flavors()[0]['id']

        # This is our flag that we set when we hit the first host and
        # made it fail.
        self.failed_host = None
        self.attempts = 0

        # We can't stub nova.compute.claims.Claims.__init__ because there is
        # a race where nova.compute.claims.NopClaim will be used instead,
        # see for details:
        #   https://github.com/openstack/nova/blob/bb02d11/nova/compute/
        #   resource_tracker.py#L121-L130
        real_instance_claim = \
            nova.compute.resource_tracker.ResourceTracker.instance_claim

        def fake_instance_claim(_self, *args, **kwargs):
            self.attempts += 1
            if self.failed_host is None:
                # Set the failed_host value to the ResourceTracker.host value.
                self.failed_host = _self.host
                raise exception.ComputeResourcesUnavailable(
                    reason='failure on host %s' % _self.host)
            return real_instance_claim(_self, *args, **kwargs)

        self.stub_out(
            'nova.compute.resource_tracker.ResourceTracker.instance_claim',
            fake_instance_claim)
Example #19
    def _start_compute(self, host):
        fake.set_nodes([host])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host=host)
Example #20
    def _start_compute(self, host):
        fake.set_nodes([host])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host=host)
Example #21
    def test_parallel_evacuate_with_server_group(self):
        self.skipTest('Skipped until bug 1763181 is fixed')
        group_req = {'name': 'a-name', 'policies': ['anti-affinity']}
        group = self.api.post_server_groups(group_req)

        # boot two instances with anti-affinity
        server = {'name': 'server',
                  'imageRef': self.image_id,
                  'flavorRef': self.flavor_id}
        hints = {'group': group['id']}
        created_server1 = self.api.post_server({'server': server,
                                                'os:scheduler_hints': hints})
        server1 = self._wait_for_state_change(self.api,
                                              created_server1, 'ACTIVE')

        created_server2 = self.api.post_server({'server': server,
                                                'os:scheduler_hints': hints})
        server2 = self._wait_for_state_change(self.api,
                                              created_server2, 'ACTIVE')

        # assert that the anti-affinity policy is enforced during the boot
        self.assertNotEqual(server1['OS-EXT-SRV-ATTR:host'],
                            server2['OS-EXT-SRV-ATTR:host'])

        # simulate compute failure on both compute hosts to allow evacuation
        self.compute1.stop()
        # force it down to avoid waiting for the service group to time out
        self.api.force_down_service('host1', 'nova-compute', True)

        self.compute2.stop()
        self.api.force_down_service('host2', 'nova-compute', True)

        # start a third compute to have room for one of the instances
        fake.set_nodes(['host3'])
        self.addCleanup(fake.restore_nodes)
        self.compute3 = self.start_service('compute', host='host3')

        # evacuate both instances
        post = {'evacuate': {}}
        self.api.post_server_action(server1['id'], post)
        self.api.post_server_action(server2['id'], post)

        # make sure that the rebuild is started and then finished
        # NOTE(mdbooth): We only get 1 rebuild.start notification here because
        # we validate server group policy (and therefore fail) before emitting
        # rebuild.start.
        fake_notifier.wait_for_versioned_notifications(
            'instance.rebuild.start', n_events=1)
        server1 = self._wait_for_server_parameter(
            self.api, server1, {'OS-EXT-STS:task_state': None})
        server2 = self._wait_for_server_parameter(
            self.api, server2, {'OS-EXT-STS:task_state': None})

        # NOTE(gibi): The instance.host is set _after_ the instance state and
        # task_state are set back to normal, so it is not enough to wait for
        # that. The only thing that happens after the instance.host is set to
        # the target host is the migration status being set to done. So we
        # have to wait for that to avoid asserting the wrong host below.
        self._wait_for_migration_status(server1, ['done', 'failed'])
        self._wait_for_migration_status(server2, ['done', 'failed'])

        # get the servers again to have the latest information about their
        # hosts
        server1 = self.api.get_server(server1['id'])
        server2 = self.api.get_server(server2['id'])

        # assert that the anti-affinity policy is enforced during the
        # evacuation
        self.assertNotEqual(server1['OS-EXT-SRV-ATTR:host'],
                            server2['OS-EXT-SRV-ATTR:host'])

        # assert that one of the evacuations was successful and that its
        # server was moved to another host, while the evacuation of the other
        # server failed
        if server1['status'] == 'ERROR':
            failed_server = server1
            evacuated_server = server2
        else:
            failed_server = server2
            evacuated_server = server1
        self.assertEqual('ERROR', failed_server['status'])
        self.assertNotEqual('host3', failed_server['OS-EXT-SRV-ATTR:host'])
        self.assertEqual('ACTIVE', evacuated_server['status'])
        self.assertEqual('host3', evacuated_server['OS-EXT-SRV-ATTR:host'])
Example #22
    def setUp(self):
        super(TestParallelEvacuationWithServerGroup, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api
        # 2.11 is needed for force_down
        # 2.14 is needed for evacuate without onSharedStorage flag
        self.api.microversion = '2.14'

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we need two instances with
        # anti-affinity server group policy to be booted
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.compute1 = self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.compute2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        manager_class = nova.compute.manager.ComputeManager
        original_rebuild = manager_class._do_rebuild_instance

        def fake_rebuild(self_, context, instance, *args, **kwargs):
            # Simulate that the rebuild request of one of the instances
            # reaches the target compute manager significantly later so the
            # rebuild of the other instance can finish before the late
            # validation of the first rebuild.
            # We cannot simply delay the virt driver's rebuild or the
            # manager's _rebuild_default_impl as those run after the late
            # validation
            if instance.host == 'host1':
                # wait for the other instance rebuild to start
                fake_notifier.wait_for_versioned_notifications(
                    'instance.rebuild.start', n_events=1)

            original_rebuild(self_, context, instance, *args, **kwargs)

        self.stub_out('nova.compute.manager.ComputeManager.'
                      '_do_rebuild_instance', fake_rebuild)