Code Example #1
File: test_bug_1815153.py Project: mahak/nova
    def setUp(self):
        super(NonPersistentFieldNotResetTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api
        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.api.microversion = 'latest'

        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        self.compute = {}

        self.addCleanup(fake.restore_nodes)
        for host in ('host1', 'host2', 'host3'):
            fake.set_nodes([host])
            compute_service = self.start_service('compute', host=host)
            self.compute.update({host: compute_service})

        self.ctxt = context.get_admin_context()
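Nearly every example on this page repeats the same fixture pattern: call fake.set_nodes() with a one-element list right before starting each nova-compute service so the FakeDriver reports a distinct nodename per host, and register fake.restore_nodes with addCleanup so the driver's default nodename list is restored when the test ends. A minimal sketch of that pattern follows; the class name MyMultiHostTest is hypothetical, and the sketch assumes a nova functional TestCase where fake is nova.virt.fake and start_service comes from the base test class:

    def setUp(self):
        super(MyMultiHostTest, self).setUp()
        # Put the driver's default nodename list back after the test.
        self.addCleanup(fake.restore_nodes)
        for host in ('host1', 'host2'):
            # Have the FakeDriver report `host` as its nodename so each
            # compute service creates its own compute node record instead
            # of sharing the default "fake-mini" nodename.
            fake.set_nodes([host])
            self.start_service('compute', host=host)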
Code Example #2
File: test_multiple_nodes.py Project: jorgevgut/nova
    def test_compute_manager_removes_deleted_node(self):
        ctx = context.get_admin_context()
        fake.set_nodes(["A", "B"])

        fake_compute_nodes = [
            objects.ComputeNode(context=ctx, hypervisor_hostname="A", id=2),
            objects.ComputeNode(context=ctx, hypervisor_hostname="B", id=3),
        ]

        def fake_get_compute_nodes_in_db(context, use_slave=False):
            return fake_compute_nodes

        def fake_compute_node_delete(context, compute_node_id):
            for cn in fake_compute_nodes:
                if compute_node_id == cn.id:
                    fake_compute_nodes.remove(cn)
                    return

        self.stubs.Set(self.compute, "_get_compute_nodes_in_db", fake_get_compute_nodes_in_db)
        self.stubs.Set(db, "compute_node_delete", fake_compute_node_delete)

        self.compute.update_available_resource(ctx)

        # Verify nothing is deleted if driver and db compute nodes match
        self.assertEqual(len(fake_compute_nodes), 2)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ["A", "B"])

        fake.set_nodes(["A"])
        self.compute.update_available_resource(ctx)

        # Verify B gets deleted since now only A is reported by driver
        self.assertEqual(len(fake_compute_nodes), 1)
        self.assertEqual(fake_compute_nodes[0]["hypervisor_hostname"], "A")
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ["A"])
Code Example #3
 def setUp(self):
     super(MissingReqSpecInstanceGroupUUIDTestCase, self).setUp()
     # Stub out external dependencies.
     self.useFixture(nova_fixtures.NeutronFixture(self))
     self.useFixture(nova_fixtures.PlacementFixture())
     fake_image.stub_out_image_service(self)
     self.addCleanup(fake_image.FakeImageService_reset)
     # Configure the API to allow resizing to the same host so we can keep
     # the number of computes down to two in the test.
     self.flags(allow_resize_to_same_host=True)
     # Start nova controller services.
     api_fixture = self.useFixture(
         nova_fixtures.OSAPIFixture(api_version='v2.1'))
     self.api = api_fixture.admin_api
     self.start_service('conductor')
     # Use our custom weigher defined above to make sure that we have
     # a predictable scheduling sort order.
     self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                group='filter_scheduler')
     self.start_service('scheduler')
     # Start two computes, one where the server will be created and another
     # where we'll cold migrate it.
     self.addCleanup(fake_virt.restore_nodes)
     self.computes = {}  # keep track of the compute services per host name
     for host in ('host1', 'host2'):
         fake_virt.set_nodes([host])
         compute_service = self.start_service('compute', host=host)
         self.computes[host] = compute_service
Code Example #4
    def test_evacuate_with_anti_affinity(self):
        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)

        host = self._get_compute_service_by_host_name(
            servers[1]['OS-EXT-SRV-ATTR:host'])
        host.stop()
        # Need to wait service_down_time amount of seconds to ensure
        # nova considers the host down
        time.sleep(self._service_down_time)

        # Start additional host to test evacuation
        fake.set_nodes(['host3'])
        compute3 = self.start_service('compute', host='host3')

        post = {'evacuate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        self._wait_for_migration_status(servers[1], ['done'])
        evacuated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'ACTIVE')

        # check that the server is evacuated
        self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
                            servers[1]['OS-EXT-SRV-ATTR:host'])
        # check that policy is kept
        self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
                            servers[0]['OS-EXT-SRV-ATTR:host'])

        compute3.kill()
        host.start()
Code Example #6
File: test_bug_1797580.py Project: arbrandes/nova
    def setUp(self):
        super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built and cold/live migrate it.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api
        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        for host in ('host1', 'host2'):
            fake.set_nodes([host])
            self.addCleanup(fake.restore_nodes)
            self.start_service('compute', host=host)
Code Example #7
File: test_bug_1797580.py Project: wkite/nova
    def setUp(self):
        super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built and cold/live migrate it.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api
        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        for host in ('host1', 'host2'):
            fake.set_nodes([host])
            self.addCleanup(fake.restore_nodes)
            self.start_service('compute', host=host)
Code Example #8
    def setUp(self):
        super(NonPersistentFieldNotResetTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        self.api = api_fixture.admin_api
        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.api.microversion = 'latest'

        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        self.compute = {}

        self.addCleanup(fake.restore_nodes)
        for host in ('host1', 'host2', 'host3'):
            fake.set_nodes([host])
            compute_service = self.start_service('compute', host=host)
            self.compute.update({host: compute_service})

        self.ctxt = context.get_admin_context()
Code Example #9
    def setUp(self):
        super(AntiAffinityMultiCreateRequest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # Add our custom weigher.
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        # disable late check on compute node to mimic devstack.
        self.flags(disable_group_policy_check_upcall=True, group='workarounds')
        self.start_service('scheduler')

        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.start_service('compute', host='host2')
Code Example #10
 def setUp(self):
     super(HypervisorsSampleJson233Tests, self).setUp()
     self.api.microversion = self.microversion
     # Start a new compute service to fake a record with hypervisor id=2
     # for pagination test.
     host = 'host1'
     fake.set_nodes([host])
     self.addCleanup(fake.restore_nodes)
     self.start_service('compute', host=host)
Code Example #11
 def setUp(self):
     super(LiveMigrationCinderFailure, self).setUp()
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)
     # Start a second compute node (the first one was started for us by
     # _IntegratedTestBase. set_nodes() is needed to avoid duplicate
     # nodenames. See comments in test_bug_1702454.py.
     fake.set_nodes(['host2'])
     self.addCleanup(fake.restore_nodes)
     self.compute2 = self.start_service('compute', host='host2')
Code Example #13
    def test_migrate_with_anti_affinity(self):
        # Start additional host to test migration with anti-affinity
        fake.set_nodes(['host3'])
        self.start_service('compute', host='host3')

        created_group = self.api.post_server_groups(self.anti_affinity)
        servers = self._boot_servers_to_group(created_group)

        post = {'migrate': {}}
        self.admin_api.post_server_action(servers[1]['id'], post)
        migrated_server = self._wait_for_state_change(
            self.admin_api, servers[1], 'VERIFY_RESIZE')

        self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
                            migrated_server['OS-EXT-SRV-ATTR:host'])
Code Example #15
 def test_hypervisors_detail(self):
     # Start another compute service to get a 2nd compute for paging tests.
     host = 'host2'
     fake.set_nodes([host])
     self.addCleanup(fake.restore_nodes)
     service_2 = self.start_service('compute', host=host).service_ref
     compute_node_2 = service_2.compute_node
     marker = self.compute_node_1.uuid
     subs = {
         'hypervisor_id': compute_node_2.uuid,
         'service_id': service_2.uuid
     }
     response = self._do_get('os-hypervisors/detail?limit=1&marker=%s' %
                             marker)
     self._verify_response('hypervisors-detail-resp', subs, response, 200)
Code Example #16
File: test_bug_1718512.py Project: mahak/nova
    def setUp(self):
        super(TestRequestSpecRetryReschedule, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # the image fake backend needed for image discovery
        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # We have to get the image before we use 2.latest otherwise we'll get
        # a 404 on the /images proxy API because of 2.36.
        self.image_id = self.api.get_images()[0]['id']

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # The consoleauth service is needed for deleting console tokens when
        # the server is deleted.
        self.start_service('consoleauth')

        # Use our custom weigher defined above to make sure that we have
        # a predictable scheduling sort order.
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        self.start_service('scheduler')

        # Let's now start three compute nodes as we said above.
        self.addCleanup(fake.restore_nodes)
        for host in ['host1', 'host2', 'host3']:
            fake.set_nodes([host])
            self.start_service('compute', host=host)
Code Example #17
    def setUp(self):
        super(TestRequestSpecRetryReschedule, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # the image fake backend needed for image discovery
        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # We have to get the image before we use 2.latest otherwise we'll get
        # a 404 on the /images proxy API because of 2.36.
        self.image_id = self.api.get_images()[0]['id']

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # The consoleauth service is needed for deleting console tokens when
        # the server is deleted.
        self.start_service('consoleauth')

        # Use our custom weigher defined above to make sure that we have
        # a predictable scheduling sort order.
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        self.start_service('scheduler')

        # Let's now start three compute nodes as we said above.
        self.addCleanup(fake.restore_nodes)
        for host in ['host1', 'host2', 'host3']:
            fake.set_nodes([host])
            self.start_service('compute', host=host)
Code Example #18
File: test_server_group.py Project: mahak/nova
    def setUp(self):
        super(ServerGroupTestMultiCell, self).setUp()
        # Start two compute services, one per cell
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.compute1 = self.start_service('compute', host='host1',
                                           cell='cell1')
        fake.set_nodes(['host2'])
        self.compute2 = self.start_service('compute', host='host2',
                                           cell='cell2')
        # This is needed to find a server that is still booting with multiple
        # cells, while waiting for the state change to ACTIVE. See the
        # _get_instance method in the compute/api for details.
        self.useFixture(nova_fixtures.AllServicesCurrent())

        self.aggregates = {}
Code Example #19
File: test_server_group.py Project: raubvogel/nova
    def setUp(self):
        super(ServerGroupTestMultiCell, self).setUp()
        # Start two compute services, one per cell
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.compute1 = self.start_service('compute',
                                           host='host1',
                                           cell='cell1')
        fake.set_nodes(['host2'])
        self.compute2 = self.start_service('compute',
                                           host='host2',
                                           cell='cell2')
        # This is needed to find a server that is still booting with multiple
        # cells, while waiting for the state change to ACTIVE. See the
        # _get_instance method in the compute/api for details.
        self.useFixture(nova_fixtures.AllServicesCurrent())

        self.aggregates = {}
Code Example #20
 def _start_host_in_zone(self, host, zone):
     # Start the nova-compute service.
     fake.set_nodes([host])
     self.addCleanup(fake.restore_nodes)
     self.start_service('compute', host=host)
     # Create a host aggregate with a zone in which to put this host.
     aggregate_body = {
         "aggregate": {
             "name": zone,
             "availability_zone": zone
         }
     }
     aggregate = self.api.api_post('/os-aggregates',
                                   aggregate_body).body['aggregate']
     # Now add the compute host to the aggregate.
     add_host_body = {"add_host": {"host": host}}
     self.api.api_post('/os-aggregates/%s/action' % aggregate['id'],
                       add_host_body)
Code Example #21
File: test_multiple_nodes.py Project: AnyBucket/nova
    def test_compute_manager_removes_deleted_node(self):
        ctx = context.get_admin_context()
        fake.set_nodes(['A', 'B'])
        self.compute.update_available_resource(ctx)

        rt_A = self.compute._resource_tracker_dict['A']
        rt_B = self.compute._resource_tracker_dict['B']
        self.mox.StubOutWithMock(rt_A, 'update_available_resource')
        self.mox.StubOutWithMock(rt_B, 'update_available_resource')
        rt_A.update_available_resource(ctx)
        rt_B.update_available_resource(ctx, delete=True)
        self.mox.ReplayAll()

        fake.set_nodes(['A'])
        self.compute.update_available_resource(ctx)
        self.mox.VerifyAll()
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                        ['A'])
Code Example #22
    def test_resize_then_evacuate(self):
        # Create a server. At this point there is only one compute service.
        flavors = self.api.get_flavors()
        flavor1 = flavors[0]['id']
        server = self._build_server(flavor1)
        server = self.api.post_server({'server': server})
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Start up another compute service so we can resize.
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        host2 = self.start_service('compute', host='host2')

        # Now resize the server to move it to host2.
        flavor2 = flavors[1]['id']
        req = {'resize': {'flavorRef': flavor2}}
        self.api.post_server_action(server['id'], req)
        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
        self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
        self.api.post_server_action(server['id'], {'confirmResize': None})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Disable the host on which the server is now running (host2).
        host2.stop()
        self.api.force_down_service('host2', 'nova-compute', forced_down=True)

        # Now try to evacuate the server back to the original source compute.
        # FIXME(mriedem): This is bug 1669054 where the evacuate fails with
        # NoValidHost because the RequestSpec.ignore_hosts field has the
        # original source host in it which is the only other available host to
        # which we can evacuate the server.
        req = {'evacuate': {'onSharedStorage': False}}
        self.api.post_server_action(server['id'],
                                    req,
                                    check_response_status=[500])
        # There should be fault recorded with the server.
        server = self._wait_for_state_change(self.api, server, 'ERROR')
        self.assertIn('fault', server)
        self.assertIn('No valid host was found', server['fault']['message'])
        # Assert the RequestSpec.ignore_hosts is still populated.
        reqspec = objects.RequestSpec.get_by_instance_uuid(
            context.get_admin_context(), server['id'])
        self.assertIsNotNone(reqspec.ignore_hosts)
        self.assertIn(self.compute.host, reqspec.ignore_hosts)
Code Example #23
    def setUp(self):
        super(ServerGroupTestV21, self).setUp()

        # TODO(sbauza): Remove that once there is a way to have a custom
        # FakeDriver supporting different resources. Note that we can't also
        # simply change the config option for choosing our custom fake driver
        # as the mocked method only accepts to load drivers in the nova.virt
        # tree.
        self.stub_out('nova.virt.driver.load_compute_driver',
                      _fake_load_compute_driver)
        fake.set_nodes(['compute'])
        self.compute = self.start_service('compute', host='compute')

        # NOTE(gibi): start a second compute host to be able to test affinity
        # NOTE(sbauza): Make sure the FakeDriver returns a different nodename
        # for the second compute node.
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.compute2 = self.start_service('compute', host='host2')
Code Example #25
    def test_compute_manager_removes_deleted_node(self):
        ctx = context.get_admin_context()
        fake.set_nodes(['A', 'B'])

        fake_compute_nodes = [{
            'hypervisor_hostname': 'A',
            'id': 2
        }, {
            'hypervisor_hostname': 'B',
            'id': 3
        }]

        def fake_get_compute_nodes_in_db(context):
            return fake_compute_nodes

        def fake_compute_node_delete(context, compute_node):
            for cn in fake_compute_nodes:
                if (compute_node['hypervisor_hostname'] ==
                        cn['hypervisor_hostname']):
                    fake_compute_nodes.remove(cn)
                    return

        self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
                       fake_get_compute_nodes_in_db)
        self.stubs.Set(self.compute.conductor_api, 'compute_node_delete',
                       fake_compute_node_delete)

        self.compute.update_available_resource(ctx)

        # Verify nothing is deleted if driver and db compute nodes match
        self.assertEqual(len(fake_compute_nodes), 2)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                         ['A', 'B'])

        fake.set_nodes(['A'])
        self.compute.update_available_resource(ctx)

        # Verify B gets deleted since now only A is reported by driver
        self.assertEqual(len(fake_compute_nodes), 1)
        self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A')
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                         ['A'])
Code Example #27
File: test_instance.py Project: sapcc/nova
    def test_live_migration_actions(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._wait_for_notification('instance.create.end')
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
        # server will boot on host1
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.useFixture(fixtures.ConfPatcher(host='host2'))
        self.compute2 = self.start_service('compute', host='host2')

        actions = [
            self._test_live_migration_rollback,
        ]

        for action in actions:
            fake_notifier.reset()
            action(server)
            # Ensure that instance is in active state after an action
            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
Code Example #28
File: test_server_group.py Project: mahak/nova
 def setUp(self):
     super(TestAntiAffinityLiveMigration, self).setUp()
     # Setup common fixtures.
     self.useFixture(policy_fixture.RealPolicyFixture())
     self.useFixture(nova_fixtures.NeutronFixture(self))
     self.useFixture(func_fixtures.PlacementFixture())
     # Setup API.
     api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
         api_version='v2.1'))
     self.api = api_fixture.api
     self.admin_api = api_fixture.admin_api
     # Fake out glance.
     nova.tests.unit.image.fake.stub_out_image_service(self)
     self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
     # Start conductor, scheduler and two computes.
     self.start_service('conductor')
     self.start_service('scheduler')
     self.addCleanup(fake.restore_nodes)
     for host in ('host1', 'host2'):
         fake.set_nodes([host])
         self.start_service('compute', host=host)
Code Example #29
 def setUp(self):
     super(TestAntiAffinityLiveMigration, self).setUp()
     # Setup common fixtures.
     self.useFixture(policy_fixture.RealPolicyFixture())
     self.useFixture(nova_fixtures.NeutronFixture(self))
     self.useFixture(func_fixtures.PlacementFixture())
     # Setup API.
     api_fixture = self.useFixture(
         nova_fixtures.OSAPIFixture(api_version='v2.1'))
     self.api = api_fixture.api
     self.admin_api = api_fixture.admin_api
     # Fake out glance.
     nova.tests.unit.image.fake.stub_out_image_service(self)
     self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
     # Start conductor, scheduler and two computes.
     self.start_service('conductor')
     self.start_service('scheduler')
     for host in ('host1', 'host2'):
         fake.set_nodes([host])
         self.addCleanup(fake.restore_nodes)
         self.start_service('compute', host=host)
Code Example #30
File: test_multiple_nodes.py Project: B-Rich/nova-1
    def test_compute_manager_removes_deleted_node(self):
        ctx = context.get_admin_context()
        fake.set_nodes(['A', 'B'])

        fake_compute_nodes = [
            compute_node_obj.ComputeNode(
                context=ctx, hypervisor_hostname='A', id=2),
            compute_node_obj.ComputeNode(
                context=ctx, hypervisor_hostname='B', id=3),
            ]

        def fake_get_compute_nodes_in_db(context):
            return fake_compute_nodes

        def fake_compute_node_delete(context, compute_node_id):
            for cn in fake_compute_nodes:
                if compute_node_id == cn.id:
                    fake_compute_nodes.remove(cn)
                    return

        self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
                fake_get_compute_nodes_in_db)
        self.stubs.Set(db, 'compute_node_delete',
                fake_compute_node_delete)

        self.compute.update_available_resource(ctx)

        # Verify nothing is deleted if driver and db compute nodes match
        self.assertEqual(len(fake_compute_nodes), 2)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                         ['A', 'B'])

        fake.set_nodes(['A'])
        self.compute.update_available_resource(ctx)

        # Verify B gets deleted since now only A is reported by driver
        self.assertEqual(len(fake_compute_nodes), 1)
        self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A')
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                        ['A'])
Code Example #31
    def setUp(self):
        super(SchedulerOnlyChecksTargetTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.flags(use_neutron=True)
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # the image fake backend needed for image discovery
        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # We have to get the image before we use 2.latest otherwise we'll get
        # a 404 on the /images proxy API because of 2.36.
        self.image_id = self.api.get_images()[0]['id']

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # Define a very basic scheduler that only verifies if host is down.
        self.flags(enabled_filters=['ComputeFilter'], group='filter_scheduler')
        # NOTE(sbauza): Use the above weigher so we are sure that
        # we prefer first host1 for the boot request and forget about any
        # other weigher.
        # Host2 should only be preferred over host3 if and only if that's the
        # only host we verify (as requested_destination does).
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        self.start_service('scheduler')

        # Let's now start three compute nodes as we said above.
        # set_nodes() is needed to have each compute service return a
        # different nodename, so we get two hosts in the list of candidates
        # for scheduling. Otherwise both hosts will have the same default
        # nodename "fake-mini". The host passed to start_service controls the
        # "host" attribute and set_nodes() sets the "nodename" attribute.
        # We set_nodes() to make host and nodename the same for each compute.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.start_service('compute', host='host2')
        fake.set_nodes(['host3'])
        self.start_service('compute', host='host3')
        self.useFixture(cast_as_call.CastAsCall(self))
Code Example #32
    def test_compute_manager_removes_deleted_node(self):
        ctx = context.get_admin_context()
        fake.set_nodes(['A', 'B'])

        fake_compute_nodes = [
            objects.ComputeNode(
                context=ctx, hypervisor_hostname='A', id=2),
            objects.ComputeNode(
                context=ctx, hypervisor_hostname='B', id=3),
            ]

        def fake_get_compute_nodes_in_db(context):
            return fake_compute_nodes

        def fake_compute_node_delete(context, compute_node_id):
            for cn in fake_compute_nodes:
                if compute_node_id == cn.id:
                    fake_compute_nodes.remove(cn)
                    return

        self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
                fake_get_compute_nodes_in_db)
        self.stubs.Set(db, 'compute_node_delete',
                fake_compute_node_delete)

        self.compute.update_available_resource(ctx)

        # Verify nothing is deleted if driver and db compute nodes match
        self.assertEqual(len(fake_compute_nodes), 2)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                         ['A', 'B'])

        fake.set_nodes(['A'])
        self.compute.update_available_resource(ctx)

        # Verify B gets deleted since now only A is reported by driver
        self.assertEqual(len(fake_compute_nodes), 1)
        self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A')
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                        ['A'])
Code Example #33
File: test_bug_1669054.py Project: panguan737/nova
    def test_resize_then_evacuate(self):
        # Create a server. At this point there is only one compute service.
        flavors = self.api.get_flavors()
        flavor1 = flavors[0]['id']
        server = self._build_server(flavor1)
        server = self.api.post_server({'server': server})
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Start up another compute service so we can resize.
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        host2 = self.start_service('compute', host='host2')

        # Now resize the server to move it to host2.
        flavor2 = flavors[1]['id']
        req = {'resize': {'flavorRef': flavor2}}
        self.api.post_server_action(server['id'], req)
        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
        self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
        self.api.post_server_action(server['id'], {'confirmResize': None})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Disable the host on which the server is now running (host2).
        host2.stop()
        self.api.force_down_service('host2', 'nova-compute', forced_down=True)
        # Now try to evacuate the server back to the original source compute.
        req = {'evacuate': {'onSharedStorage': False}}
        self.api.post_server_action(server['id'], req)
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        # The evacuate flow in the compute manager is annoying in that it
        # sets the instance status to ACTIVE before updating the host, so we
        # have to wait for the migration record to be 'done' to avoid a race.
        self._wait_for_migration_status(server, ['done'])
        self.assertEqual(self.compute.host, server['OS-EXT-SRV-ATTR:host'])

        # Assert the RequestSpec.ignore_hosts field is not populated.
        reqspec = objects.RequestSpec.get_by_instance_uuid(
            context.get_admin_context(), server['id'])
        self.assertIsNone(reqspec.ignore_hosts)
Code Example #34
File: test_bug_1669054.py Project: mahak/nova
    def test_resize_then_evacuate(self):
        # Create a server. At this point there is only one compute service.
        flavors = self.api.get_flavors()
        flavor1 = flavors[0]['id']
        server = self._build_server(flavor1)
        server = self.api.post_server({'server': server})
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Start up another compute service so we can resize.
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        host2 = self.start_service('compute', host='host2')

        # Now resize the server to move it to host2.
        flavor2 = flavors[1]['id']
        req = {'resize': {'flavorRef': flavor2}}
        self.api.post_server_action(server['id'], req)
        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
        self.assertEqual('host2', server['OS-EXT-SRV-ATTR:host'])
        self.api.post_server_action(server['id'], {'confirmResize': None})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Disable the host on which the server is now running (host2).
        host2.stop()
        self.api.force_down_service('host2', 'nova-compute', forced_down=True)
        # Now try to evacuate the server back to the original source compute.
        req = {'evacuate': {'onSharedStorage': False}}
        self.api.post_server_action(server['id'], req)
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        # The evacuate flow in the compute manager is annoying in that it
        # sets the instance status to ACTIVE before updating the host, so we
        # have to wait for the migration record to be 'done' to avoid a race.
        self._wait_for_migration_status(server, ['done'])
        self.assertEqual(self.compute.host, server['OS-EXT-SRV-ATTR:host'])

        # Assert the RequestSpec.ignore_hosts field is not populated.
        reqspec = objects.RequestSpec.get_by_instance_uuid(
            context.get_admin_context(), server['id'])
        self.assertIsNone(reqspec.ignore_hosts)
Code Example #35
File: test_availability_zones.py Project: mahak/nova
 def _start_host_in_zone(self, host, zone):
     # Start the nova-compute service.
     fake.set_nodes([host])
     self.addCleanup(fake.restore_nodes)
     self.start_service('compute', host=host)
     # Create a host aggregate with a zone in which to put this host.
     aggregate_body = {
         "aggregate": {
             "name": zone,
             "availability_zone": zone
         }
     }
     aggregate = self.api.api_post(
         '/os-aggregates', aggregate_body).body['aggregate']
     # Now add the compute host to the aggregate.
     add_host_body = {
         "add_host": {
             "host": host
         }
     }
     self.api.api_post(
         '/os-aggregates/%s/action' % aggregate['id'], add_host_body)
Code Example #36
File: test_bug_1781710.py Project: arbrandes/nova
    def setUp(self):
        super(AntiAffinityMultiCreateRequest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # Add our custom weigher.
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        # disable late check on compute node to mimic devstack.
        self.flags(disable_group_policy_check_upcall=True,
                   group='workarounds')
        self.start_service('scheduler')

        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host2')
Code Example #37
File: test_bug_1746483.py Project: mahak/nova
    def setUp(self):
        super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))

        self.api = api_fixture.admin_api

        image_fakes.stub_out_image_service(self)
        self.addCleanup(image_fakes.FakeImageService_reset)

        self.start_service('conductor')

        # Add the IsolatedHostsFilter to the list of enabled filters since it
        # is not enabled by default.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        enabled_filters.append('IsolatedHostsFilter')
        self.flags(
            enabled_filters=enabled_filters,
            isolated_images=[image_fakes.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID],
            isolated_hosts=['host1'],
            restrict_isolated_hosts_to_isolated_images=True,
            group='filter_scheduler')
        self.start_service('scheduler')

        # Create two compute nodes/services so we can restrict the image
        # we'll use to one of the hosts.
        self.addCleanup(fake.restore_nodes)
        for host in ('host1', 'host2'):
            fake.set_nodes([host])
            self.start_service('compute', host=host)
Code Example #38
File: test_bug_1746483.py Project: zhangshihelp/nova
    def setUp(self):
        super(TestBootFromVolumeIsolatedHostsFilter, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.admin_api

        image_fakes.stub_out_image_service(self)
        self.addCleanup(image_fakes.FakeImageService_reset)

        self.start_service('conductor')

        # Add the IsolatedHostsFilter to the list of enabled filters since it
        # is not enabled by default.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        enabled_filters.append('IsolatedHostsFilter')
        self.flags(
            enabled_filters=enabled_filters,
            isolated_images=[image_fakes.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID],
            isolated_hosts=['host1'],
            restrict_isolated_hosts_to_isolated_images=True,
            group='filter_scheduler')
        self.start_service('scheduler')

        # Create two compute nodes/services so we can restrict the image
        # we'll use to one of the hosts.
        for host in ('host1', 'host2'):
            fake.set_nodes([host])
            self.addCleanup(fake.restore_nodes)
            self.start_service('compute', host=host)
Code Example #39
File: test_multiple_nodes.py Project: jorgevgut/nova
    def test_update_available_resource_add_remove_node(self):
        ctx = context.get_admin_context()
        fake.set_nodes(["A", "B", "C"])
        self.compute.update_available_resource(ctx)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ["A", "B", "C"])

        fake.set_nodes(["A", "B"])
        self.compute.update_available_resource(ctx)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ["A", "B"])

        fake.set_nodes(["A", "B", "C"])
        self.compute.update_available_resource(ctx)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()), ["A", "B", "C"])
Code Example #40
    def test_update_available_resource_add_remove_node(self):
        ctx = context.get_admin_context()
        fake.set_nodes(['A', 'B', 'C'])
        self.compute.update_available_resource(ctx)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                         ['A', 'B', 'C'])

        fake.set_nodes(['A', 'B'])
        self.compute.update_available_resource(ctx)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                         ['A', 'B'])

        fake.set_nodes(['A', 'B', 'C'])
        self.compute.update_available_resource(ctx)
        self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
                         ['A', 'B', 'C'])
Code Example #41
File: test_virt_drivers.py Project: zlzlnet/nova
 def setUp(self):
     self.driver_module = 'nova.virt.fake.FakeDriver'
     fake.set_nodes(['myhostname'])
     super(FakeConnectionTestCase, self).setUp()
Code Example #42
File: test_multiple_nodes.py Project: B-Rich/nova-1
 def setUp(self):
     super(FakeDriverMultiNodeTestCase, self).setUp()
     self.driver = fake.FakeDriver(virtapi=None)
     fake.set_nodes(['aaa', 'bbb'])
Code Example #43
File: test_multiple_nodes.py Project: B-Rich/nova-1
 def setUp(self):
     super(FakeDriverSingleNodeTestCase, self).setUp()
     self.driver = fake.FakeDriver(virtapi=None)
     fake.set_nodes(['xyz'])
Code Example #44
    def test_instance_list_deleted_service_with_no_uuid(self):
        """This test covers the following scenario:

        1. create an instance on a host which we'll simulate to be old
           by not having a uuid set
        2. migrate the instance to the "newer" host that has a service uuid
        3. delete the old service/compute node
        4. start a new service with the old hostname (still host1); this will
           also create a new compute_nodes table record for that host/node
        5. migrate the instance back to the host1 service
        6. list instances which will try to online migrate the old service uuid
        """
        fake_virt.set_nodes(['host1'])
        self.addCleanup(fake_virt.restore_nodes)
        host1 = self.start_service('compute', host='host1')

        # Create an instance which will be on host1 since it's the only host.
        server_req = self._build_minimal_create_server_request(
            self.api, 'test_instance_list_deleted_service_with_no_uuid',
            image_uuid=self.image_id, networks='none')
        server = self.api.post_server({'server': server_req})
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Now we start a 2nd compute which is "upgraded" (has a uuid) and
        # we'll migrate the instance to that host.
        fake_virt.set_nodes(['host2'])
        self.addCleanup(fake_virt.restore_nodes)
        host2 = self.start_service('compute', host='host2')
        self.assertIsNotNone(host2.service_ref.uuid)

        server = self._migrate_server(server, 'host2')

        # Delete the host1 service (which implicitly deletes the host1 compute
        # node record).
        host1.stop()
        self.admin_api.api_delete(
            '/os-services/%s' % host1.service_ref.uuid)
        # We should now only have 1 compute service (host2).
        compute_services = self.admin_api.api_get(
            '/os-services?binary=nova-compute').body['services']
        self.assertEqual(1, len(compute_services))
        # Make sure the compute node is also gone.
        self.admin_api.api_get(
            '/os-hypervisors?hypervisor_hostname_pattern=host1',
            check_response_status=[404])

        # Now recreate the host1 service and compute node by restarting the
        # service.
        self.restart_compute_service(host1)
        # At this point, host1's service should have a uuid.
        self.assertIsNotNone(host1.service_ref.uuid)

        # Sanity check that there are 3 services in the database, but only 1
        # is deleted.
        ctxt = nova_context.get_admin_context()
        with utils.temporary_mutation(ctxt, read_deleted='yes'):
            services = db.service_get_all_by_binary(ctxt, 'nova-compute')
            self.assertEqual(3, len(services))
            deleted_services = [svc for svc in services if svc['deleted']]
            self.assertEqual(1, len(deleted_services))
            deleted_service = deleted_services[0]
            self.assertEqual('host1', deleted_service['host'])

        # Now migrate the instance back to host1.
        self._migrate_server(server, 'host1')

        # Now null out the service uuid to simulate that the deleted host1
        # is old. We have to do this through the DB API directly since the
        # Service object won't allow a null uuid field. We also have to do
        # this *after* deleting the service via the REST API and migrating the
        # server because otherwise that will set a uuid when looking up the
        # service.
        with utils.temporary_mutation(ctxt, read_deleted='yes'):
            service_ref = db.service_update(
                ctxt, deleted_service['id'], {'uuid': None})
            self.assertIsNone(service_ref['uuid'])

        # Finally, list servers as an admin so it joins on services to get host
        # information.
        servers = self.admin_api.get_servers(detail=True)
        for server in servers:
            self.assertEqual('UP', server['host_status'])
Code Example #45
File: fake_driver.py Project: pratgohi/nova-powervm
    def __init__(self, virtapi):
        super(FakePowerVMDriver, self).__init__(virtapi)

        # Use the fake driver for scaffolding for now
        fake.set_nodes(['fake-PowerVM'])
        self._fake = fake.FakeDriver(virtapi)
Code Example #46
    def __init__(self, virtapi):
        super(FakePowerVMDriver, self).__init__(virtapi)

        # Use the fake driver for scaffolding for now
        fake.set_nodes(['fake-PowerVM'])
        self._fake = fake.FakeDriver(virtapi)
Code Example #47
 def setUp(self):
     super(FakeDriverMultiNodeTestCase, self).setUp()
     self.driver = fake.FakeDriver(virtapi=None)
     fake.set_nodes(['aaa', 'bbb'])
Code Example #48
 def setUp(self):
     super(FakeDriverSingleNodeTestCase, self).setUp()
     self.driver = fake.FakeDriver(virtapi=None)
     fake.set_nodes(['xyz'])
Code Example #49
    def test_serial_no_valid_host_then_pass_with_third_host(self):
        """Creates 2 servers in order (not a multi-create request) in an
        anti-affinity group so there will be 1 server on each host. Then
        attempts to live migrate the first server which will fail because the
        only other available host will be full. Then starts up a 3rd compute
        service and retries the live migration which should then pass.
        """
        # Create the anti-affinity group used for the servers.
        group = self.api.post_server_groups({
            'name': 'test_serial_no_valid_host_then_pass_with_third_host',
            'policies': ['anti-affinity']
        })
        servers = []
        for x in range(2):
            server = self._build_minimal_create_server_request(
                self.api,
                'test_serial_no_valid_host_then_pass_with_third_host-%d' % x,
                networks='none')
            # Add the group hint so the server is created in our group.
            server_req = {
                'server': server,
                'os:scheduler_hints': {
                    'group': group['id']
                }
            }
            # Use microversion 2.37 for passing networks='none'.
            with utils.temporary_mutation(self.api, microversion='2.37'):
                server = self.api.post_server(server_req)
                servers.append(
                    self._wait_for_state_change(self.admin_api, server,
                                                'ACTIVE'))

        # Make sure each server is on a unique host.
        hosts = set([svr['OS-EXT-SRV-ATTR:host'] for svr in servers])
        self.assertEqual(2, len(hosts))

        # And make sure the group has 2 members.
        members = self.api.get_server_group(group['id'])['members']
        self.assertEqual(2, len(members))

        # Now attempt to live migrate one of the servers which should fail
        # because we don't have a free host. Since we're using microversion 2.1
        # the scheduling will be synchronous and we should get back a 400
        # response for the NoValidHost error.
        body = {
            'os-migrateLive': {
                'host': None,
                'block_migration': False,
                'disk_over_commit': False
            }
        }
        # Specifically use the first server since that was the first member
        # added to the group.
        server = servers[0]
        ex = self.assertRaises(client.OpenStackApiException,
                               self.admin_api.post_server_action, server['id'],
                               body)
        self.assertEqual(400, ex.response.status_code)
        self.assertIn('No valid host', six.text_type(ex))

        # Now start up a 3rd compute service and retry the live migration which
        # should work this time.
        fake.set_nodes(['host3'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host3')
        self.admin_api.post_server_action(server['id'], body)
        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now the server should be on host3 since that was the only available
        # host for the live migration.
        self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
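
The 400/NoValidHost assertion above depends on the live migration being scheduled synchronously at the default microversion. From microversion 2.34 onward the conductor checks run asynchronously, so a variant of this test would poll the migration record rather than expect an error response. A hedged sketch of that alternative, assuming the _wait_for_migration_status() helper from nova's functional test base classes:

        # Hypothetical variant for microversion >= 2.34: NoValidHost is not
        # returned synchronously, so trigger the migration and wait for the
        # migration record to reach 'error' instead.
        self.admin_api.post_server_action(server['id'], body)
        self._wait_for_migration_status(server, ['error'])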
Code example #50
File: test_server_group.py Project: mahak/nova
    def test_serial_no_valid_host_then_pass_with_third_host(self):
        """Creates 2 servers in order (not a multi-create request) in an
        anti-affinity group so there will be 1 server on each host. Then
        attempts to live migrate the first server which will fail because the
        only other available host will be full. Then starts up a 3rd compute
        service and retries the live migration which should then pass.
        """
        # Create the anti-affinity group used for the servers.
        group = self.api.post_server_groups(
            {'name': 'test_serial_no_valid_host_then_pass_with_third_host',
             'policies': ['anti-affinity']})
        servers = []
        for x in range(2):
            server = self._build_minimal_create_server_request(
                self.api,
                'test_serial_no_valid_host_then_pass_with_third_host-%d' % x,
                networks='none')
            # Add the group hint so the server is created in our group.
            server_req = {
                'server': server,
                'os:scheduler_hints': {'group': group['id']}
            }
            # Use microversion 2.37 for passing networks='none'.
            with utils.temporary_mutation(self.api, microversion='2.37'):
                server = self.api.post_server(server_req)
                servers.append(
                    self._wait_for_state_change(
                        self.admin_api, server, 'ACTIVE'))

        # Make sure each server is on a unique host.
        hosts = set([svr['OS-EXT-SRV-ATTR:host'] for svr in servers])
        self.assertEqual(2, len(hosts))

        # And make sure the group has 2 members.
        members = self.api.get_server_group(group['id'])['members']
        self.assertEqual(2, len(members))

        # Now attempt to live migrate one of the servers which should fail
        # because we don't have a free host. Since we're using microversion 2.1
        # the scheduling will be synchronous and we should get back a 400
        # response for the NoValidHost error.
        body = {
            'os-migrateLive': {
                'host': None,
                'block_migration': False,
                'disk_over_commit': False
            }
        }
        # Specifically use the first server since that was the first member
        # added to the group.
        server = servers[0]
        ex = self.assertRaises(client.OpenStackApiException,
                               self.admin_api.post_server_action,
                               server['id'], body)
        self.assertEqual(400, ex.response.status_code)
        self.assertIn('No valid host', six.text_type(ex))

        # Now start up a 3rd compute service and retry the live migration which
        # should work this time.
        fake.set_nodes(['host3'])
        self.start_service('compute', host='host3')
        self.admin_api.post_server_action(server['id'], body)
        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now the server should be on host3 since that was the only available
        # host for the live migration.
        self.assertEqual('host3', server['OS-EXT-SRV-ATTR:host'])
Code example #51
    def setUp(self):
        self.driver_module = 'nova.virt.fake.FakeDriver'
        fake.set_nodes(['myhostname'])
        super(FakeConnectionTestCase, self).setUp()
Code example #52
File: test_bug_1702454.py Project: mahak/nova
    def setUp(self):
        super(SchedulerOnlyChecksTargetTest, self).setUp()
        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.flags(use_neutron=True)
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        # The admin API is used to get the server details to verify the
        # host on which the server was built.
        self.admin_api = api_fixture.admin_api
        self.api = api_fixture.api

        # the image fake backend needed for image discovery
        image_fake.stub_out_image_service(self)
        self.addCleanup(image_fake.FakeImageService_reset)

        self.start_service('conductor')

        # We have to get the image before we use 2.latest otherwise we'll get
        # a 404 on the /images proxy API because of 2.36.
        self.image_id = self.api.get_images()[0]['id']

        # Use the latest microversion available to make sure something does
        # not regress in new microversions; cap as necessary.
        self.admin_api.microversion = 'latest'
        self.api.microversion = 'latest'

        # The consoleauth service is needed for deleting console tokens when
        # the server is deleted.
        self.start_service('consoleauth')

        # Define a very basic scheduler that only verifies if host is down.
        self.flags(enabled_filters=['ComputeFilter'],
                   group='filter_scheduler')
        # NOTE(sbauza): Use the above weigher so we are sure that
        # we prefer first host1 for the boot request and forget about any
        # other weigher.
        # Host2 should only be preferred over host3 if and only if that's the
        # only host we verify (as requested_destination does).
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        self.start_service('scheduler')

        # Let's now start three compute nodes as we said above.
        # set_nodes() is needed to have each compute service return a
        # different nodename, so we get two hosts in the list of candidates
        # for scheduling. Otherwise both hosts will have the same default
        # nodename "fake-mini". The host passed to start_service controls the
        # "host" attribute and set_nodes() sets the "nodename" attribute.
        # We set_nodes() to make host and nodename the same for each compute.
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.start_service('compute', host='host2')
        fake.set_nodes(['host3'])
        self.start_service('compute', host='host3')
        self.useFixture(cast_as_call.CastAsCall(self))
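
The weight_classes option above points at a HostNameWeigher defined in the same test module but not included in this excerpt. A minimal sketch of such a weigher, matching the host1-over-host2-over-host3 preference the comments describe (the specific weight values are an assumption):

from nova.scheduler import weights


class HostNameWeigher(weights.BaseHostWeigher):
    # Statically prefer host1 over host2 over host3 so the scheduling order
    # in the test is predictable; any other host gets zero weight.
    _host_weights = {'host1': 100, 'host2': 50, 'host3': 10}

    def _weigh_object(self, host_state, weight_properties):
        return self._host_weights.get(host_state.host, 0)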