Example #1
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(use_local=True, group='conductor')
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager')
        self.compute = importutils.import_object(CONF.compute_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_show(meh, context, id, **kwargs):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(nova.tests.unit.image.fake._FakeImageService, 'show',
                       fake_show)
        fake_network.set_stub_network_methods(self.stubs)
        fake_server_actions.stub_out_action_events(self.stubs)
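The setUp() examples in this listing only install the stub and register the cleanup; the assertions live in the individual tests. As a rough sketch of that second half (assuming the fake_notifier module shown here is nova.tests.unit.fake_notifier, which collects legacy notifications in a NOTIFICATIONS list of FakeMessage objects with event_type and payload attributes), a test helper might look like the following; the helper name is illustrative and not taken from the examples:

    def _assert_single_notification(self, expected_event_type):
        # stub_notifier() reroutes emitted notifications into in-memory
        # lists instead of the message bus; reset() (registered with
        # addCleanup above) empties them between tests.
        self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        message = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(expected_event_type, message.event_type)
        return message.payload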
Example #2
    def setUp(self):
        super(NotificationsTestCase, self).setUp()
        self.fixture = self.useFixture(o_fixture.ClearRequestContext())

        self.net_info = fake_network.fake_get_instance_nw_info(self, 1, 1)

        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return self.net_info

        self.stub_out('nova.network.api.API.get_instance_nw_info',
                      fake_get_nw_info)
        fake_network.set_stub_network_methods(self)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(network_manager='nova.network.manager.FlatManager',
                   notify_on_state_change="vm_and_task_state",
                   host='testhost')

        self.flags(api_servers=['http://localhost:9292'], group='glance')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        self.instance = self._wrapped_create()

        self.decorated_function_called = False
Example #3
    def setUp(self):
        super(NotificationsTestCase, self).setUp()
        self.fixture = self.useFixture(o_fixture.ClearRequestContext())

        self.net_info = fake_network.fake_get_instance_nw_info(self, 1,
                                                               1)

        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return self.net_info

        self.stub_out('nova.network.api.API.get_instance_nw_info',
                fake_get_nw_info)

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.flags(host='testhost')
        self.flags(notify_on_state_change="vm_and_task_state",
                   group='notifications')

        self.flags(api_servers=['http://localhost:9292'], group='glance')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        self.fake_time = datetime.datetime(2017, 2, 2, 16, 45, 0)
        timeutils.set_time_override(self.fake_time)

        self.instance = self._wrapped_create()

        self.decorated_function_called = False
Example #4
    def setUp(self):
        super(_LibvirtEvacuateTest, self).setUp()

        self.useFixture(nova_fixtures.NeutronFixture(self))
        fake_network.set_stub_network_methods(self)
        self.useFixture(nova_fixtures.PlacementFixture())

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.admin_api
        # force_down and evacuate without onSharedStorage
        self.api.microversion = '2.14'

        fake_image.stub_out_image_service(self)
        self.addCleanup(fake_image.FakeImageService_reset)

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        self.start_service('conductor')
        self.start_service('scheduler')

        self.flags(compute_driver='libvirt.LibvirtDriver')
        self.compute0 = self._start_compute('compute0')

        # Choice of image id and flavor are arbitrary. Fixed for consistency.
        self.image_id = fake_image.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID
        self.flavor_id = next(flavor for flavor in self.api.get_flavors()
                              if flavor['name'] == 'm1.tiny')['id']
Example #5
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')
Example #6
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())

        context_patcher = mock.patch(
            'oslo_context.context.generate_request_id',
            return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
        self.mock_gen_request_id = context_patcher.start()
        self.addCleanup(context_patcher.stop)

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')
        # Reset the service create notifications
        fake_notifier.reset()
Example #7
    def setUp(self):
        super(FakeVersionedNotifierTestCase, self).setUp()

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.context = context.RequestContext()
Example #8
    def setUp(self):
        self.flags(compute_driver=self.compute_driver)
        super(ProviderUsageBaseTestCase, self).setUp()

        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
        self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
        self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
        self.useFixture(nova_fixtures.AllServicesCurrent())

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.admin_api = self.api_fixture.admin_api
        self.admin_api.microversion = self.microversion
        self.api = self.admin_api

        # the image fake backend needed for image discovery
        self.image_service = (
            nova.tests.unit.image.fake.stub_out_image_service(self))

        self.start_service('conductor')
        self.scheduler_service = self.start_service('scheduler')

        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
Example #9
    def test_evacuate_no_valid_host(self):
        # Boot a server
        server = self._boot_a_server()

        # Force source compute down
        compute_id = self.api.get_services(
            host=self.hostname, binary='nova-compute')[0]['id']
        self.api.put_service(compute_id, {'forced_down': 'true'})

        fake_notifier.stub_notifier(self)
        fake_notifier.reset()

        # Initiate evacuation
        post = {'evacuate': {}}
        self.api.post_server_action(server['id'], post)

        self._wait_for_notification_event_type('compute_task.rebuild_server')

        server = self._wait_for_state_change(self.api, server, 'ERROR')
        self.assertEqual(self.hostname, server['OS-EXT-SRV-ATTR:host'])

        # Check migrations
        migrations = self.api.get_migrations()
        self.assertEqual(1, len(migrations))
        self.assertEqual('evacuation', migrations[0]['migration_type'])
        self.assertEqual(server['id'], migrations[0]['instance_uuid'])
        self.assertEqual(self.hostname, migrations[0]['source_compute'])
        self.assertEqual('error', migrations[0]['status'])
Example #10
    def setUp(self):
        super(NotificationsTestCase, self).setUp()
        self.fixture = self.useFixture(o_fixture.ClearRequestContext())

        self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1,
                                                               1)

        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return self.net_info

        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                fake_get_nw_info)
        fake_network.set_stub_network_methods(self.stubs)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager',
                   notify_on_state_change="vm_and_task_state",
                   host='testhost')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        self.instance = self._wrapped_create()

        self.decorated_function_called = False
Example #11
    def setUp(self):
        super(FakeVersionedNotifierTestCase, self).setUp()

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.context = context.RequestContext()
Example #12
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))
        self.useFixture(nova_fixtures.GlanceFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())

        context_patcher = mock.patch(
            'oslo_context.context.generate_request_id',
            return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
        self.mock_gen_request_id = context_patcher.start()
        self.addCleanup(context_patcher.stop)

        self.start_service('conductor')
        self.start_service('scheduler')
        self.compute = self.start_service('compute')
        # Reset the service create notifications
        fake_notifier.reset()
Example #13
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()
        # This needs to be mocked to avoid REQUIRES_LOCKING being set to True
        patcher = mock.patch('oslo_concurrency.lockutils.lock')
        self.addCleanup(patcher.stop)
        patcher.start()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor', manager=CONF.conductor.manager)
        self.start_service('scheduler')
        self.start_service('network')
        self.compute = self.start_service('compute')
Example #14
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self, 1, 1)

        super(UsageInfoTestCase, self).setUp()
        self.stub_out('nova.network.api.API.get_instance_nw_info',
                      fake_get_nw_info)

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.flags(compute_driver='fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager')
        self.compute = manager.ComputeManager()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_show(meh, context, id, **kwargs):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.flags(group='glance', api_servers=['http://localhost:9292'])
        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
                      fake_show)
        fake_network.set_stub_network_methods(self)
        fake_server_actions.stub_out_action_events(self)
Example #15
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        # NOTE(gibi): Notification payloads always reflect the data needed
        # for every supported API microversion, so it is safe to use the
        # latest API version in the tests. This also lets the tests exercise
        # new API features.
        max_version = 'latest'
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        self.flags(driver='chance_scheduler', group='scheduler')
        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')
Example #16
    def setUp(self):
        super(NotificationsTestCase, self).setUp()

        self.net_info = fake_network.fake_get_instance_nw_info(
            self.stubs, 1, 1)

        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return self.net_info

        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)
        fake_network.set_stub_network_methods(self.stubs)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager',
                   notify_on_state_change="vm_and_task_state",
                   host='testhost')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        self.instance = self._wrapped_create()
Example #17
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
                api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        self.flags(driver='chance_scheduler', group='scheduler')
        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')
Example #18
    def test_evacuate_no_valid_host(self):
        # Boot a server
        server = self._boot_a_server()

        # Force source compute down
        compute_id = self.api.get_services(
            host=self.hostname, binary='nova-compute')[0]['id']
        self.api.put_service(compute_id, {'forced_down': 'true'})

        fake_notifier.stub_notifier(self)
        fake_notifier.reset()

        # Initiate evacuation
        post = {'evacuate': {}}
        self.api.post_server_action(server['id'], post)

        self._wait_for_notification_event_type('compute_task.rebuild_server')

        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        self.assertEqual(self.hostname, server['OS-EXT-SRV-ATTR:host'])

        # Check migrations
        migrations = self.api.get_migrations()
        self.assertEqual(1, len(migrations))
        self.assertEqual('evacuation', migrations[0]['migration_type'])
        self.assertEqual(server['id'], migrations[0]['instance_uuid'])
        self.assertEqual(self.hostname, migrations[0]['source_compute'])
        self.assertEqual('error', migrations[0]['status'])
Example #19
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
                api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())

        context_patcher = mock.patch(
            'oslo_context.context.generate_request_id',
            return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
        self.mock_gen_request_id = context_patcher.start()
        self.addCleanup(context_patcher.stop)

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')
        # Reset the service create notifications
        fake_notifier.reset()
Example #20
    def setUp(self):
        super(ImageCacheTest, self).setUp()

        self.flags(compute_driver='fake.FakeDriverWithCaching')

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
        self.context = context.get_admin_context()

        self.conductor = self.start_service('conductor')
        self.compute1 = self.start_service('compute', host='compute1')
        self.compute2 = self.start_service('compute', host='compute2')
        self.compute3 = self.start_service('compute',
                                           host='compute3',
                                           cell='cell2')
        self.compute4 = self.start_service('compute',
                                           host='compute4',
                                           cell='cell2')
        self.compute5 = self.start_service('compute',
                                           host='compute5',
                                           cell='cell2')

        cell2 = self.cell_mappings['cell2']
        with context.target_cell(self.context, cell2) as cctxt:
            srv = objects.Service.get_by_compute_host(cctxt, 'compute5')
            srv.forced_down = True
            srv.save()
Example #21
    def setUp(self):
        super(TestParallelEvacuationWithServerGroup, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api
        # 2.11 is needed for force_down
        # 2.14 is needed for evacuate without onSharedStorage flag
        self.api.microversion = '2.14'

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we need two instances with
        # anti-affinity server group policy to be booted
        self.compute1 = self.start_service('compute', host='host1')
        self.compute2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        manager_class = nova.compute.manager.ComputeManager
        original_rebuild = manager_class._do_rebuild_instance

        def fake_rebuild(self_, context, instance, *args, **kwargs):
            # Simulate that the rebuild request of one of the instances
            # reaches the target compute manager significantly later so the
            # rebuild of the other instance can finish before the late
            # validation of the first rebuild.
            # We cannot simply delay the virt driver's rebuild or the
            # manager's _rebuild_default_impl as those run after the late
            # validation
            if instance.host == 'host1':
                # wait for the other instance rebuild to start
                fake_notifier.wait_for_versioned_notifications(
                    'instance.rebuild.start', n_events=1)

            original_rebuild(self_, context, instance, *args, **kwargs)

        self.stub_out('nova.compute.manager.ComputeManager.'
                      '_do_rebuild_instance', fake_rebuild)
Example #22
    def test_evacuate_no_valid_host(self):
        # Boot a server
        server = self._boot_a_server()

        # Force source compute down
        compute_id = self.api.get_services(
            host=self.hostname, binary='nova-compute')[0]['id']
        self.api.put_service(compute_id, {'forced_down': 'true'})

        fake_notifier.stub_notifier(self)
        fake_notifier.reset()

        # Initiate evacuation
        self._evacuate_server(
            server, expected_state='ERROR', expected_host=self.hostname,
            expected_migration_status='error')
        self._wait_for_notification_event_type('compute_task.rebuild_server')

        # Check migrations
        migrations = self.api.get_migrations()
        self.assertEqual(1, len(migrations))
        self.assertEqual('evacuation', migrations[0]['migration_type'])
        self.assertEqual(server['id'], migrations[0]['instance_uuid'])
        self.assertEqual(self.hostname, migrations[0]['source_compute'])
        self.assertEqual('error', migrations[0]['status'])
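Several of the evacuation examples call a _wait_for_notification_event_type() helper defined elsewhere in their test class. A hedged sketch of what such a helper could look like follows, assuming fake_notifier.VERSIONED_NOTIFICATIONS is a list of dicts with an 'event_type' key (as in nova's fake_notifier) and that time and fake_notifier are imported at module level; the polling interval and timeout are arbitrary choices, not values from the examples:

    def _wait_for_notification_event_type(self, event_type, timeout=10.0):
        # Poll the list filled by the stubbed notifier until the expected
        # versioned notification shows up or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if any(n['event_type'] == event_type
                   for n in fake_notifier.VERSIONED_NOTIFICATIONS):
                return
            time.sleep(0.1)
        self.fail('Notification %s was not received' % event_type)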
Example #23
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self, 1, 1)

        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.flags(use_local=True, group='conductor')
        self.flags(compute_driver='fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager')
        self.compute = importutils.import_object(CONF.compute_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_show(meh, context, id, **kwargs):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.flags(group='glance', api_servers=['http://localhost:9292'])
        self.stubs.Set(nova.tests.unit.image.fake._FakeImageService,
                       'show', fake_show)
        fake_network.set_stub_network_methods(self)
        fake_server_actions.stub_out_action_events(self.stubs)
Example #24
    def setUp(self):
        self.flags(compute_driver=self.compute_driver)
        super(ProviderUsageBaseTestCase, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())
        self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(nova_fixtures.AllServicesCurrent())

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        placement = self.useFixture(func_fixtures.PlacementFixture())
        self.placement_api = placement.api
        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))

        self.admin_api = api_fixture.admin_api
        self.admin_api.microversion = self.microversion
        self.api = self.admin_api

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)

        self.start_service('conductor')
        self.scheduler_service = self.start_service('scheduler')

        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.computes = {}
Example #25
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()
        # This needs to be mocked to avoid REQUIRES_LOCKING being set to True
        patcher = mock.patch('oslo_concurrency.lockutils.lock')
        self.addCleanup(patcher.stop)
        patcher.start()

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
                api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor', manager=CONF.conductor.manager)
        self.start_service('scheduler')
        self.start_service('network')
        self.start_service('compute')
Example #26
 def setUp(self):
     super(_BaseTestCase, self).setUp()
     self.remote_object_calls = list()
     self.user_id = 'fake-user'
     self.project_id = 'fake-project'
     self.context = context.RequestContext(self.user_id, self.project_id)
     fake_notifier.stub_notifier(self.stubs)
     self.addCleanup(fake_notifier.reset)
Example #27
 def setUp(self):
     super(_BaseTestCase, self).setUp()
     self.remote_object_calls = list()
     self.user_id = 'fake-user'
     self.project_id = 'fake-project'
     self.context = context.RequestContext(self.user_id, self.project_id)
     fake_notifier.stub_notifier(self.stubs)
     self.addCleanup(fake_notifier.reset)
Example #28
 def setUp(self):
     super(LiveMigrationCinderFailure, self).setUp()
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)
     # Start a second compute node (the first one was started for us by
     # _IntegratedTestBase. set_nodes() is needed to avoid duplicate
     # nodenames. See comments in test_bug_1702454.py.
     self.compute2 = self.start_service('compute', host='host2')
Example #29
 def setUp(self):
     super(ComputeHostAPITestCase, self).setUp()
     self.host_api = compute.HostAPI()
     self.aggregate_api = compute_api.AggregateAPI()
     self.ctxt = context.get_admin_context()
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)
     self.req = fakes.HTTPRequest.blank('')
     self.controller = services.ServiceController()
Example #30
 def setUp(self):
     super(ComputeHostAPITestCase, self).setUp()
     self.host_api = compute.HostAPI()
     self.aggregate_api = compute_api.AggregateAPI()
     self.ctxt = context.get_admin_context()
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)
     self.req = fakes.HTTPRequest.blank('')
     self.controller = services.ServiceController()
Example #31
    def setUp(self):
        super().setUp()
        images = self.api.get_images()
        # save references to first two images for server create and rebuild
        self.image_ref_0 = images[0]['id']
        self.image_ref_1 = images[1]['id']

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
Example #32
 def setUp(self):
     super(LiveMigrationCinderFailure, self).setUp()
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)
     # Start a second compute node (the first one was started for us by
     # _IntegratedTestBase. set_nodes() is needed to avoid duplicate
     # nodenames. See comments in test_bug_1702454.py.
     fake.set_nodes(['host2'])
     self.addCleanup(fake.restore_nodes)
     self.compute2 = self.start_service('compute', host='host2')
Example #33
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
Example #34
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
                api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
Example #35
 def setUp(self):
     super(ServicesJsonTest, self).setUp()
     self.stub_out("nova.db.service_get_all",
                   test_services.fake_db_api_service_get_all)
     self.stub_out("nova.db.service_get_by_host_and_binary",
                   test_services.fake_service_get_by_host_binary)
     self.stub_out("nova.db.service_update",
                   test_services.fake_service_update)
     self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))
     fake_notifier.stub_notifier(self.stubs)
     self.addCleanup(fake_notifier.reset)
Example #36
    def setUp(self):
        # Use a custom weigher to make sure that we have a predictable host
        # selection order during scheduling
        self.useFixture(nova_fixtures.HostNameWeigherFixture())

        super(PinnedComputeRpcTests, self).setUp()
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.compute1 = self._start_compute(host='host1')
        self.compute2 = self._start_compute(host='host2')
        self.compute3 = self._start_compute(host='host3')
Example #37
    def test_end_to_end(self):
        """This test emulates a full end to end test showing that without this
        feature a vm cannot be spawning using a custom trait and then start a
        compute service that provides that trait.
        """

        self.neutron = nova_fixtures.NeutronFixture(self)
        self.useFixture(self.neutron)
        fake_image.stub_out_image_service(self)
        self.addCleanup(fake_image.FakeImageService_reset)
        # Start nova services.
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
        self.api.microversion = 'latest'
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
        self.start_service('conductor')
        # start nova-compute that will not have the additional trait.
        self._start_compute("fake-host-1")

        node_name = "fake-host-2"

        # create a config file with explicit name
        provider_config = self._create_config_entry(node_name,
                                                    id_method="name")
        self._place_config_file("provider_config.yaml", provider_config)

        self._create_flavor(name='CUSTOM_Flavor',
                            id=42,
                            vcpu=4,
                            memory_mb=4096,
                            disk=1024,
                            swap=0,
                            extra_spec={
                                f"trait:{os_traits.normalize_name(node_name)}":
                                "required"
                            })

        self._create_server(flavor_id=42,
                            expected_state='ERROR',
                            networks=[{
                                'port': self.neutron.port_1['id']
                            }])

        # start compute node that will report the custom trait.
        self._start_compute("fake-host-2")
        self._create_server(flavor_id=42,
                            expected_state='ACTIVE',
                            networks=[{
                                'port': self.neutron.port_1['id']
                            }])
Example #38
    def setUp(self):
        super(_IntegratedTestBase, self).setUp()

        self.useFixture(cast_as_call.CastAsCall(self))

        self.placement = self.useFixture(func_fixtures.PlacementFixture()).api
        self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
        self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self._setup_services()
Example #39
    def setUp(self):
        super(_IntegratedTestBase, self).setUp()

        self.fake_image_service =\
            nova.tests.unit.image.fake.stub_out_image_service(self)

        self.useFixture(cast_as_call.CastAsCall(self))
        placement = self.useFixture(func_fixtures.PlacementFixture())
        self.placement_api = placement.api
        self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self._setup_services()

        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
Example #40
    def setUp(self):
        # Use our custom weigher to make sure that we have
        # a predictable host selection order during scheduling
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')

        super(PinnedComputeRpcTests, self).setUp()
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(RequestSpecImageSerializationFixture(self))

        self.compute1 = self._start_compute(host='host1')
        self.compute2 = self._start_compute(host='host2')
        self.compute3 = self._start_compute(host='host3')

        flavors = self.api.get_flavors()
        self.flavor1 = flavors[0]
Example #41
    def setUp(self):
        super(RegressionTest1835822, self).setUp()
        # Use the standard fixtures.
        self.useFixture(policy_fixture.RealPolicyFixture())
        self.useFixture(nova_fixtures.NeutronFixture(self))
        self.useFixture(func_fixtures.PlacementFixture())
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).api
        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('compute')
        # the image fake backend needed for image discovery
        fake_image.stub_out_image_service(self)
        self.addCleanup(fake_image.FakeImageService_reset)
        images = self.api.get_images()
        self.image_ref_0 = images[0]['id']
        self.image_ref_1 = images[1]['id']

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
Example #42
    def setUp(self):
        super(UnshelveNeutronErrorTest, self).setUp()
        # Start standard fixtures.
        placement = func_fixtures.PlacementFixture()
        self.useFixture(placement)
        self.placement = placement.api
        self.neutron = nova_fixtures.NeutronFixture(self)
        self.useFixture(self.neutron)
        self.useFixture(nova_fixtures.GlanceFixture(self))
        # Start nova services.
        self.api = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
        self.api.microversion = 'latest'
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('compute', host='host1')
        self.start_service('compute', host='host2')
Example #43
 def setUp(self):
     super(RescheduleBuildAvailabilityZoneUpCall, self).setUp()
     # Use the standard fixtures.
     self.useFixture(policy_fixture.RealPolicyFixture())
     self.useFixture(nova_fixtures.NeutronFixture(self))
     self.useFixture(func_fixtures.PlacementFixture())
     fake_image.stub_out_image_service(self)
     self.addCleanup(fake_image.FakeImageService_reset)
     # Start controller services.
     self.api = self.useFixture(nova_fixtures.OSAPIFixture(
         api_version='v2.1')).admin_api
     self.start_service('conductor')
     self.start_service('scheduler')
     # Start two computes with the fake reschedule driver.
     self.flags(compute_driver='fake.FakeRescheduleDriver')
     self.start_service('compute', host='host1')
     self.start_service('compute', host='host2')
     # Listen for notifications.
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)
Example #44
 def setUp(self):
     super(RescheduleMigrateAvailabilityZoneUpCall, self).setUp()
     # Use the standard fixtures.
     self.useFixture(nova_fixtures.RealPolicyFixture())
     self.useFixture(nova_fixtures.GlanceFixture(self))
     self.useFixture(nova_fixtures.NeutronFixture(self))
     self.useFixture(func_fixtures.PlacementFixture())
     # Start controller services.
     self.api = self.useFixture(
         nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
     self.start_service('conductor')
     self.start_service('scheduler')
     # We need three hosts for this test, one is the initial host on which
     # the server is built, and the others are for the migration where the
     # first will fail and the second is an alternate.
     self.start_service('compute', host='host1')
     self.start_service('compute', host='host2')
     self.start_service('compute', host='host3')
     # Listen for notifications.
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)
Example #45
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(network_api.API, "get_instance_nw_info", fake_get_nw_info)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(use_local=True, group="conductor")
        self.flags(compute_driver="nova.virt.fake.FakeDriver", network_manager="nova.network.manager.FlatManager")
        self.compute = importutils.import_object(CONF.compute_manager)
        self.user_id = "fake"
        self.project_id = "fake"
        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_show(meh, context, id, **kwargs):
            return {"id": 1, "properties": {"kernel_id": 1, "ramdisk_id": 1}}

        self.stubs.Set(nova.tests.unit.image.fake._FakeImageService, "show", fake_show)
        fake_network.set_stub_network_methods(self.stubs)
        fake_server_actions.stub_out_action_events(self.stubs)
Example #46
 def setUp(self):
     super(ComputeHostAPITestCase, self).setUp()
     self.host_api = compute.HostAPI()
     self.ctxt = context.get_admin_context()
     fake_notifier.stub_notifier(self.stubs)
     self.addCleanup(fake_notifier.reset)
Example #47
    def setUp(self):
        super(TestParallelEvacuationWithServerGroup, self).setUp()

        self.useFixture(policy_fixture.RealPolicyFixture())

        # The NeutronFixture is needed to stub out validate_networks in API.
        self.useFixture(nova_fixtures.NeutronFixture(self))

        # This stubs out the network allocation in compute.
        fake_network.set_stub_network_methods(self)

        # We need the computes reporting into placement for the filter
        # scheduler to pick a host.
        self.useFixture(func_fixtures.PlacementFixture())

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
            api_version='v2.1'))
        self.api = api_fixture.admin_api
        # 2.11 is needed for force_down
        # 2.14 is needed for evacuate without onSharedStorage flag
        self.api.microversion = '2.14'

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)

        self.start_service('conductor')
        self.start_service('scheduler')

        # We start two compute services because we need two instances with
        # anti-affinity server group policy to be booted
        fake.set_nodes(['host1'])
        self.addCleanup(fake.restore_nodes)
        self.compute1 = self.start_service('compute', host='host1')
        fake.set_nodes(['host2'])
        self.compute2 = self.start_service('compute', host='host2')

        self.image_id = self.api.get_images()[0]['id']
        self.flavor_id = self.api.get_flavors()[0]['id']

        manager_class = nova.compute.manager.ComputeManager
        original_rebuild = manager_class._do_rebuild_instance

        def fake_rebuild(self_, context, instance, *args, **kwargs):
            # Simulate that the rebuild request of one of the instances
            # reaches the target compute manager significantly later so the
            # rebuild of the other instance can finish before the late
            # validation of the first rebuild.
            # We cannot simply delay the virt driver's rebuild or the
            # manager's _rebuild_default_impl as those run after the late
            # validation
            if instance.host == 'host1':
                # wait for the other instance rebuild to start
                fake_notifier.wait_for_versioned_notifications(
                    'instance.rebuild.start', n_events=1)

            original_rebuild(self_, context, instance, *args, **kwargs)

        self.stub_out('nova.compute.manager.ComputeManager.'
                      '_do_rebuild_instance', fake_rebuild)
Example #48
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        ec2utils.reset_cache()
        self.useFixture(fixtures.TempDir()).path
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.unit.fake_volume.API')

        def fake_show(meh, context, id, **kwargs):
            return {'id': id,
                    'name': 'fake_name',
                    'container_format': 'ami',
                    'status': 'active',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # Stub out the notification service so we use the no-op serializer
        # and avoid lazy-load traces with the wrap_exception decorator in
        # the compute service.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        # set up services
        self.conductor = self.start_service('conductor',
                manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.consoleauth = self.start_service('consoleauth')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        self.volume_api.reset_fake_api(self.context)

        self.stubs.Set(compute_manager.ComputeManager,
                       '_update_scheduler_instance_info', dumb)
        self.stubs.Set(compute_manager.ComputeManager,
                       '_delete_scheduler_instance_info', dumb)
        self.stubs.Set(compute_manager.ComputeManager,
                       '_sync_scheduler_instance_info', dumb)
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #49
    def setUp(self):
        super(EC2ValidateTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver')

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # Stub out the notification service so we use the no-op serializer
        # and avoid lazy-load traces with the wrap_exception decorator in
        # the compute service.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        # set up services
        self.conductor = self.start_service('conductor',
                manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.image_service = fake.FakeImageService()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

        self.EC2_MALFORMED_IDS = ['foobar', '', 123]
        self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']

        self.ec2_id_exception_map = [(x,
                exception.InvalidInstanceIDMalformed)
                for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                for x in self.EC2_VALID__IDS])
        self.volume_id_exception_map = [(x,
                exception.InvalidVolumeIDMalformed)
                for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                for x in self.EC2_VALID__IDS])

        def fake_show(meh, context, id, **kwargs):
            return {'id': id,
                    'container_format': 'ami',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(self, context, **kwargs):
            image = fake_show(self, context, None)
            image['name'] = kwargs.get('name')
            return [image]

        fake.stub_out_image_service(self)
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #50
 def setUp(self):
     super(WrapExceptionTestCase, self).setUp()
     fake_notifier.stub_notifier(self)
     self.addCleanup(fake_notifier.reset)