Example 1
    def _setup_server_with_volume_attached(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
        fake_notifier.reset()

        return server
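Example 1 resets the notifier immediately after attaching the volume, so the attach notifications are deliberately discarded before the test proper begins. For context, a plausible sketch of the _attach_volume_to_server helper it relies on, assuming the test client's volume-attachment call and the notification-wait helper seen in later examples (the exact nova implementation may differ):

    def _attach_volume_to_server(self, server, volume_id):
        # Attach the volume over the API, then wait until the versioned
        # instance.volume_attach.end notification confirms completion.
        self.api.post_server_volume(
            server['id'], {'volumeAttachment': {'volumeId': volume_id}})
        self._wait_for_notification('instance.volume_attach.end')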
Example 2
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
                api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())

        context_patcher = mock.patch(
            'oslo_context.context.generate_request_id',
            return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
        self.mock_gen_request_id = context_patcher.start()
        self.addCleanup(context_patcher.stop)

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')
        # Reset the service create notifications
        fake_notifier.reset()
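Every example in this listing goes through fake_notifier, so it helps to keep a mental model of that module: stub_notifier points nova's notifier machinery at an in-memory collector and reset empties it between test phases. A minimal sketch under those assumptions (the collector class and the stub target are illustrative, not the verbatim nova module):

    NOTIFICATIONS = []            # legacy, unversioned notifications
    VERSIONED_NOTIFICATIONS = []  # versioned notifications asserted on above

    class _FakeVersionedNotifier(object):
        # Collect notifications in memory instead of publishing them on the
        # message bus, so tests can assert on them synchronously.
        def _notify(self, priority, ctxt, event_type, payload):
            VERSIONED_NOTIFICATIONS.append({'priority': priority,
                                            'event_type': event_type,
                                            'payload': payload})

        def info(self, ctxt, event_type, payload):
            self._notify('INFO', ctxt, event_type, payload)

        def error(self, ctxt, event_type, payload):
            self._notify('ERROR', ctxt, event_type, payload)

    def reset():
        # Drop everything captured so far, so the next assertion sees only
        # the notifications emitted after this point.
        del NOTIFICATIONS[:]
        del VERSIONED_NOTIFICATIONS[:]

    def stub_notifier(test):
        # The patch target is an assumption; the real helper patches the
        # notifier classes in nova.rpc.
        test.stub_out('nova.rpc.VersionedNotifier', _FakeVersionedNotifier)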
Example 3
    def test_migrate_fault(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
            additional_extra_specs={'hw:numa_nodes': 1,
                                    'hw:numa_cpus.0': '0',
                                    'hw:numa_mem.0': 512})
        self._wait_for_notification('instance.create.end')
        # Force down the compute node
        service_id = self.api.get_service_id('nova-compute')
        self.admin_api.put_service_force_down(service_id, True)

        fake_notifier.reset()

        self.assertRaises(api_client.OpenStackApiException,
                          self.admin_api.post_server_action,
                          server['id'], {'migrate': None})
        self._wait_for_notification('compute_task.migrate_server.error')
        # 0. scheduler.select_destinations.start
        # 1. compute_task.migrate_server.error
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'compute_task-migrate_server-error',
            replacements={
                'instance_uuid': server['id'],
                'request_spec.instance_uuid': server['id'],
                'request_spec.security_groups': [],
                'request_spec.numa_topology.instance_uuid': server['id'],
                'request_spec.pci_requests.instance_uuid': server['id'],
                'reason.function_name': self.ANY,
                'reason.module_name': self.ANY,
                'reason.traceback': self.ANY
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
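The replacements argument uses dotted keys such as 'request_spec.instance_uuid', which suggests _verify_notification loads a stored notification sample, patches in the run-specific values, and deep-compares the result with the captured notification. A rough sketch under those assumptions (the sample path and the nova_object nesting are guesses, not the verbatim nova code):

    import json

    def _verify_notification(self, sample_file_name, replacements=None,
                             actual=None):
        # Load the documented sample, substitute the fields that vary per
        # run, then deep-compare with the captured notification.
        with open('doc/notification_samples/%s.json' % sample_file_name) as f:
            expected = json.load(f)
        for dotted_key, value in (replacements or {}).items():
            keys = dotted_key.split('.')
            node = expected['payload']
            for key in keys[:-1]:
                # Versioned payloads are assumed to nest under
                # 'nova_object.data' at every level.
                node = node['nova_object.data'][key]
            node['nova_object.data'][keys[-1]] = value
        self.assertJsonEqual(expected, actual)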
Example 4
    def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):

        # We have to depend on a specific image and flavor to fix the content
        # of the notification that will be emitted
        flavor_body = {
            'flavor': {
                'name': 'test_flavor',
                'ram': 512,
                'vcpus': 1,
                'disk': 1,
                'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
            }
        }

        flavor_id = self.api.post_flavor(flavor_body)['id']
        extra_specs = {"extra_specs": {"hw:watchdog_action": "disabled"}}
        self.admin_api.post_extra_spec(flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        keypair_req = {
            "keypair": {
                "name": "my-key",
                "public_key": fake_crypto.get_ssh_public_key()
            }
        }
        self.api.post_keypair(keypair_req)

        server = self._build_minimal_create_server_request(
            self.api,
            'some-server',
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)

        # NOTE(gibi): from microversion 2.19 the description is not set to the
        # instance name automatically but can be provided at boot.
        server['description'] = 'some-server'

        if extra_params:
            extra_params['return_reservation_id'] = True
            extra_params['key_name'] = 'my-key'
            server.update(extra_params)

        post = {'server': server}
        created_server = self.api.post_server(post)
        reservation_id = created_server['reservation_id']
        created_server = self.api.get_servers(
            detail=False, search_opts={'reservation_id': reservation_id})[0]

        self.assertTrue(created_server['id'])

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(self.api, created_server,
                                                   expected_status)
        found_server['reservation_id'] = reservation_id

        if found_server['status'] == 'ACTIVE':
            self.api.put_server_tags(found_server['id'], ['tag1'])
        return found_server
Example 5
    def test_server_group_add_member(self):
        group_req = {
            "name": "test-server-group",
            "policy": "anti-affinity",
            "rules": {
                "max_server_per_host": 3
            }
        }
        group = self.api.post_server_groups(group_req)
        fake_notifier.reset()

        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]},
            scheduler_hints={"group": group['id']})
        self._wait_for_notification('instance.update')
        # 0: server_group.add_member
        # 1: instance.create.start
        # 2: instance.create.end
        # 3: instance.update
        #    (Due to adding server tags in the '_boot_a_server' method.)
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'server_group-add_member',
            replacements={
                'uuid': group['id'],
                'members': [server['id']]
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
Example 6
    def test_aggregate_update_metadata(self):
        aggregate_req = {
            "aggregate": {
                "name": "my-aggregate",
                "availability_zone": "nova"}}
        aggregate = self.admin_api.post_aggregate(aggregate_req)

        set_metadata_req = {
            "set_metadata": {
                "metadata": {
                    "availability_zone": "AZ-1"
                }
            }
        }
        fake_notifier.reset()
        self.admin_api.post_aggregate_action(aggregate['id'], set_metadata_req)

        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-update_metadata-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'aggregate-update_metadata-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
Example 7
    def test_rebuild_server_exc(self, mock_rebuild):
        def _compute_resources_unavailable(*args, **kwargs):
            raise exception.ComputeResourcesUnavailable(
                reason="fake-resource")

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        fake_notifier.reset()

        post = {
            'rebuild': {
                'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'metadata': {}
            }
        }
        self.api.post_server_action(server['id'], post)
        mock_rebuild.side_effect = _compute_resources_unavailable
        self._wait_for_state_change(self.api, server, expected_status='ERROR')
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-rebuild-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
Example 8
    def test_rebuild_server_exc(self, mock_rebuild):
        def _virtual_interface_create_failed(*args, **kwargs):
            # A real error that could come out of driver.spawn() during rebuild
            raise exception.VirtualInterfaceCreateException()

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        fake_notifier.reset()

        post = {
            'rebuild': {
                'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'metadata': {}
            }
        }
        self.api.post_server_action(server['id'], post)
        mock_rebuild.side_effect = _virtual_interface_create_failed
        self._wait_for_state_change(self.api, server, expected_status='ERROR')
        notification = self._get_notifications('instance.rebuild.error')
        self.assertEqual(1, len(notification))
        self._verify_notification(
            'instance-rebuild-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=notification[0])
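Unlike Example 7, which indexes VERSIONED_NOTIFICATIONS directly, this variant filters by event type, making the assertion robust against unrelated notifications arriving in between. _get_notifications is presumably a simple filter along these lines:

    def _get_notifications(self, event_type):
        # Return only the captured versioned notifications matching the
        # requested event type, in arrival order.
        return [notification for notification
                in fake_notifier.VERSIONED_NOTIFICATIONS
                if notification['event_type'] == event_type]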
Example 9
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(nova_fixtures.PlacementFixture())

        self.start_service('conductor')
        self.start_service('scheduler')
        self.start_service('network', manager=CONF.network_manager)
        self.compute = self.start_service('compute')
        # Reset the service create notifications
        fake_notifier.reset()
Example 10
    def test_evacuate_no_valid_host(self):
        # Boot a server
        server = self._boot_a_server()

        # Force source compute down
        compute_id = self.api.get_services(
            host=self.hostname, binary='nova-compute')[0]['id']
        self.api.put_service(compute_id, {'forced_down': 'true'})

        fake_notifier.stub_notifier(self)
        fake_notifier.reset()

        # Initiate evacuation
        post = {'evacuate': {}}
        self.api.post_server_action(server['id'], post)

        self._wait_for_notification_event_type('compute_task.rebuild_server')

        server = self._wait_for_state_change(self.api, server, 'ERROR')
        self.assertEqual(self.hostname, server['OS-EXT-SRV-ATTR:host'])

        # Check migrations
        migrations = self.api.get_migrations()
        self.assertEqual(1, len(migrations))
        self.assertEqual('evacuation', migrations[0]['migration_type'])
        self.assertEqual(server['id'], migrations[0]['instance_uuid'])
        self.assertEqual(self.hostname, migrations[0]['source_compute'])
        self.assertEqual('error', migrations[0]['status'])
Example 11
    def test_aggregate_update_metadata(self):
        aggregate_req = {
            "aggregate": {
                "name": "my-aggregate",
                "availability_zone": "nova"}}
        aggregate = self.admin_api.post_aggregate(aggregate_req)

        set_metadata_req = {
            "set_metadata": {
                "metadata": {
                    "availability_zone": "AZ-1"
                }
            }
        }
        fake_notifier.reset()
        self.admin_api.post_aggregate_action(aggregate['id'], set_metadata_req)

        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-update_metadata-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'aggregate-update_metadata-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
Example 12
    def setUp(self):
        super(NotificationSampleTestBase, self).setUp()

        api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(api_version='v2.1'))

        self.api = api_fixture.api
        self.admin_api = api_fixture.admin_api

        max_version = self.MAX_MICROVERSION
        self.api.microversion = max_version
        self.admin_api.microversion = max_version

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))

        # the image fake backend needed for image discovery
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
        self.useFixture(func_fixtures.PlacementFixture())

        context_patcher = mock.patch(
            'oslo_context.context.generate_request_id',
            return_value='req-5b6c791d-5709-4f36-8fbe-c3e02869e35d')
        self.mock_gen_request_id = context_patcher.start()
        self.addCleanup(context_patcher.stop)

        self.start_service('conductor')
        self.start_service('scheduler')
        self.compute = self.start_service('compute')
        # Reset the service create notifications
        fake_notifier.reset()
Example 13
    def test_instance_action(self):
        # A single test case is used to test most of the instance action
        # notifications to avoid booting up an instance for every action
        # separately.
        # Every instance action test function shall make sure that after it
        # runs the instance is in an active state and usable by other actions.
        # Therefore some actions, especially delete, cannot be used here, as
        # recovering from them would mean recreating the instance, which would
        # defeat the whole purpose of this optimization.

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})

        actions = [
            self._test_power_on_server,
            self._test_restore_server,
            self._test_suspend_server,
            self._test_pause_server,
            self._test_shelve_server,
            self._test_resize_server,
        ]

        for action in actions:
            fake_notifier.reset()
            action(server)
Example 14
    def test_server_group_add_member(self):
        group_req = {
            "name": "test-server-group",
            "policy": "anti-affinity",
            "rules": {"max_server_per_host": 3}
        }
        group = self.api.post_server_groups(group_req)
        fake_notifier.reset()

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
            scheduler_hints={"group": group['id']})
        self._wait_for_notification('instance.update')
        # 0: server_group.add_member
        # 1: scheduler-select_destinations-start
        # 2: scheduler-select_destinations-end
        # 3: instance.create.start
        # 4: instance.create.end
        # 5: instance.update
        #    (Due to adding server tags in the '_boot_a_server' method.)
        self.assertEqual(6, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'server_group-add_member',
            replacements={'uuid': group['id'],
                          'members': [server['id']]},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
Example 15
    def test_instance_action(self):
        # A single test case is used to test most of the instance action
        # notifications to avoid booting up an instance for every action
        # separately.
        # Every instance action test function shall make sure that after it
        # runs the instance is in an active state and usable by other actions.
        # Therefore some actions, especially delete, cannot be used here, as
        # recovering from them would mean recreating the instance, which would
        # defeat the whole purpose of this optimization.

        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]})

        actions = [
            self._test_power_off_on_server,
            self._test_restore_server,
            self._test_suspend_server,
            self._test_resume_server,
            self._test_pause_server,
            self._test_unpause_server,
            self._test_shelve_server,
            self._test_shelve_offload_server,
            self._test_unshelve_server,
            self._test_resize_server,
            self._test_snapshot_server,
        ]

        for action in actions:
            fake_notifier.reset()
            action(server)
Example 16
    def test_rebuild_server_exc(self, mock_rebuild):
        def _compute_resources_unavailable(*args, **kwargs):
            raise exception.ComputeResourcesUnavailable(
                reason="fake-resource")

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})

        fake_notifier.reset()

        post = {
            'rebuild': {
                'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'metadata': {}
            }
        }
        self.api.post_server_action(server['id'], post)
        mock_rebuild.side_effect = _compute_resources_unavailable
        self._wait_for_state_change(self.api, server, expected_status='ERROR')
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-rebuild-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
Example 17
    def test_rebuild_server(self):
        # NOTE(gabor_antal): Rebuild changes the image used by the instance,
        # therefore the actions tested in test_instance_action had to be in
        # specific order. To avoid this problem, rebuild was moved from
        # test_instance_action to its own method.

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        fake_notifier.reset()

        post = {
            'rebuild': {
                'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'metadata': {}
            }
        }
        self.api.post_server_action(server['id'], post)
        # Before going back to the ACTIVE state, the server state needs to be
        # changed to the REBUILD state.
        self._wait_for_state_change(self.api, server,
                                    expected_status='REBUILD')
        self._wait_for_state_change(self.api, server,
                                    expected_status='ACTIVE')

        # The compute/manager will detach every volume during rebuild
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-rebuild-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-volume_detach-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'task_state': 'rebuilding',
                'architecture': None,
                'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        self._verify_notification(
            'instance-volume_detach-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'task_state': 'rebuilding',
                'architecture': None,
                'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
        self._verify_notification(
            'instance-rebuild-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
Example 18
    def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):

        # We have to depend on a specific image and flavor to fix the content
        # of the notification that will be emitted
        flavor_body = {'flavor': {'name': 'test_flavor',
                                  'ram': 512,
                                  'vcpus': 1,
                                  'disk': 1,
                                  'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
                                  }}

        flavor_id = self.api.post_flavor(flavor_body)['id']
        extra_specs = {
            "extra_specs": {
                "hw:watchdog_action": "disabled"}}
        self.admin_api.post_extra_spec(flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        keypair_req = {
            "keypair": {
                "name": "my-key",
                "public_key": fake_crypto.get_ssh_public_key()
            }}
        self.api.post_keypair(keypair_req)

        server = self._build_minimal_create_server_request(
            self.api, 'some-server',
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)

        # NOTE(gibi): from microversion 2.19 the description is not set to the
        # instance name automatically but can be provided at boot.
        server['description'] = 'some-server'

        if extra_params:
            extra_params['return_reservation_id'] = True
            extra_params['key_name'] = 'my-key'
            server.update(extra_params)

        post = {'server': server}
        created_server = self.api.post_server(post)
        reservation_id = created_server['reservation_id']
        created_server = self.api.get_servers(
            detail=False,
            search_opts={'reservation_id': reservation_id})[0]

        self.assertTrue(created_server['id'])

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(self.api, created_server,
                                                   expected_status)
        found_server['reservation_id'] = reservation_id

        if found_server['status'] == 'ACTIVE':
            self.api.put_server_tags(found_server['id'], ['tag1'])
        return found_server
Example 19
    def test_rebuild_server(self):
        # NOTE(gabor_antal): Rebuild changes the image used by the instance,
        # therefore the actions tested in test_instance_action had to be in
        # specific order. To avoid this problem, rebuild was moved from
        # test_instance_action to its own method.

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        fake_notifier.reset()

        post = {
            'rebuild': {
                'imageRef': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'metadata': {}
            }
        }
        self.api.post_server_action(server['id'], post)
        # Before going back to the ACTIVE state, the server state needs to be
        # changed to the REBUILD state.
        self._wait_for_state_change(self.api, server,
                                    expected_status='REBUILD')
        self._wait_for_state_change(self.api, server,
                                    expected_status='ACTIVE')

        # The compute/manager will detach every volume during rebuild
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-rebuild-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-volume_detach-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'task_state': 'rebuilding',
                'architecture': None,
                'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        self._verify_notification(
            'instance-volume_detach-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'task_state': 'rebuilding',
                'architecture': None,
                'image_uuid': 'a2459075-d96c-40d5-893e-577ff92e721c',
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
        self._verify_notification(
            'instance-rebuild-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])
Example 20
    def _setup_server_with_volume_attached(self):
        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
        fake_notifier.reset()

        return server
Example 21
    def test_resize_server_error_and_reschedule_was_failed(
            self, mock_prep_resize, mock_reschedule):
        """Test it, when the prep_resize method raise an exception,
        after trying again with the reschedule_resize_or_reraise method
        call, but the rescheduled also was unsuccessful. In this
        case called the exception block.
        In the exception block send a notification about error.
        At end called the six.reraise(*exc_info), which not
        send another error.
        """
        def _build_resources(*args, **kwargs):
            raise exception.FlavorDiskTooSmall()

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self.flags(allow_resize_to_same_host=True)
        other_flavor_body = {
            'flavor': {
                'name': 'other_flavor_error',
                'ram': 512,
                'vcpus': 1,
                'disk': 1,
                'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e9'
            }
        }
        other_flavor_id = self.api.post_flavor(other_flavor_body)['id']

        post = {
            'resize': {
                'flavorRef': other_flavor_id
            }
        }
        fake_notifier.reset()
        mock_prep_resize.side_effect = _build_resources
        # It isn't realistic that _reschedule would raise FlavorDiskTooSmall,
        # but it's needed for the notification sample to work.
        mock_reschedule.side_effect = _build_resources
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.api, server, expected_status='ERROR')
        # There should be two notifications, one for the instance.resize.error
        # and one for the compute.exception via the wrap_exception decorator on
        # the ComputeManager.prep_resize method.
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                         'Unexpected number of notifications: %s' %
                         fake_notifier.VERSIONED_NOTIFICATIONS)
        self._wait_for_notification('compute.exception')
        self._verify_notification('instance-resize-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
Example 22
    def test_aggregate_add_remove_host(self):
        aggregate_req = {
            "aggregate": {
                "name": "my-aggregate",
                "availability_zone": "nova"}}
        aggregate = self.admin_api.post_aggregate(aggregate_req)

        fake_notifier.reset()

        add_host_req = {
            "add_host": {
                "host": "compute"
            }
        }
        self.admin_api.post_aggregate_action(aggregate['id'], add_host_req)

        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-add_host-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'aggregate-add_host-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])

        remove_host_req = {
            "remove_host": {
                "host": "compute"
            }
        }
        self.admin_api.post_aggregate_action(aggregate['id'], remove_host_req)

        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'aggregate-remove_host-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
        self._verify_notification(
            'aggregate-remove_host-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[3])

        self.admin_api.delete_aggregate(aggregate['id'])
Example 23
    def _test_shelve_and_shelve_offload_server(self, server):
        self.flags(shelved_offload_time=-1)
        self.api.post_server_action(server['id'], {'shelve': {}})
        self._wait_for_state_change(self.api,
                                    server,
                                    expected_status='SHELVED')

        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-shelve-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-shelve-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])

        fake_notifier.reset()
        self.api.post_server_action(server['id'], {'shelveOffload': {}})
        # We need to wait for instance.host to become None as well before we
        # can unshelve, to make sure that the unshelve.start notification
        # payload is stable: the compute manager first sets the instance
        # state and only a bit later sets instance.host to None.
        self._wait_for_server_parameter(self.api, server, {
            'status': 'SHELVED_OFFLOADED',
            'OS-EXT-SRV-ATTR:host': None
        })

        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-shelve_offload-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-shelve_offload-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])

        self.api.post_server_action(server['id'], {'unshelve': None})
        self._wait_for_state_change(self.api, server, 'ACTIVE')
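The comment above explains why the test polls both the status and OS-EXT-SRV-ATTR:host before moving on. A plausible polling loop for _wait_for_server_parameter, assuming the test client's get_server call; the retry count and sleep interval are illustrative:

    import time

    def _wait_for_server_parameter(self, api, server, expected_params,
                                   max_retries=10):
        retries = 0
        while True:
            server = api.get_server(server['id'])
            # Done once every requested attribute has the expected value.
            if all(server[attr] == expected_params[attr]
                   for attr in expected_params):
                return server
            retries += 1
            if retries >= max_retries:
                self.fail('Waiting for server parameters %s timed out; '
                          'last seen: %s' % (expected_params, server))
            time.sleep(0.5)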
Example 24
    def test_aggregate_cache_images(self):
        aggregate_req = {
            "aggregate": {
                "name": "my-aggregate",
                "availability_zone": "nova"}}
        aggregate = self.admin_api.post_aggregate(aggregate_req)
        add_host_req = {
            "add_host": {
                "host": "compute"
            }
        }
        self.admin_api.post_aggregate_action(aggregate['id'], add_host_req)

        fake_notifier.reset()

        cache_images_req = {
            'cache': [
                {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}
            ]
        }
        self.admin_api.api_post('/os-aggregates/%s/images' % aggregate['id'],
                                cache_images_req)
        # Since the operation is asynchronous we have to wait for the end
        # notification.
        fake_notifier.wait_for_versioned_notifications(
            'aggregate.cache_images.end')

        self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                         fake_notifier.VERSIONED_NOTIFICATIONS)
        self._verify_notification(
            'aggregate-cache_images-start',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'aggregate-cache_images-progress',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
        self._verify_notification(
            'aggregate-cache_images-end',
            replacements={
                'uuid': aggregate['uuid'],
                'id': aggregate['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
Example 25
    def _test_resize_server(self, server):
        self.flags(allow_resize_to_same_host=True)
        other_flavor_body = {
            'flavor': {
                'name': 'other_flavor',
                'ram': 256,
                'vcpus': 1,
                'disk': 1,
                'id': 'd5a8bb54-365a-45ae-abdb-38d249df7845'
            }
        }
        other_flavor_id = self.api.post_flavor(other_flavor_body)['id']
        extra_specs = {
            "extra_specs": {
                "hw:watchdog_action": "reset"}}
        self.admin_api.post_extra_spec(other_flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        post = {
            'resize': {
                'flavorRef': other_flavor_id
            }
        }
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')

        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        # This list needs to be in order.
        expected_notifications = [
            'instance-resize-start',
            'instance-resize-end',
            'instance-resize_finish-start',
            'instance-resize_finish-end'
        ]
        for idx, notification in enumerate(expected_notifications):
            self._verify_notification(
                notification,
                replacements={
                    'reservation_id': server['reservation_id'],
                    'uuid': server['id']},
                actual=fake_notifier.VERSIONED_NOTIFICATIONS[idx])

        post = {'revertResize': None}
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.api, server, 'ACTIVE')
Example 26
    def _test_resize_server(self, server):
        self.flags(allow_resize_to_same_host=True)
        other_flavor_body = {
            'flavor': {
                'name': 'other_flavor',
                'ram': 256,
                'vcpus': 1,
                'disk': 1,
                'id': 'd5a8bb54-365a-45ae-abdb-38d249df7845'
            }
        }
        other_flavor_id = self.api.post_flavor(other_flavor_body)['id']
        extra_specs = {
            "extra_specs": {
                "hw:watchdog_action": "reset"}}
        self.admin_api.post_extra_spec(other_flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        post = {
            'resize': {
                'flavorRef': other_flavor_id
            }
        }
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')

        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        # This list needs to be in order.
        expected_notifications = [
            'instance-resize-start',
            'instance-resize-end',
            'instance-resize_finish-start',
            'instance-resize_finish-end'
        ]
        for idx, notification in enumerate(expected_notifications):
            self._verify_notification(
                notification,
                replacements={
                    'reservation_id': server['reservation_id'],
                    'uuid': server['id']},
                actual=fake_notifier.VERSIONED_NOTIFICATIONS[idx])

        post = {'revertResize': None}
        self.api.post_server_action(server['id'], post)
        self._wait_for_state_change(self.api, server, 'ACTIVE')
Example 27
    def test_live_migration_actions(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._wait_for_notification('instance.create.end')
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
        # server will boot on host1
        self.useFixture(fixtures.ConfPatcher(host='host2'))
        self.compute2 = self.start_service('compute', host='host2')

        actions = [
            self._test_live_migration_rollback,
        ]

        for action in actions:
            fake_notifier.reset()
            action(server)
            # Ensure that the instance is in an active state after each action
            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
Example 28
    def test_live_migration_actions(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._wait_for_notification('instance.create.end')
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)
        # server will boot on host1
        self.useFixture(fixtures.ConfPatcher(host='host2'))
        self.compute2 = self.start_service('compute', host='host2')

        actions = [
            self._test_live_migration_rollback,
        ]

        for action in actions:
            fake_notifier.reset()
            action(server)
            # Ensure that the instance is in an active state after each action
            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
Example 29
    def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):

        # We have to depend on a specific image and flavor to fix the content
        # of the notification that will be emitted
        flavor_body = {
            'flavor': {
                'name': 'test_flavor',
                'ram': 512,
                'vcpus': 1,
                'disk': 1,
                'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
            }
        }

        flavor_id = self.api.post_flavor(flavor_body)['id']
        extra_specs = {"extra_specs": {"hw:watchdog_action": "disabled"}}
        self.admin_api.post_extra_spec(flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        server = self._build_minimal_create_server_request(
            self.api,
            'some-server',
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)

        if extra_params:
            extra_params['return_reservation_id'] = True
            server.update(extra_params)

        post = {'server': server}
        created_server = self.api.post_server(post)
        reservation_id = created_server['reservation_id']
        created_server = self.api.get_servers(
            detail=False, search_opts={'reservation_id': reservation_id})[0]

        self.assertTrue(created_server['id'])

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(self.api, created_server,
                                                   expected_status)
        found_server['reservation_id'] = reservation_id
        return found_server
Example 30
    def test_instance_action(self):
        # A single test case is used to test most of the instance action
        # notifications to avoid booting up an instance for every action
        # separately.
        # Every instance action test function shall make sure that after it
        # runs the instance is in an active state and usable by other actions.
        # Therefore some actions, especially delete, cannot be used here, as
        # recovering from them would mean recreating the instance, which would
        # defeat the whole purpose of this optimization.

        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]})

        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        actions = [
            self._test_power_off_on_server,
            self._test_restore_server,
            self._test_suspend_resume_server,
            self._test_pause_unpause_server,
            self._test_shelve_server,
            self._test_shelve_offload_server,
            self._test_unshelve_server,
            self._test_resize_server,
            self._test_revert_server,
            self._test_resize_confirm_server,
            self._test_snapshot_server,
            self._test_reboot_server,
            self._test_reboot_server_error,
            self._test_trigger_crash_dump,
            self._test_volume_detach_attach_server,
            self._test_rescue_server,
            self._test_unrescue_server,
            self._test_soft_delete_server,
            self._test_attach_volume_error,
        ]

        for action in actions:
            fake_notifier.reset()
            action(server)
            # Ensure that the instance is in an active state after each action
            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
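Each _test_* helper in the list above follows the same contract: perform one action, assert on the notifications it emitted (the loop has already reset the notifier), and leave the instance ACTIVE for the next action. A hypothetical helper in that shape, modeled on the pause/unpause pair and the sample names used elsewhere in this listing:

    def _test_pause_unpause_server(self, server):
        self.api.post_server_action(server['id'], {'pause': {}})
        self._wait_for_state_change(self.api, server, 'PAUSED')
        self.api.post_server_action(server['id'], {'unpause': {}})
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        # 4 notifications: pause-start, pause-end, unpause-start, unpause-end
        self.assertEqual(4, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-pause-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])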
Example 31
    def _boot_a_server(self, expected_status='ACTIVE', extra_params=None):

        # We have to depend on a specific image and flavor to fix the content
        # of the notification that will be emitted
        flavor_body = {'flavor': {'name': 'test_flavor',
                                  'ram': 512,
                                  'vcpus': 1,
                                  'disk': 1,
                                  'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
                                  }}

        flavor_id = self.api.post_flavor(flavor_body)['id']
        extra_specs = {
            "extra_specs": {
                "hw:watchdog_action": "disabled"}}
        self.admin_api.post_extra_spec(flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        server = self._build_minimal_create_server_request(
            self.api, 'some-server',
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)

        if extra_params:
            extra_params['return_reservation_id'] = True
            server.update(extra_params)

        post = {'server': server}
        created_server = self.api.post_server(post)
        reservation_id = created_server['reservation_id']
        created_server = self.api.get_servers(
            detail=False,
            search_opts={'reservation_id': reservation_id})[0]

        self.assertTrue(created_server['id'])

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(self.api, created_server,
                                                   expected_status)
        found_server['reservation_id'] = reservation_id
        return found_server
Example 32
    def test_resize_server_error_but_reschedule_was_success(
            self, mock_prep_resize, mock_reschedule):
        """Test it, when the prep_resize method raise an exception,
        but the reschedule_resize_or_reraise was successful and
        scheduled the resize. In this case we get a notification
        about the exception, which caused the prep_resize error.
        """
        def _build_resources(*args, **kwargs):
            raise exception.FlavorDiskTooSmall()

        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]})
        self.flags(allow_resize_to_same_host=True)
        other_flavor_body = {
            'flavor': {
                'name': 'other_flavor_error',
                'ram': 512,
                'vcpus': 1,
                'disk': 1,
                'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e9'
            }
        }
        other_flavor_id = self.api.post_flavor(other_flavor_body)['id']

        post = {'resize': {'flavorRef': other_flavor_id}}
        fake_notifier.reset()
        mock_prep_resize.side_effect = _build_resources
        self.api.post_server_action(server['id'], post)
        self._wait_for_notification('instance.resize.error')
        self.assertEqual(
            1, len(fake_notifier.VERSIONED_NOTIFICATIONS),
            'Unexpected number of notifications: %s' %
            fake_notifier.VERSIONED_NOTIFICATIONS)
        self._verify_notification(
            'instance-resize-error',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
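Both resize error tests receive mock_prep_resize and mock_reschedule arguments, so the listing has presumably dropped @mock.patch decorators along these lines. The patch targets below are inferred from the parameter names, not taken from the source; note that decorators apply bottom-up, so the bottom one feeds the first mock argument:

    import mock

    @mock.patch('nova.compute.manager.ComputeManager.'
                '_reschedule_resize_or_reraise')
    @mock.patch('nova.compute.manager.ComputeManager._prep_resize')
    def test_resize_server_error_but_reschedule_was_success(
            self, mock_prep_resize, mock_reschedule):
        ...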
Example 33
    def test_instance_action(self):
        # A single test case is used to test most of the instance action
        # notifications to avoid booting up an instance for every action
        # separately.
        # Every instance action test function shall make sure that after it
        # runs the instance is in an active state and usable by other actions.
        # Therefore some actions, especially delete, cannot be used here, as
        # recovering from them would mean recreating the instance, which would
        # defeat the whole purpose of this optimization.

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})

        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        actions = [
            self._test_power_off_on_server,
            self._test_restore_server,
            self._test_suspend_resume_server,
            self._test_pause_unpause_server,
            self._test_shelve_server,
            self._test_shelve_offload_server,
            self._test_unshelve_server,
            self._test_resize_server,
            self._test_revert_server,
            self._test_resize_confirm_server,
            self._test_snapshot_server,
            self._test_reboot_server,
            self._test_reboot_server_error,
            self._test_trigger_crash_dump,
            self._test_volume_detach_attach_server,
            self._test_rescue_server,
            self._test_unrescue_server,
            self._test_soft_delete_server,
            self._test_attach_volume_error,
        ]

        for action in actions:
            fake_notifier.reset()
            action(server)
            # Ensure that the instance is in an active state after each action
            self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
Example 34
    def test_server_group_create_delete(self):
        group_req = {
            "name": "test-server-group",
            "policies": ["anti-affinity"]}
        group = self.api.post_server_groups(group_req)

        self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'server_group-create',
            replacements={'uuid': group['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])

        fake_notifier.reset()
        self.api.delete_server_group(group['id'])

        self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'server_group-delete',
            replacements={'uuid': group['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
Example 35
    def _test_volume_detach_attach_server(self, server):
        self._detach_volume_from_server(server, self.cinder.SWAP_OLD_VOL)

        # 0. volume_detach-start
        # 1. volume_detach-end
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-volume_detach-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-volume_detach-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])

        fake_notifier.reset()
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        # 0. volume_attach-start
        # 1. volume_attach-end
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-volume_attach-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-volume_attach-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
Example 36
    def test_rebuild_fault(self):
        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]},
            additional_extra_specs={
                'hw:numa_nodes': 1,
                'hw:numa_cpus.0': '0',
                'hw:numa_mem.0': 512
            })
        self._wait_for_notification('instance.create.end')
        # Force down the compute node
        service_id = self.api.get_service_id('nova-compute')
        self.admin_api.put_service_force_down(service_id, True)

        fake_notifier.reset()

        # NOTE(takashin): The rebuild action and the evacuate action share the
        # same code path, so the 'evacuate' action is used for this test.
        post = {'evacuate': {}}

        self.admin_api.post_server_action(server['id'], post)
        self._wait_for_notification('compute_task.rebuild_server.error')
        # 0. instance.evacuate
        # 1. scheduler.select_destinations.start
        # 2. compute_task.rebuild_server.error
        self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                         fake_notifier.VERSIONED_NOTIFICATIONS)
        self._verify_notification(
            'compute_task-rebuild_server-error',
            replacements={
                'instance_uuid': server['id'],
                'request_spec.instance_uuid': server['id'],
                'request_spec.security_groups': [],
                'request_spec.numa_topology.instance_uuid': server['id'],
                'request_spec.pci_requests.instance_uuid': server['id'],
                'reason.function_name': self.ANY,
                'reason.module_name': self.ANY,
                'reason.traceback': self.ANY
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
Example 37
    def _wait_for_notifications(self, event_type, expected_count, timeout=1.0):
        notifications = []
        start_time = time.monotonic()

        while (len(notifications) < expected_count
               and time.monotonic() - start_time < timeout):

            fake_notifier.wait_for_versioned_notification(event_type, timeout)
            notifications += self._get_notifications(event_type)
            # NOTE(gibi): reading and then resetting the fake_notifier without
            # synchronization doesn't lead to a race condition as the only
            # parallelism is due to eventlet.
            fake_notifier.reset()

        self.assertEqual(expected_count, len(notifications),
                         'Unexpected number of %s notifications '
                         'within the given timeout. '
                         'Expected %d, got %d: %s' %
                         (event_type, expected_count, len(notifications),
                          notifications))
        return notifications
Example 38
    def _wait_for_notifications(self, event_type, expected_count, timeout=1.0):
        notifications = []
        start_time = time.monotonic()

        while (len(notifications) < expected_count
               and time.monotonic() - start_time < timeout):

            fake_notifier.wait_for_versioned_notification(event_type, timeout)
            notifications += self._get_notifications(event_type)
            # NOTE(gibi): reading and then resetting the fake_notifier without
            # synchronization doesn't lead to a race condition as the only
            # parallelism is due to eventlet.
            fake_notifier.reset()

        self.assertEqual(
            expected_count, len(notifications),
            'Unexpected number of %s notifications '
            'within the given timeout. '
            'Expected %d, got %d: %s' %
            (event_type, expected_count, len(notifications), notifications))
        return notifications
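A hypothetical call site for the helper above. Because the helper resets the notifier inside its loop, repeated events of the same type are accumulated across resets and returned together:

    # Expect exactly two instance.update notifications within the timeout.
    notifications = self._wait_for_notifications('instance.update',
                                                 expected_count=2)
    for notification in notifications:
        self.assertEqual('instance.update', notification['event_type'])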
Example 39
    def test_server_group_create_delete(self):
        group_req = {
            "name": "test-server-group",
            "policy": "anti-affinity",
            "rules": {"max_server_per_host": 3}
        }
        group = self.api.post_server_groups(group_req)

        self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'server_group-create',
            replacements={'uuid': group['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])

        fake_notifier.reset()
        self.api.delete_server_group(group['id'])

        self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'server_group-delete',
            replacements={'uuid': group['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
Example No. 40
    def _test_restore_server(self, server):
        self.flags(reclaim_instance_interval=30)
        self.api.delete_server(server['id'])
        self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
        # we don't want to test soft_delete here
        fake_notifier.reset()
        self.api.post_server_action(server['id'], {'restore': {}})
        self._wait_for_state_change(self.api, server, 'ACTIVE')

        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-restore-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-restore-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
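Being an underscore-prefixed helper rather than a test, this needs a concrete caller. A plausible wiring, assuming the _boot_a_server and _wait_for_notification helpers from the other examples on this page:

    def test_restore_server(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._wait_for_notification('instance.create.end')
        # Drop the boot notifications so the helper only sees the
        # soft-delete/restore sequence it is interested in.
        fake_notifier.reset()
        self._test_restore_server(server)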
Example No. 42
    def test_migrate_fault(self):
        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]},
            additional_extra_specs={
                'hw:numa_nodes': 1,
                'hw:numa_cpus.0': '0',
                'hw:numa_mem.0': 512
            })
        self._wait_for_notification('instance.create.end')
        # Force down the compute node
        service_id = self.api.get_service_id('nova-compute')
        self.admin_api.put_service_force_down(service_id, True)

        fake_notifier.reset()

        self.assertRaises(api_client.OpenStackApiException,
                          self.admin_api.post_server_action, server['id'],
                          {'migrate': None})
        self._wait_for_notification('compute_task.migrate_server.error')
        # 0. scheduler.select_destinations.start
        # 1. compute_task.migrate_server.error
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                         fake_notifier.VERSIONED_NOTIFICATIONS)
        self._verify_notification(
            'compute_task-migrate_server-error',
            replacements={
                'instance_uuid': server['id'],
                'request_spec.instance_uuid': server['id'],
                'request_spec.security_groups': [],
                'request_spec.numa_topology.instance_uuid': server['id'],
                'request_spec.pci_requests.instance_uuid': server['id'],
                'reason.function_name': self.ANY,
                'reason.module_name': self.ANY,
                'reason.traceback': self.ANY
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
Example No. 43
    def test_migrate_fault(self):
        server = self._boot_a_server(
            extra_params={'networks': [{
                'port': self.neutron.port_1['id']
            }]},
            additional_extra_specs={
                'hw:numa_nodes': 1,
                'hw:numa_cpus.0': '0',
                'hw:numa_mem.0': 512
            })
        self._wait_for_notification('instance.create.end')
        # Disable the compute node
        service_id = self.api.get_service_id('nova-compute')
        self.admin_api.put_service(service_id, {'status': 'disabled'})

        fake_notifier.reset()

        # Note that the operation will return a 202 response but fail with
        # NoValidHost asynchronously.
        self.admin_api.post_server_action(server['id'], {'migrate': None})
        self._wait_for_notification('compute_task.migrate_server.error')
        # 0. scheduler.select_destinations.start
        # 1. compute_task.migrate_server.error
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
                         fake_notifier.VERSIONED_NOTIFICATIONS)
        self._verify_notification(
            'compute_task-migrate_server-error',
            replacements={
                'instance_uuid': server['id'],
                'request_spec.instance_uuid': server['id'],
                'request_spec.security_groups': [],
                'request_spec.numa_topology.instance_uuid': server['id'],
                'request_spec.pci_requests.instance_uuid': server['id'],
                'reason.function_name': self.ANY,
                'reason.module_name': self.ANY,
                'reason.traceback': self.ANY
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
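Contrast this with Example No. 42 above: forcing the service down makes the API reject the migration synchronously, while merely disabling it lets the request through with a 202 and the NoValidHost failure only surfaces through the error notification. A hypothetical cleanup step, mirroring the disable call above, to leave the compute usable for later tests:

        # Re-enable the compute service so subsequent scheduling on the
        # same host is not starved of valid destinations.
        self.admin_api.put_service(service_id, {'status': 'enabled'})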
Example No. 44
    def _test_volume_attach_detach_server(self, server):
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        # 0. volume_attach-start
        # 1. volume_attach-end
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-volume_attach-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-volume_attach-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])

        fake_notifier.reset()
        self._detach_volume_from_server(server, self.cinder.SWAP_OLD_VOL)

        # 0. volume_detach-start
        # 1. volume_detach-end
        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'instance-volume_detach-start',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
        self._verify_notification(
            'instance-volume_detach-end',
            replacements={
                'reservation_id': server['reservation_id'],
                'uuid': server['id']},
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
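As with the restore helper above, a concrete test has to supply the booted server. A plausible caller under the same assumptions (_boot_a_server and _wait_for_notification from the surrounding examples):

    def test_volume_attach_detach(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._wait_for_notification('instance.create.end')
        fake_notifier.reset()  # start the helper from a clean capture list
        self._test_volume_attach_detach_server(server)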
Example No. 45
    def test_rebuild_fault(self):
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
            additional_extra_specs={'hw:numa_nodes': 1,
                                    'hw:numa_cpus.0': '0',
                                    'hw:numa_mem.0': 512})
        self._wait_for_notification('instance.create.end')
        # Force down the compute node
        service_id = self.api.get_service_id('nova-compute')
        self.admin_api.put_service_force_down(service_id, True)

        fake_notifier.reset()

        # NOTE(takashin): The rebuild action and the evacuate action share
        # the same code path, so the 'evacuate' action is used for this test.
        post = {'evacuate': {}}

        self.admin_api.post_server_action(server['id'], post)
        self._wait_for_notification('compute_task.rebuild_server.error')
        # 0. instance.evacuate
        # 1. scheduler.select_destinations.start
        # 2. compute_task.rebuild_server.error
        self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        self._verify_notification(
            'compute_task-rebuild_server-error',
            replacements={
                'instance_uuid': server['id'],
                'request_spec.instance_uuid': server['id'],
                'request_spec.security_groups': [],
                'request_spec.numa_topology.instance_uuid': server['id'],
                'request_spec.pci_requests.instance_uuid': server['id'],
                'reason.function_name': self.ANY,
                'reason.module_name': self.ANY,
                'reason.traceback': self.ANY
            },
            actual=fake_notifier.VERSIONED_NOTIFICATIONS[2])
Example No. 46
    def test_create_delete_server_with_instance_update(self):
        self.flags(notify_on_state_change='vm_and_task_state')

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})

        instance_updates = self._get_notifications('instance.update')

        # The first notification comes from nova-api; the rest come from
        # nova-compute. To keep the test simpler, assert this fact and then
        # modify the publisher_id of the first notification to match the
        # template.
        self.assertEqual('nova-api:fake-mini',
                         instance_updates[0]['publisher_id'])
        instance_updates[0]['publisher_id'] = 'nova-compute:fake-mini'

        self.assertEqual(7, len(instance_updates))
        create_steps = [
            # nothing -> scheduling
            {'reservation_id': server['reservation_id'],
             'uuid': server['id'],
             'host': None,
             'node': None,
             'state_update.new_task_state': 'scheduling',
             'state_update.old_task_state': 'scheduling',
             'state_update.state': 'building',
             'state_update.old_state': 'building',
             'state': 'building'},

            # scheduling -> building
            {
             'state_update.new_task_state': None,
             'state_update.old_task_state': 'scheduling',
             'task_state': None},

            # scheduled
            {'host': 'compute',
             'node': 'fake-mini',
             'state_update.old_task_state': None},

            # building -> networking
            {'state_update.new_task_state': 'networking',
             'state_update.old_task_state': 'networking',
             'task_state': 'networking'},

            # networking -> block_device_mapping
            {'state_update.new_task_state': 'block_device_mapping',
             'state_update.old_task_state': 'networking',
             'task_state': 'block_device_mapping',
            },

            # block_device_mapping -> spawning
            {'state_update.new_task_state': 'spawning',
             'state_update.old_task_state': 'block_device_mapping',
             'task_state': 'spawning',
             'ip_addresses': [{
                 "nova_object.name": "IpPayload",
                 "nova_object.namespace": "nova",
                 "nova_object.version": "1.0",
                 "nova_object.data": {
                     "mac": "fa:16:3e:4c:2c:30",
                     "address": "192.168.1.3",
                     "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
                     "meta": {},
                     "version": 4,
                     "label": "private-network",
                     "device_name": "tapce531f90-19"
                 }}]
             },

            # spawning -> active
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'spawning',
             'state_update.state': 'active',
             'launched_at': '2012-10-29T13:42:11Z',
             'state': 'active',
             'task_state': None,
             'power_state': 'running'},
        ]

        replacements = self._verify_instance_update_steps(
                create_steps, instance_updates)

        fake_notifier.reset()

        # Let's generate some bandwidth usage data.
        # Just call the periodic task directly for simplicity
        self.compute.manager._poll_bandwidth_usage(context.get_admin_context())

        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)

        instance_updates = self._get_notifications('instance.update')
        self.assertEqual(2, len(instance_updates))

        delete_steps = [
            # active -> deleting
            {'state_update.new_task_state': 'deleting',
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state': 'active',
             'task_state': 'deleting',
             'bandwidth': [
                 {'nova_object.namespace': 'nova',
                  'nova_object.name': 'BandwidthPayload',
                  'nova_object.data':
                      {'network_name': 'private-network',
                       'out_bytes': 0,
                       'in_bytes': 0},
                  'nova_object.version': '1.0'}]
            },

            # deleting -> deleted
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state_update.state': 'deleted',
             'state': 'deleted',
             'task_state': None,
             'terminated_at': '2012-10-29T13:42:11Z',
             'ip_addresses': [],
             'power_state': 'pending',
             'bandwidth': []},
        ]

        self._verify_instance_update_steps(delete_steps, instance_updates,
                                           initial=replacements)
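The assertions above lean on _get_notifications to filter the captured stream by event type. Its body is not shown on this page; a minimal sketch of what such a filter can look like, assuming fake_notifier stores each versioned notification as a dict carrying an event_type key:

    def _get_notifications(self, event_type):
        # Keep the captured order; callers index into the result
        # positionally when verifying the individual update steps.
        return [notification
                for notification in fake_notifier.VERSIONED_NOTIFICATIONS
                if notification['event_type'] == event_type]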
Example No. 47
    def _boot_a_server(self, expected_status='ACTIVE', extra_params=None,
                       scheduler_hints=None, additional_extra_specs=None):

        # We have to depend on a specific image and flavor to fix the content
        # of the notification that will be emitted
        flavor_body = {'flavor': {'name': 'test_flavor',
                                  'ram': 512,
                                  'vcpus': 1,
                                  'disk': 1,
                                  'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
                                  }}

        flavor_id = self.api.post_flavor(flavor_body)['id']
        extra_specs = {
            "extra_specs": {
                "hw:watchdog_action": "disabled"}}
        if additional_extra_specs:
            extra_specs['extra_specs'].update(additional_extra_specs)
        self.admin_api.post_extra_spec(flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        keypair_req = {
            "keypair": {
                "name": "my-key",
                "public_key": fake_crypto.get_ssh_public_key()
            }}
        self.api.post_keypair(keypair_req)

        keypair_expected_notifications = [
            'keypair-import-start',
            'keypair-import-end'
        ]
        self.assertLessEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        for notification in keypair_expected_notifications:
            self._verify_notification(
                notification,
                actual=fake_notifier.VERSIONED_NOTIFICATIONS.pop(0))

        server = self._build_minimal_create_server_request(
            self.api, 'some-server',
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)

        # NOTE(gibi): from microversion 2.19 the description is not set to the
        # instance name automatically but can be provided at boot.
        server['description'] = 'some-server'

        if extra_params:
            extra_params['return_reservation_id'] = True
            extra_params['key_name'] = 'my-key'
            server.update(extra_params)

        post = {'server': server}
        if scheduler_hints:
            post.update({"os:scheduler_hints": scheduler_hints})

        created_server = self.api.post_server(post)
        reservation_id = created_server['reservation_id']
        created_server = self.api.get_servers(
            detail=False,
            search_opts={'reservation_id': reservation_id})[0]

        self.assertTrue(created_server['id'])

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(self.api, created_server,
                                                   expected_status)
        found_server['reservation_id'] = reservation_id

        # Note(elod.illes): let's just pop and verify the dest_select
        # notifications if we don't have a special case
        if scheduler_hints is None and expected_status != 'ERROR':
            self._pop_and_verify_dest_select_notification(found_server['id'])

        if found_server['status'] == 'ACTIVE':
            self.api.put_server_tags(found_server['id'], ['tag1'])
        return found_server
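Since _boot_a_server accepts scheduler_hints, it composes with the server group API from Example No. 39. A hypothetical combination (the group creation mirrors that example; 'group' is the hint key the scheduler understands):

        group = self.api.post_server_groups({
            "name": "test-server-group",
            "policy": "anti-affinity",
        })
        # Boot the instance into the group; note that passing hints skips
        # the pop-and-verify of the dest_select notification above.
        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
            scheduler_hints={'group': group['id']})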
Example No. 48
    def test_create_delete_server_with_instance_update(self):
        self.flags(notify_on_state_change='vm_and_task_state')

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})

        instance_updates = self._get_notifications('instance.update')

        # The first notification comes from nova-api; the rest come from
        # nova-compute. To keep the test simpler, assert this fact and then
        # modify the publisher_id of the first notification to match the
        # template.
        self.assertEqual('nova-api:fake-mini',
                         instance_updates[0]['publisher_id'])
        instance_updates[0]['publisher_id'] = 'nova-compute:fake-mini'

        self.assertEqual(7, len(instance_updates))
        create_steps = [
            # nothing -> scheduling
            {'reservation_id':
                notification_sample_base.NotificationSampleTestBase.ANY,
             'uuid': server['id'],
             'host': None,
             'node': None,
             'state_update.new_task_state': 'scheduling',
             'state_update.old_task_state': 'scheduling',
             'state_update.state': 'building',
             'state_update.old_state': 'building',
             'state': 'building'},

            # scheduling -> building
            {
             'state_update.new_task_state': None,
             'state_update.old_task_state': 'scheduling',
             'task_state': None},

            # scheduled
            {'host': 'compute',
             'node': 'fake-mini',
             'state_update.old_task_state': None},

            # building -> networking
            {'state_update.new_task_state': 'networking',
             'state_update.old_task_state': 'networking',
             'task_state': 'networking'},

            # networking -> block_device_mapping
            {'state_update.new_task_state': 'block_device_mapping',
             'state_update.old_task_state': 'networking',
             'task_state': 'block_device_mapping',
            },

            # block_device_mapping -> spawning
            {'state_update.new_task_state': 'spawning',
             'state_update.old_task_state': 'block_device_mapping',
             'task_state': 'spawning',
             'ip_addresses': [{
                 "nova_object.name": "IpPayload",
                 "nova_object.namespace": "nova",
                 "nova_object.version": "1.0",
                 "nova_object.data": {
                     "mac": "fa:16:3e:4c:2c:30",
                     "address": "192.168.1.3",
                     "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
                     "meta": {},
                     "version": 4,
                     "label": "private-network",
                     "device_name": "tapce531f90-19"
                 }}]
             },

            # spawning -> active
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'spawning',
             'state_update.state': 'active',
             'launched_at': '2012-10-29T13:42:11Z',
             'state': 'active',
             'task_state': None,
             'power_state': 'running'},
        ]

        replacements = self._verify_instance_update_steps(
                create_steps, instance_updates)

        fake_notifier.reset()

        # Let's generate some bandwidth usage data.
        # Just call the periodic task directly for simplicity
        self.compute.manager._poll_bandwidth_usage(context.get_admin_context())

        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)

        instance_updates = self._get_notifications('instance.update')
        self.assertEqual(2, len(instance_updates))

        delete_steps = [
            # active -> deleting
            {'state_update.new_task_state': 'deleting',
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state': 'active',
             'task_state': 'deleting',
             'bandwidth': [
                 {'nova_object.namespace': 'nova',
                  'nova_object.name': 'BandwidthPayload',
                  'nova_object.data':
                      {'network_name': 'private-network',
                       'out_bytes': 0,
                       'in_bytes': 0},
                  'nova_object.version': '1.0'}]
            },

            # deleting -> deleted
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state_update.state': 'deleted',
             'state': 'deleted',
             'task_state': None,
             'terminated_at': '2012-10-29T13:42:11Z',
             'ip_addresses': [],
             'power_state': 'pending',
             'bandwidth': []},
        ]

        self._verify_instance_update_steps(delete_steps, instance_updates,
                                           initial=replacements)
Example No. 50
    def test_create_delete_server_with_instance_update(self):
        # This makes server network creation synchronous which is necessary
        # for notification samples that expect instance.info_cache.network_info
        # to be set.
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        self.flags(notify_on_state_change='vm_and_task_state',
                   group='notifications')

        server = self._boot_a_server(
            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
        self._attach_volume_to_server(server, self.cinder.SWAP_OLD_VOL)

        instance_updates = self._wait_for_notifications('instance.update', 8)

        # The first notification comes from nova-conductor and the eighth
        # from nova-api; the rest come from nova-compute. To keep the test
        # simpler, assert this fact and then modify the publisher_id of the
        # first and eighth notifications to match the template.
        self.assertEqual('nova-conductor:fake-mini',
                         instance_updates[0]['publisher_id'])
        self.assertEqual('nova-api:fake-mini',
                         instance_updates[7]['publisher_id'])
        instance_updates[0]['publisher_id'] = 'nova-compute:fake-mini'
        instance_updates[7]['publisher_id'] = 'nova-compute:fake-mini'

        create_steps = [
            # nothing -> scheduling
            {'reservation_id': server['reservation_id'],
             'uuid': server['id'],
             'host': None,
             'node': None,
             'state_update.new_task_state': 'scheduling',
             'state_update.old_task_state': 'scheduling',
             'state_update.state': 'building',
             'state_update.old_state': 'building',
             'state': 'building'},

            # scheduling -> building
            {
             'state_update.new_task_state': None,
             'state_update.old_task_state': 'scheduling',
             'task_state': None},

            # scheduled
            {'host': 'compute',
             'node': 'fake-mini',
             'state_update.old_task_state': None,
             'updated_at': '2012-10-29T13:42:11Z'},

            # building -> networking
            {'state_update.new_task_state': 'networking',
             'state_update.old_task_state': 'networking',
             'task_state': 'networking'},

            # networking -> block_device_mapping
            {'state_update.new_task_state': 'block_device_mapping',
             'state_update.old_task_state': 'networking',
             'task_state': 'block_device_mapping',
             'ip_addresses': [{
                 "nova_object.name": "IpPayload",
                 "nova_object.namespace": "nova",
                 "nova_object.version": "1.0",
                 "nova_object.data": {
                     "mac": "fa:16:3e:4c:2c:30",
                     "address": "192.168.1.3",
                     "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
                     "meta": {},
                     "version": 4,
                     "label": "private-network",
                     "device_name": "tapce531f90-19"
                 }}]
            },

            # block_device_mapping -> spawning
            {'state_update.new_task_state': 'spawning',
             'state_update.old_task_state': 'block_device_mapping',
             'task_state': 'spawning',
             },

            # spawning -> active
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'spawning',
             'state_update.state': 'active',
             'launched_at': '2012-10-29T13:42:11Z',
             'state': 'active',
             'task_state': None,
             'power_state': 'running'},

            # tag added
            {'state_update.old_task_state': None,
             'state_update.old_state': 'active',
             'tags': ['tag1']},
        ]

        replacements = self._verify_instance_update_steps(
                create_steps, instance_updates)

        fake_notifier.reset()

        # Let's generate some bandwidth usage data.
        # Just call the periodic task directly for simplicity
        self.compute.manager._poll_bandwidth_usage(context.get_admin_context())

        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)

        instance_updates = self._get_notifications('instance.update')
        self.assertEqual(2, len(instance_updates))

        delete_steps = [
            # active -> deleting
            {'state_update.new_task_state': 'deleting',
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state': 'active',
             'task_state': 'deleting',
             'bandwidth': [
                 {'nova_object.namespace': 'nova',
                  'nova_object.name': 'BandwidthPayload',
                  'nova_object.data':
                      {'network_name': 'private-network',
                       'out_bytes': 0,
                       'in_bytes': 0},
                  'nova_object.version': '1.0'}],
             'tags': ["tag1"],
             'block_devices': [{
                "nova_object.data": {
                    "boot_index": None,
                    "delete_on_termination": False,
                    "device_name": "/dev/sdb",
                    "tag": None,
                    "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
                },
                "nova_object.name": "BlockDevicePayload",
                "nova_object.namespace": "nova",
                "nova_object.version": "1.0"
              }]
            },

            # deleting -> deleted
            {'state_update.new_task_state': None,
             'state_update.old_task_state': 'deleting',
             'state_update.old_state': 'active',
             'state_update.state': 'deleted',
             'state': 'deleted',
             'task_state': None,
             'terminated_at': '2012-10-29T13:42:11Z',
             'ip_addresses': [],
             'power_state': 'pending',
             'bandwidth': [],
             'tags': ["tag1"],
             'block_devices': [{
                "nova_object.data": {
                    "boot_index": None,
                    "delete_on_termination": False,
                    "device_name": "/dev/sdb",
                    "tag": None,
                    "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
                },
                "nova_object.name": "BlockDevicePayload",
                "nova_object.namespace": "nova",
                "nova_object.version": "1.0"
              }]
            },
        ]

        self._verify_instance_update_steps(delete_steps, instance_updates,
                                           initial=replacements)
Example No. 51
    def _boot_a_server(self,
                       expected_status='ACTIVE',
                       extra_params=None,
                       scheduler_hints=None,
                       additional_extra_specs=None):

        # We have to depend on a specific image and flavor to fix the content
        # of the notification that will be emitted
        flavor_body = {
            'flavor': {
                'name': 'test_flavor',
                'ram': 512,
                'vcpus': 1,
                'disk': 1,
                'id': 'a22d5517-147c-4147-a0d1-e698df5cd4e3'
            }
        }

        flavor_id = self.api.post_flavor(flavor_body)['id']
        extra_specs = {"extra_specs": {"hw:watchdog_action": "disabled"}}
        if additional_extra_specs:
            extra_specs['extra_specs'].update(additional_extra_specs)
        self.admin_api.post_extra_spec(flavor_id, extra_specs)

        # Ignore the create flavor notification
        fake_notifier.reset()

        keypair_req = {
            "keypair": {
                "name": "my-key",
                "public_key": fake_crypto.get_ssh_public_key()
            }
        }
        self.api.post_keypair(keypair_req)

        keypair_expected_notifications = [
            'keypair-import-start', 'keypair-import-end'
        ]
        self.assertLessEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
        for notification in keypair_expected_notifications:
            self._verify_notification(
                notification,
                actual=fake_notifier.VERSIONED_NOTIFICATIONS.pop(0))

        server = self._build_server(
            name='some-server',
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            flavor_id=flavor_id)

        # NOTE(gibi): from microversion 2.19 the description is not set to the
        # instance name automatically but can be provided at boot.
        server['description'] = 'some-server'

        if extra_params:
            extra_params['return_reservation_id'] = True
            extra_params['key_name'] = 'my-key'
            server.update(extra_params)

        post = {'server': server}
        if scheduler_hints:
            post.update({"os:scheduler_hints": scheduler_hints})

        created_server = self.api.post_server(post)
        reservation_id = created_server['reservation_id']
        created_server = self.api.get_servers(
            detail=False, search_opts={'reservation_id': reservation_id})[0]

        self.assertTrue(created_server['id'])

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server,
                                                   expected_status)
        found_server['reservation_id'] = reservation_id

        # Note(elod.illes): let's just pop and verify the dest_select
        # notifications if we don't have a special case
        if scheduler_hints is None and expected_status != 'ERROR':
            self._pop_and_verify_dest_select_notification(found_server['id'])

        if found_server['status'] == 'ACTIVE':
            self.api.put_server_tags(found_server['id'], ['tag1'])
        return found_server