def test_iscsi_volume(self):
        # Live migrate an instance with an attached volume to another host
        if len(self._get_compute_hostnames()) < 2:
            raise self.skipException(
                "Less than 2 compute nodes, skipping migration test.")
        server_id = self._get_an_active_server()
        actual_host = self._get_host_for_server(server_id)
        target_host = self._get_host_other_than(actual_host)

        volume = self.volumes_client.create_volume(
            display_name='test')['volume']

        self.volumes_client.wait_for_volume_status(volume['id'],
                                                   'available')
        self.addCleanup(self._volume_clean_up, server_id, volume['id'])

        # Attach the volume to the server
        self.servers_client.attach_volume(server_id, volumeId=volume['id'],
                                          device='/dev/xvdb')
        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')

        self._migrate_server_to(server_id, target_host)
        waiters.wait_for_server_status(self.servers_client,
                                       server_id, 'ACTIVE')
        self.assertEqual(target_host, self._get_host_for_server(server_id))
Example #2
    def setup_network_and_server(cls, router=None, **kwargs):
        """Create network resources and a server.

        Creates a network, subnet, router, keypair, security group
        and a server.
        """
        cls.network = cls.create_network()
        LOG.debug("Created network %s", cls.network['name'])
        cls.subnet = cls.create_subnet(cls.network)
        LOG.debug("Created subnet %s", cls.subnet['id'])

        secgroup = cls.manager.network_client.create_security_group(
            name=data_utils.rand_name('secgroup-'))
        LOG.debug("Created security group %s",
                  secgroup['security_group']['name'])
        cls.security_groups.append(secgroup['security_group'])
        if not router:
            router = cls.create_router_by_client(**kwargs)
        cls.create_router_interface(router['id'], cls.subnet['id'])
        cls.keypair = cls.create_keypair()
        cls.create_loginable_secgroup_rule(
            secgroup_id=secgroup['security_group']['id'])
        cls.server = cls.create_server(
            flavor_ref=CONF.compute.flavor_ref,
            image_ref=CONF.compute.image_ref,
            key_name=cls.keypair['name'],
            networks=[{'uuid': cls.network['id']}],
            security_groups=[{'name': secgroup['security_group']['name']}])
        waiters.wait_for_server_status(cls.manager.servers_client,
                                       cls.server['server']['id'],
                                       constants.SERVER_STATUS_ACTIVE)
        port = cls.client.list_ports(network_id=cls.network['id'],
                                     device_id=cls.server[
                                          'server']['id'])['ports'][0]
        cls.fip = cls.create_and_associate_floatingip(port['id'])
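A typical use from a scenario class's setup, relying only on the attributes this helper assigns (the class name below is hypothetical):

    @classmethod
    def resource_setup(cls):
        # Hypothetical caller: build the network, server and floating IP
        # once per class, then keep the address for connectivity checks.
        super(ExampleScenarioTest, cls).resource_setup()
        cls.setup_network_and_server()
        cls.fip_address = cls.fip['floating_ip_address']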
Example #3
 def test_list_servers_filter_by_exist_host(self):
     # Filter the list of servers by an existing host
     name = data_utils.rand_name('server')
     flavor = self.flavor_ref
     image_id = self.image_ref
     network = self.get_tenant_network()
     network_kwargs = fixed_network.set_networks_kwarg(network)
     test_server = self.client.create_server(name=name, imageRef=image_id,
                                             flavorRef=flavor,
                                             **network_kwargs)['server']
     self.addCleanup(self.client.delete_server, test_server['id'])
     waiters.wait_for_server_status(self.client,
                                    test_server['id'], 'ACTIVE')
     server = self.client.show_server(test_server['id'])['server']
     self.assertEqual(server['status'], 'ACTIVE')
     hostname = server[self._host_key]
     params = {'host': hostname}
     body = self.client.list_servers(**params)
     servers = body['servers']
     nonexistent_params = {'host': 'nonexistent_host'}
     nonexistent_body = self.client.list_servers(**nonexistent_params)
     nonexistent_servers = nonexistent_body['servers']
     self.assertIn(test_server['id'], map(lambda x: x['id'], servers))
     self.assertNotIn(test_server['id'],
                      map(lambda x: x['id'], nonexistent_servers))
    def _test_live_migration(self, state='ACTIVE', volume_backed=False):
        """Tests live migration between two hosts.

        Requires CONF.compute_feature_enabled.live_migration to be True.

        :param state: The vm_state the migrated server should be in before and
                      after the live migration. Supported values are 'ACTIVE'
                      and 'PAUSED'.
        :param volume_backed: If the instance is volume backed or not. If
                              volume_backed, *block* migration is not used.
        """
        # Live migrate an instance to another host
        server_id = self.create_test_server(wait_until="ACTIVE",
                                            volume_backed=volume_backed)['id']
        source_host = self.get_host_for_server(server_id)
        destination_host = self.get_host_other_than(server_id)

        if state == 'PAUSED':
            self.admin_servers_client.pause_server(server_id)
            waiters.wait_for_server_status(self.admin_servers_client,
                                           server_id, state)

        LOG.info("Live migrate from source %s to destination %s",
                 source_host, destination_host)
        self._live_migrate(server_id, destination_host, state, volume_backed)
        if CONF.compute_feature_enabled.live_migrate_back_and_forth:
            # If live_migrate_back_and_forth is enabled it is a grenade job,
            # so the test should validate that live migration works in both
            # directions; live migrate the VM back to the source host.
            LOG.info("Live migrate back to source %s", source_host)
            self._live_migrate(server_id, source_host, state, volume_backed)
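The _live_migrate helper called above is not part of this excerpt. A minimal sketch of what it needs to do, assuming the pre-2.25 live-migration API and the client, config and waiter names used throughout these examples:

    def _live_migrate(self, server_id, target_host, state,
                      volume_backed=False):
        # Sketch only: block migration cannot be used for volume-backed
        # servers, so request it only when the deployment supports it
        # and the instance boots from a local disk.
        block_migration = (CONF.compute_feature_enabled.
                           block_migration_for_live_migration and
                           not volume_backed)
        self.admin_servers_client.live_migrate_server(
            server_id, host=target_host, block_migration=block_migration,
            disk_over_commit=False)
        waiters.wait_for_server_status(self.admin_servers_client,
                                       server_id, state)
        self.assertEqual(target_host, self.get_host_for_server(server_id))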
Example #5
    def test_rebuild_server_in_error_state(self):
        # The server in error state should be rebuilt using the provided
        # image and changed to ACTIVE state

        # resetting vm state requires admin privilege
        self.client.reset_state(self.s1_id, state='error')
        rebuilt_server = self.non_admin_client.rebuild_server(
            self.s1_id, self.image_ref_alt)['server']
        self.addCleanup(waiters.wait_for_server_status, self.non_admin_client,
                        self.s1_id, 'ACTIVE')
        self.addCleanup(self.non_admin_client.rebuild_server, self.s1_id,
                        self.image_ref)

        # Verify the properties in the initial response are correct
        self.assertEqual(self.s1_id, rebuilt_server['id'])
        rebuilt_image_id = rebuilt_server['image']['id']
        self.assertEqual(self.image_ref_alt, rebuilt_image_id)
        self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
        waiters.wait_for_server_status(self.non_admin_client,
                                       rebuilt_server['id'], 'ACTIVE',
                                       raise_on_error=False)
        # Verify the server properties after rebuilding
        server = (self.non_admin_client.show_server(rebuilt_server['id'])
                  ['server'])
        rebuilt_image_id = server['image']['id']
        self.assertEqual(self.image_ref_alt, rebuilt_image_id)
 def test_delete_server_while_in_suspended_state(self):
     # Delete a server while its VM state is Suspended
     server = self.create_test_server(wait_until='ACTIVE')
     self.client.suspend_server(server['id'])
     waiters.wait_for_server_status(self.client, server['id'], 'SUSPENDED')
     self.client.delete_server(server['id'])
     waiters.wait_for_server_termination(self.client, server['id'])
 def _stop_instances(self, instances):
     # NOTE(gfidente): two loops so we do not wait for the status twice
     for i in instances:
         self.servers_client.stop_server(i['id'])
     for i in instances:
         waiters.wait_for_server_status(self.servers_client,
                                        i['id'], 'SHUTOFF')
 def test_delete_server_while_in_shutoff_state(self):
     # Delete a server while its VM state is Shutoff
     server = self.create_test_server(wait_until='ACTIVE')
     self.client.stop_server(server['id'])
     waiters.wait_for_server_status(self.client, server['id'], 'SHUTOFF')
     self.client.delete_server(server['id'])
     waiters.wait_for_server_termination(self.client, server['id'])
 def test_delete_server_while_in_pause_state(self):
     # Delete a server while its VM state is Paused
     server = self.create_test_server(wait_until='ACTIVE')
     self.client.pause_server(server['id'])
     waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
     self.client.delete_server(server['id'])
     waiters.wait_for_server_termination(self.client, server['id'])
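The three delete-while-in-state tests above repeat the same steps. A hedged refactoring sketch (the helper name and the getattr dispatch are illustrative, not part of the original suite):

 def _delete_server_in_state(self, action, state):
     # Illustrative helper: drive an ACTIVE server into the given
     # vm_state with the named client call, then delete it and wait
     # for termination.
     server = self.create_test_server(wait_until='ACTIVE')
     getattr(self.client, action)(server['id'])
     waiters.wait_for_server_status(self.client, server['id'], state)
     self.client.delete_server(server['id'])
     waiters.wait_for_server_termination(self.client, server['id'])

Each test then reduces to a single call such as self._delete_server_in_state('suspend_server', 'SUSPENDED').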
 def reboot_instance(self, instance_id):
     """Reboot the instance with the given id."""
     self.servers_client.reboot_server(
         server_id=instance_id, type='soft')
     waiters.wait_for_server_status(
         self.servers_client,
         instance_id, 'ACTIVE')
 def setUp(self):
     super(ServersNegativeTestMultiTenantJSON, self).setUp()
     try:
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'ACTIVE')
     except Exception:
         self.__class__.server_id = self.rebuild_server(self.server_id)
Example #12
 def _wait_for_server(self, server):
     waiters.wait_for_server_status(self.manager.servers_client,
                                    server['server']['id'],
                                    constants.SERVER_STATUS_ACTIVE)
     self.check_connectivity(server['fip']['floating_ip_address'],
                             CONF.validation.image_ssh_user,
                             self.keypair['private_key'])
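check_connectivity is not shown in this excerpt. A minimal sketch using tempest.lib's SSH client (an assumption; the real helper may ping instead), with `from tempest.lib.common import ssh` at module level:

 def check_connectivity(self, host, ssh_user, private_key):
     # Illustrative probe: open an SSH session to the floating IP and
     # run a trivial command; any exception here fails the test.
     ssh_client = ssh.Client(host, ssh_user, pkey=private_key)
     ssh_client.exec_command('hostname')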
Example #13
 def _create_server(self, name):
     keypair = self.create_keypair()
     security_groups = [{'name': self.security_group['name']}]
     create_kwargs = {
         'networks': [
             {'uuid': self.network['id']},
         ],
         'key_name': keypair['name'],
         'security_groups': security_groups,
         'name': name
     }
     net_name = self.network['name']
     server = self.create_server(**create_kwargs)
     waiters.wait_for_server_status(self.servers_client,
                                    server['id'], 'ACTIVE')
     server = self.servers_client.show_server(server['id'])
     server = server['server']
     self.servers_keypairs[server['id']] = keypair
     if (config.network.public_network_id and not
             config.network.project_networks_reachable):
         public_network_id = config.network.public_network_id
         floating_ip = self.create_floating_ip(
             server, public_network_id)
         self.floating_ips[floating_ip] = server
         self.server_ips[server['id']] = floating_ip.floating_ip_address
     else:
         self.server_ips[server['id']] =\
             server['addresses'][net_name][0]['addr']
     self.server_fixed_ips[server['id']] =\
         server['addresses'][net_name][0]['addr']
     self.assertTrue(self.servers_keypairs)
     return server
Example #14
 def test_update_server_name_in_stop_state(self):
     # The server name should be changed to the provided value
     server = self.create_test_server(wait_until='ACTIVE')
     self.client.stop_server(server['id'])
     waiters.wait_for_server_status(self.client, server['id'], 'SHUTOFF')
     updated_server = self._update_server_name(server['id'], 'SHUTOFF')
     self.assertNotIn('progress', updated_server)
    def _test_live_migration(self, state='ACTIVE', volume_backed=False):
        """Tests live migration between two hosts.

        Requires CONF.compute_feature_enabled.live_migration to be True.

        :param state: The vm_state the migrated server should be in before and
                      after the live migration. Supported values are 'ACTIVE'
                      and 'PAUSED'.
        :param volume_backed: If the instance is volume backed or not. If
                              volume_backed, *block* migration is not used.
        """
        # Live migrate an instance to another host
        server_id = self.create_test_server(wait_until="ACTIVE",
                                            volume_backed=volume_backed)['id']
        actual_host = self._get_host_for_server(server_id)
        target_host = self._get_host_other_than(actual_host)

        if state == 'PAUSED':
            self.admin_servers_client.pause_server(server_id)
            waiters.wait_for_server_status(self.admin_servers_client,
                                           server_id, state)

        self._migrate_server_to(server_id, target_host, volume_backed)
        waiters.wait_for_server_status(self.servers_client, server_id, state)
        migration_list = (self.admin_migration_client.list_migrations()
                          ['migrations'])

        msg = ("Live Migration failed. Migrations list for Instance "
               "%s: [" % server_id)
        for live_migration in migration_list:
            if (live_migration['instance_uuid'] == server_id):
                msg += "\n%s" % live_migration
        msg += "]"
        self.assertEqual(target_host, self._get_host_for_server(server_id),
                         msg)
Example #16
    def test_rebuild_server_in_stop_state(self):
        # The server in stop state should be rebuilt using the provided
        # image and remain in SHUTOFF state
        server = self.client.show_server(self.server_id)['server']
        old_image = server['image']['id']
        new_image = (self.image_ref_alt
                     if old_image == self.image_ref else self.image_ref)
        self.client.stop_server(self.server_id)
        waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
        rebuilt_server = (self.client.rebuild_server(self.server_id, new_image)
                          ['server'])
        # If the server was rebuilt on a different image, restore it to the
        # original image once the test ends
        if self.image_ref_alt != self.image_ref:
            self.addCleanup(self._rebuild_server_and_check, old_image)

        # Verify the properties in the initial response are correct
        self.assertEqual(self.server_id, rebuilt_server['id'])
        rebuilt_image_id = rebuilt_server['image']['id']
        self.assertEqual(new_image, rebuilt_image_id)
        self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])

        # Verify the server properties after the rebuild completes
        waiters.wait_for_server_status(self.client,
                                       rebuilt_server['id'], 'SHUTOFF')
        server = self.client.show_server(rebuilt_server['id'])['server']
        rebuilt_image_id = server['image']['id']
        self.assertEqual(new_image, rebuilt_image_id)

        self.client.start_server(self.server_id)
    def test_associate_already_associated_floating_ip(self):
        # Positive test: association of an already associated floating IP
        # to a specific server should change the association of the
        # floating IP.
        # Create a second server to use for the multiple association.
        new_name = data_utils.rand_name('floating_server')
        body = self.create_test_server(name=new_name)
        waiters.wait_for_server_status(self.servers_client,
                                       body['id'], 'ACTIVE')
        self.new_server_id = body['id']
        self.addCleanup(self.servers_client.delete_server, self.new_server_id)

        # Associating floating IP for the first time
        self.client.associate_floating_ip_to_server(
            self.floating_ip,
            self.server_id)
        # Associating floating IP for the second time
        self.client.associate_floating_ip_to_server(
            self.floating_ip,
            self.new_server_id)

        self.addCleanup(self.client.disassociate_floating_ip_from_server,
                        self.floating_ip,
                        self.new_server_id)

        # Make sure the floating IP is no longer associated with the
        # old server
        self.assertRaises((lib_exc.NotFound,
                           lib_exc.UnprocessableEntity,
                           lib_exc.Conflict),
                          self.client.disassociate_floating_ip_from_server,
                          self.floating_ip, self.server_id)
Example #18
 def _rebuild_server_and_check(self, image_ref):
     rebuilt_server = self.client.rebuild_server(
         self.server_id, image_ref)['server']
     waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
     msg = ('Server was not rebuilt to the original image. '
            'The original image: {0}. The current image: {1}'
            .format(image_ref, rebuilt_server['image']['id']))
     self.assertEqual(image_ref, rebuilt_server['image']['id'], msg)
    def test_shelve_shelved_server(self):
        # Shelving an already shelved server should fail with Conflict.
        self.client.shelve_server(self.server_id)

        offload_time = CONF.compute.shelved_offload_time
        if offload_time >= 0:
            waiters.wait_for_server_status(self.client,
                                           self.server_id,
                                           'SHELVED_OFFLOADED',
                                           extra_timeout=offload_time)
        else:
            waiters.wait_for_server_status(self.client,
                                           self.server_id,
                                           'SHELVED')

        server = self.client.show_server(self.server_id)['server']
        image_name = server['name'] + '-shelved'
        params = {'name': image_name}
        images = self.images_client.list_images(**params)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image_name, images[0]['name'])

        self.assertRaises(lib_exc.Conflict,
                          self.client.shelve_server,
                          self.server_id)

        self.client.unshelve_server(self.server_id)
    def test_shelve_unshelve_server(self):
        if CONF.image_feature_enabled.api_v2:
            glance_client = self.os_primary.image_client_v2
        elif CONF.image_feature_enabled.api_v1:
            glance_client = self.os_primary.image_client
        else:
            raise lib_exc.InvalidConfiguration(
                'Either api_v1 or api_v2 must be True in '
                '[image-feature-enabled].')
        compute.shelve_server(self.client, self.server_id,
                              force_shelve_offload=True)

        def _unshelve_server():
            server_info = self.client.show_server(self.server_id)['server']
            if 'SHELVED' in server_info['status']:
                self.client.unshelve_server(self.server_id)
        self.addCleanup(_unshelve_server)

        server = self.client.show_server(self.server_id)['server']
        image_name = server['name'] + '-shelved'
        params = {'name': image_name}
        if CONF.image_feature_enabled.api_v2:
            images = glance_client.list_images(params)['images']
        elif CONF.image_feature_enabled.api_v1:
            images = glance_client.list_images(
                detail=True, **params)['images']
        self.assertEqual(1, len(images))
        self.assertEqual(image_name, images[0]['name'])

        self.client.unshelve_server(self.server_id)
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        glance_client.wait_for_resource_deletion(images[0]['id'])
 def test_shelve_paused_server(self):
     server = self.create_test_server(wait_until='ACTIVE')
     self.client.pause_server(server['id'])
     waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
     # Check that the shelve operation succeeds on a paused server.
     compute.shelve_server(self.client, server['id'],
                           force_shelve_offload=True)
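Several snippets call compute.shelve_server with force_shelve_offload=True. A simplified sketch of such a helper, reusing the shelved_offload_time logic from test_shelve_shelved_server above (an approximation, not a verbatim copy of the tempest.common.compute helper):

def shelve_server(servers_client, server_id, force_shelve_offload=False):
    # Shelve, then wait for the state the deployment settles in. When
    # the cloud does not offload automatically (shelved_offload_time
    # < 0) but the caller asked for it, force the offload explicitly.
    servers_client.shelve_server(server_id)
    offload_time = CONF.compute.shelved_offload_time
    if offload_time >= 0:
        waiters.wait_for_server_status(servers_client, server_id,
                                       'SHELVED_OFFLOADED',
                                       extra_timeout=offload_time)
    else:
        waiters.wait_for_server_status(servers_client, server_id,
                                       'SHELVED')
        if force_shelve_offload:
            servers_client.shelve_offload_server(server_id)
            waiters.wait_for_server_status(servers_client, server_id,
                                           'SHELVED_OFFLOADED')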
    def _test_reboot_server(self, reboot_type):
        if CONF.validation.run_validation:
            validation_resources = self.get_class_validation_resources(
                self.os_primary)
            # Get the time the server was last rebooted.
            server = self.client.show_server(self.server_id)['server']
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server, validation_resources),
                self.ssh_user,
                self.password,
                validation_resources['keypair']['private_key'],
                server=server,
                servers_client=self.client)
            boot_time = linux_client.get_boot_time()

            # NOTE: This sync is for avoiding the loss of pub key data
            # in a server
            linux_client.exec_command("sync")

        self.client.reboot_server(self.server_id, type=reboot_type)
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')

        if CONF.validation.run_validation:
            # Log in and verify the boot time has changed
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server, validation_resources),
                self.ssh_user,
                self.password,
                validation_resources['keypair']['private_key'],
                server=server,
                servers_client=self.client)
            new_boot_time = linux_client.get_boot_time()
            self.assertGreater(new_boot_time, boot_time,
                               '%s > %s' % (new_boot_time, boot_time))
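The boot-time comparison relies on RemoteClient.get_boot_time. A minimal sketch of such a probe, assuming `import time` and that reading /proc/uptime over SSH is acceptable (the real client's command and return type may differ):

    def get_boot_time(self):
        # Approximate the boot timestamp as "now minus uptime", read on
        # the guest; a reboot strictly increases this value, which is
        # all the assertion above compares.
        boot_secs = self.exec_command('cut -f1 -d. /proc/uptime')
        return time.time() - float(boot_secs)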
Example #23
    def _test_cold_migrate_server(self, revert=False):
        if CONF.compute.min_compute_nodes < 2:
            msg = "Less than 2 compute nodes, skipping multinode tests."
            raise self.skipException(msg)

        server = self.create_test_server(wait_until="ACTIVE")
        src_host = self.admin_servers_client.show_server(
            server['id'])['server']['OS-EXT-SRV-ATTR:host']

        self.admin_servers_client.migrate_server(server['id'])

        waiters.wait_for_server_status(self.servers_client,
                                       server['id'], 'VERIFY_RESIZE')

        if revert:
            self.servers_client.revert_resize_server(server['id'])
            assert_func = self.assertEqual
        else:
            self.servers_client.confirm_resize_server(server['id'])
            assert_func = self.assertNotEqual

        waiters.wait_for_server_status(self.servers_client,
                                       server['id'], 'ACTIVE')
        dst_host = self.admin_servers_client.show_server(
            server['id'])['server']['OS-EXT-SRV-ATTR:host']
        assert_func(src_host, dst_host)
 def _rebuild_server_and_check(self, image_ref):
     rebuilt_server = self.client.rebuild_server(self.server_id, image_ref)["server"]
     waiters.wait_for_server_status(self.client, self.server_id, "ACTIVE")
     msg = "Server was not rebuilt to the original image. " "The original image: {0}. The current image: {1}".format(
         image_ref, rebuilt_server["image"]["id"]
     )
     self.assertEqual(image_ref, rebuilt_server["image"]["id"], msg)
    def _test_reboot_server(self, reboot_type):
        if CONF.validation.run_validation:
            # Get the time the server was last rebooted.
            server = self.client.show_server(self.server_id)["server"]
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server),
                self.ssh_user,
                self.password,
                self.validation_resources["keypair"]["private_key"],
                server=server,
                servers_client=self.client,
            )
            boot_time = linux_client.get_boot_time()

            # NOTE: This sync is for avoiding the loss of pub key data
            # in a server
            linux_client.exec_command("sync")

        self.client.reboot_server(self.server_id, type=reboot_type)
        waiters.wait_for_server_status(self.client, self.server_id, "ACTIVE")

        if CONF.validation.run_validation:
            # Log in and verify the boot time has changed
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server),
                self.ssh_user,
                self.password,
                self.validation_resources["keypair"]["private_key"],
                server=server,
                servers_client=self.client,
            )
            new_boot_time = linux_client.get_boot_time()
     self.assertGreater(new_boot_time, boot_time,
                        "%s > %s" % (new_boot_time, boot_time))
    def _shelve_then_unshelve_server(self, server):
        compute.shelve_server(self.servers_client, server['id'],
                              force_shelve_offload=True)

        self.servers_client.unshelve_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')
Example #27
    def _test_live_block_migration(self, state='ACTIVE'):
        """Tests live block migration between two hosts.

        Requires CONF.compute_feature_enabled.live_migration to be True.

        :param state: The vm_state the migrated server should be in before and
                      after the live migration. Supported values are 'ACTIVE'
                      and 'PAUSED'.
        """
        # Live block migrate an instance to another host
        if len(self._get_compute_hostnames()) < 2:
            raise self.skipException(
                "Less than 2 compute nodes, skipping migration test.")
        server_id = self._get_an_active_server()
        actual_host = self._get_host_for_server(server_id)
        target_host = self._get_host_other_than(actual_host)

        if state == 'PAUSED':
            self.admin_servers_client.pause_server(server_id)
            waiters.wait_for_server_status(self.admin_servers_client,
                                           server_id, state)

        self._migrate_server_to(server_id, target_host)
        waiters.wait_for_server_status(self.servers_client, server_id, state)
        self.assertEqual(target_host, self._get_host_for_server(server_id))
Example #28
    def boot_instance(self):
        self.instance = self.create_server(
            key_name=self.keypair['name'])

        self.wait_node(self.instance['id'])
        self.node = self.get_node(instance_id=self.instance['id'])

        self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)

        self.wait_provisioning_state(
            self.node['uuid'],
            [BaremetalProvisionStates.DEPLOYWAIT,
             BaremetalProvisionStates.ACTIVE],
            timeout=CONF.baremetal.deploywait_timeout)

        self.wait_provisioning_state(self.node['uuid'],
                                     BaremetalProvisionStates.ACTIVE,
                                     timeout=CONF.baremetal.active_timeout,
                                     interval=30)

        waiters.wait_for_server_status(self.servers_client,
                                       self.instance['id'], 'ACTIVE')
        self.node = self.get_node(instance_id=self.instance['id'])
        self.instance = (self.servers_client.show_server(self.instance['id'])
                         ['server'])
Example #29
    def _test_reboot_server(self, reboot_type):
        if CONF.validation.run_validation:
            # Get the time the server was last rebooted.
            server = self.client.show_server(self.server_id)['server']
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server),
                self.ssh_user,
                self.password,
                self.validation_resources['keypair']['private_key'],
                server=server,
                servers_client=self.client)
            boot_time = linux_client.get_boot_time()

        self.client.reboot_server(self.server_id, type=reboot_type)
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')

        if CONF.validation.run_validation:
            # Log in and verify the boot time has changed
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server),
                self.ssh_user,
                self.password,
                self.validation_resources['keypair']['private_key'],
                server=server,
                servers_client=self.client)
            new_boot_time = linux_client.get_boot_time()
            self.assertGreater(new_boot_time, boot_time,
                               '%s > %s' % (new_boot_time, boot_time))
    def test_resize_server_revert_with_volume_attached(self):
        # Tests attaching a volume to a server instance and then resizing
        # the instance. Once the instance is resized, revert the resize which
        # should move the instance and volume attachment back to the original
        # compute host.

        # Create a blank volume and attach it to the server created in setUp.
        volume = self.create_volume()
        server = self.client.show_server(self.server_id)['server']
        self.attach_volume(server, volume)
        # Now resize the server with the blank volume attached.
        self.client.resize_server(self.server_id, self.flavor_ref_alt)
        # Explicitly delete the server to get a new one for later
        # tests. Avoids resize down race issues.
        self.addCleanup(self.delete_server, self.server_id)
        waiters.wait_for_server_status(
            self.client, self.server_id, 'VERIFY_RESIZE')
        # Now revert the resize, which should move the instance and its
        # volume attachment back to the original source compute host.
        self.client.revert_resize_server(self.server_id)
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        # Make sure everything still looks OK.
        server = self.client.show_server(self.server_id)['server']
        # The flavor id is not returned in the server response after
        # microversion 2.46 so handle that gracefully.
        if server['flavor'].get('id'):
            self.assertEqual(self.flavor_ref, server['flavor']['id'])
        attached_volumes = server['os-extended-volumes:volumes_attached']
        self.assertEqual(1, len(attached_volumes))
        self.assertEqual(volume['id'], attached_volumes[0]['id'])
Example #31
 def test_stop_start_server(self):
     """Test stopping and starting server"""
     self.client.stop_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
     self.client.start_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
Example #32
 def test_create_server_with_ipv6_addr_only(self):
     # Create a server without an IPv4 address (IPv6 address only).
     server = self.create_test_server(accessIPv6='2001:2001::3')
     waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
     server = self.client.show_server(server['id'])['server']
     self.assertEqual('2001:2001::3', server['accessIPv6'])
Example #33
def create_test_server(clients, validatable=False, validation_resources=None,
                       tenant_network=None, wait_until=None,
                       volume_backed=False, name=None, flavor=None,
                       image_id=None, **kwargs):
    """Common wrapper utility returning a test server.

    This method is a common wrapper returning a test server that can be
    pingable or sshable.

    :param clients: Client manager which provides OpenStack Tempest clients.
    :param validatable: Whether the server will be pingable or sshable.
    :param validation_resources: Resources created for the connection to the
        server. Include a keypair, a security group and an IP.
    :param tenant_network: Tenant network to be used for creating a server.
    :param wait_until: Server status to wait for the server to reach after
        its creation.
    :param volume_backed: Whether the server is volume backed or not.
                          If this is true, a volume will be created and
                          create server will be requested with
                          'block_device_mapping_v2' populated with below
                          values:
                          --------------------------------------------
                          bd_map_v2 = [{
                              'uuid': volume['volume']['id'],
                              'source_type': 'volume',
                              'destination_type': 'volume',
                              'boot_index': 0,
                              'delete_on_termination': True}]
                          kwargs['block_device_mapping_v2'] = bd_map_v2
                          ---------------------------------------------
                          If server needs to be booted from volume with other
                          combination of bdm inputs than mentioned above, then
                          pass the bdm inputs explicitly as kwargs and image_id
                          as empty string ('').
    :param name: Name of the server to be provisioned. If not defined a random
        string ending with '-instance' will be generated.
    :param flavor: Flavor of the server to be provisioned. If not defined,
        CONF.compute.flavor_ref will be used instead.
    :param image_id: ID of the image to be used to provision the server. If not
        defined, CONF.compute.image_ref will be used instead.
    :returns: a tuple
    """

    # TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE

    if name is None:
        name = data_utils.rand_name(__name__ + "-instance")
    if flavor is None:
        flavor = CONF.compute.flavor_ref
    if image_id is None:
        image_id = CONF.compute.image_ref

    kwargs = fixed_network.set_networks_kwarg(
        tenant_network, kwargs) or {}

    multiple_create_request = (max(kwargs.get('min_count', 0),
                                   kwargs.get('max_count', 0)) > 1)

    if CONF.validation.run_validation and validatable:
        # As a first implementation, multiple pingable or sshable servers will
        # not be supported
        if multiple_create_request:
            msg = ("Multiple pingable or sshable servers not supported at "
                   "this stage.")
            raise ValueError(msg)

        if 'security_groups' in kwargs:
            kwargs['security_groups'].append(
                {'name': validation_resources['security_group']['name']})
        else:
            try:
                kwargs['security_groups'] = [
                    {'name': validation_resources['security_group']['name']}]
            except KeyError:
                LOG.debug("No security group provided.")

        if 'key_name' not in kwargs:
            try:
                kwargs['key_name'] = validation_resources['keypair']['name']
            except KeyError:
                LOG.debug("No key provided.")

        if CONF.validation.connect_method == 'floating':
            if wait_until is None:
                wait_until = 'ACTIVE'

        if 'user_data' not in kwargs:
            # If nothing overrides the default user data script then run
            # a simple script on the host to print networking info. This is
            # to aid in debugging ssh failures.
            script = '''
                     #!/bin/sh
                     echo "Printing {user} user authorized keys"
                     cat ~{user}/.ssh/authorized_keys || true
                     '''.format(user=CONF.validation.image_ssh_user)
            script_clean = textwrap.dedent(script).lstrip().encode('utf8')
            script_b64 = base64.b64encode(script_clean)
            kwargs['user_data'] = script_b64

    if volume_backed:
        volume_name = data_utils.rand_name(__name__ + '-volume')
        volumes_client = clients.volumes_v2_client
        params = {'name': volume_name,
                  'imageRef': image_id,
                  'size': CONF.volume.volume_size}
        volume = volumes_client.create_volume(**params)
        waiters.wait_for_volume_resource_status(volumes_client,
                                                volume['volume']['id'],
                                                'available')

        bd_map_v2 = [{
            'uuid': volume['volume']['id'],
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'delete_on_termination': True}]
        kwargs['block_device_mapping_v2'] = bd_map_v2

        # Since this is boot from volume an image does not need
        # to be specified.
        image_id = ''

    body = clients.servers_client.create_server(name=name, imageRef=image_id,
                                                flavorRef=flavor,
                                                **kwargs)

    # handle the case of multiple servers
    if multiple_create_request:
        # Get the created servers whose names match the name param.
        body_servers = clients.servers_client.list_servers()
        servers = \
            [s for s in body_servers['servers'] if s['name'].startswith(name)]
    else:
        body = rest_client.ResponseBody(body.response, body['server'])
        servers = [body]

    # The name of the method to associate a floating IP to a server is too
    # long for PEP8 compliance so:
    assoc = clients.compute_floating_ips_client.associate_floating_ip_to_server

    if wait_until:
        for server in servers:
            try:
                waiters.wait_for_server_status(
                    clients.servers_client, server['id'], wait_until)

                # Multiple validatable servers are not supported for now. Their
                # creation will fail with the condition above (l.58).
                if CONF.validation.run_validation and validatable:
                    if CONF.validation.connect_method == 'floating':
                        assoc(floating_ip=validation_resources[
                              'floating_ip']['ip'],
                              server_id=servers[0]['id'])

            except Exception:
                with excutils.save_and_reraise_exception():
                    for server in servers:
                        try:
                            clients.servers_client.delete_server(
                                server['id'])
                        except Exception:
                            LOG.exception('Deleting server %s failed',
                                          server['id'])

    return body, servers
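A typical call, assuming `clients` is a Tempest client manager built elsewhere; the helper returns both the raw response body and the list of created server dicts:

# Boot a volume-backed server and wait until it is ACTIVE.
body, servers = create_test_server(
    clients, wait_until='ACTIVE', volume_backed=True, name='demo-instance')
server_id = servers[0]['id']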
Example #34
 def test_stop_start_server(self):
     self.client.stop_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
     self.client.start_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
Example #35
 def test_suspend_resume_server(self):
     self.client.suspend_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id,
                                    'SUSPENDED')
     self.client.resume_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
Example #36
 def test_pause_unpause_server(self):
     self.client.pause_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
     self.client.unpause_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
Example #37
    def test_create_backup(self):
        # Positive test: create backups and rotate them correctly
        # create the first and the second backup

        # Check if glance v1 is available to determine which client to use. We
        # prefer glance v1 for the compute API tests since the compute image
        # API proxy was written for glance v1.
        if CONF.image_feature_enabled.api_v1:
            glance_client = self.os.image_client
        elif CONF.image_feature_enabled.api_v2:
            glance_client = self.os.image_client_v2
        else:
            raise exceptions.InvalidConfiguration(
                'Either api_v1 or api_v2 must be True in '
                '[image-feature-enabled].')

        backup1 = data_utils.rand_name('backup-1')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup1).response
        oldest_backup_exist = True

        # the oldest one should be deleted automatically in this test
        def _clean_oldest_backup(oldest_backup):
            if oldest_backup_exist:
                try:
                    glance_client.delete_image(oldest_backup)
                except lib_exc.NotFound:
                    pass
                else:
                    LOG.warning("Deletion of oldest backup %s should not have "
                                "been successful as it should have been "
                                "deleted during rotation." % oldest_backup)

        image1_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(_clean_oldest_backup, image1_id)
        waiters.wait_for_image_status(glance_client, image1_id, 'active')

        backup2 = data_utils.rand_name('backup-2')
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup2).response
        image2_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(glance_client.delete_image, image2_id)
        waiters.wait_for_image_status(glance_client, image2_id, 'active')

        # verify they have been created
        properties = {
            'image_type': 'backup',
            'backup_type': "daily",
            'instance_uuid': self.server_id,
        }
        params = {
            'status': 'active',
            'sort_key': 'created_at',
            'sort_dir': 'asc'
        }
        if CONF.image_feature_enabled.api_v1:
            for key, value in properties.items():
                params['property-%s' % key] = value
            image_list = glance_client.list_images(detail=True,
                                                   **params)['images']
        else:
            # Additional properties are flattened in glance v2.
            params.update(properties)
            image_list = glance_client.list_images(params)['images']

        self.assertEqual(2, len(image_list))
        self.assertEqual((backup1, backup2),
                         (image_list[0]['name'], image_list[1]['name']))

        # Create the third backup; since the rotation is 2, the first one
        # will be deleted automatically.
        backup3 = data_utils.rand_name('backup-3')
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup3).response
        image3_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(glance_client.delete_image, image3_id)
        # the first backup should be deleted
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        glance_client.wait_for_resource_deletion(image1_id)
        oldest_backup_exist = False
        if CONF.image_feature_enabled.api_v1:
            image_list = glance_client.list_images(detail=True,
                                                   **params)['images']
        else:
            image_list = glance_client.list_images(params)['images']
        self.assertEqual(
            2, len(image_list), 'Unexpected number of images for '
            'v2:test_create_backup; was the oldest backup not '
            'yet deleted? Image list: %s' %
            [image['name'] for image in image_list])
        self.assertEqual((backup2, backup3),
                         (image_list[0]['name'], image_list[1]['name']))
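data_utils.parse_image_id above only extracts the image UUID from the Location header returned by create_backup; a one-line sketch of that behavior:

def parse_image_id(image_location):
    # The Location header ends in '.../images/<uuid>'; keep the last
    # path segment.
    return image_location.rsplit('/')[-1]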
    def create_server(self,
                      name=None,
                      image=None,
                      flavor=None,
                      wait_on_boot=True,
                      wait_on_delete=True,
                      servers_client=None,
                      tenant_id=None,
                      create_kwargs=None):
        """Creates VM instance.

        @param image: image from which to create the instance
        @param wait_on_boot: wait for status ACTIVE before continue
        @param wait_on_delete: force synchronous delete on cleanup
        @param servers_client: the servers_client to create VM
        @param create_kwargs: additional details for instance creation
        @return: server dict
        """
        name = name or data_utils.rand_name('topo-deploy-vm')
        image = image or CONF.compute.image_ref
        flavor = flavor or CONF.compute.flavor_ref
        servers_client = servers_client or self.servers_client
        create_kwargs = create_kwargs or {}
        if isinstance(tenant_id, str):
            if servers_client.tenant_id != tenant_id:
                create_kwargs['tenant_id'] = tenant_id

        xmsg = ("Creating a server name=%(name)s, image=%(image)s"
                ", flavor=%(flavor)s, create_kwargs=%(create_kwargs)s" % {
                    'name': name,
                    'image': image,
                    'flavor': flavor,
                    'create_kwargs': str(create_kwargs)
                })
        LOG.debug(xmsg)
        server_resp = servers_client.create_server(name=name,
                                                   imageRef=image,
                                                   flavorRef=flavor,
                                                   **create_kwargs)
        server = server_resp['server']
        if wait_on_delete:
            self.addCleanup(waiters.wait_for_server_termination,
                            servers_client, server['id'])
        self.addCleanup_with_wait(
            waiter_callable=waiters.wait_for_server_termination,
            thing_id=server['id'],
            thing_id_param='server_id',
            waiter_client=servers_client,
            cleanup_callable=test_utils.call_and_ignore_notfound_exc,
            cleanup_args=[servers_client.delete_server, server['id']])
        if wait_on_boot:
            waiters.wait_for_server_status(client=servers_client,
                                           server_id=server['id'],
                                           status='ACTIVE')
        # The instance retrieved on creation is missing network
        # details, necessitating retrieval after it becomes active to
        # ensure correct details.
        server_resp = servers_client.show_server(server['id'])
        server = server_resp['server']
        self.assertEqual(server['name'], name)
        self.servers_on_net[server['id']] = server
        return server
 def test_rescue_server(self):
     """Test rescue server, part of os-rescue."""
     with self.override_role():
         self.servers_client.rescue_server(self.server['id'])
     waiters.wait_for_server_status(self.servers_client, self.server['id'],
                                    'RESCUE')
Example #40
 def test_pause_unpause_server(self):
     """Test pausing and unpausing server"""
     self.client.pause_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
     self.client.unpause_server(self.server_id)
     waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
Example #41
 def _wait_for_server_status(self, status):
     for server in self.servers:
         # Make sure nova list keeps working throughout the build process
         self.servers_client.list_servers()
         waiters.wait_for_server_status(self.servers_client,
                                        server['id'], status)
Example #42
    def resource_setup(cls):
        super(ListImageFiltersTestJSON, cls).resource_setup()

        def _create_image():
            params = {
                'name': data_utils.rand_name(cls.__name__ + '-image'),
                'container_format': 'bare',
                'disk_format': 'raw'
            }
            if CONF.image_feature_enabled.api_v1:
                params.update({'is_public': False})
                params = {'headers':
                          common_image.image_meta_to_headers(**params)}
            else:
                params.update({'visibility': 'private'})

            body = cls.glance_client.create_image(**params)
            body = body['image'] if 'image' in body else body
            image_id = body['id']
            cls.addClassResourceCleanup(
                test_utils.call_and_ignore_notfound_exc,
                cls.compute_images_client.delete_image,
                image_id)
            # Wait 1 second between creation and upload to ensure a delta
            # between created_at and updated_at.
            time.sleep(1)
            image_file = io.BytesIO((b'*' * 1024))
            if CONF.image_feature_enabled.api_v1:
                cls.glance_client.update_image(image_id, data=image_file)
            else:
                cls.glance_client.store_image_file(image_id, data=image_file)
            waiters.wait_for_image_status(cls.client, image_id, 'ACTIVE')
            body = cls.client.show_image(image_id)['image']
            return body

        # Create non-snapshot images via glance
        cls.image1 = _create_image()
        cls.image1_id = cls.image1['id']
        cls.image2 = _create_image()
        cls.image2_id = cls.image2['id']
        cls.image3 = _create_image()
        cls.image3_id = cls.image3['id']

        if not CONF.compute_feature_enabled.snapshot:
            return

        # Create instances and snapshots via nova
        cls.server1 = cls.create_test_server()
        cls.server2 = cls.create_test_server(wait_until='ACTIVE')
        # NOTE(sdague) this is faster than doing the sync wait_until on both
        waiters.wait_for_server_status(cls.servers_client,
                                       cls.server1['id'], 'ACTIVE')

        # Create images to be used in the filter tests
        cls.snapshot1 = cls.create_image_from_server(
            cls.server1['id'], wait_until='ACTIVE')
        cls.snapshot1_id = cls.snapshot1['id']

        # Servers have a hidden property for when they are being imaged.
        # Performing back-to-back create-image calls on a single
        # server will sometimes cause failures.
        cls.snapshot3 = cls.create_image_from_server(
            cls.server2['id'], wait_until='ACTIVE')
        cls.snapshot3_id = cls.snapshot3['id']

        # Wait for the server to be active after the image upload
        cls.snapshot2 = cls.create_image_from_server(
            cls.server1['id'], wait_until='ACTIVE')
        cls.snapshot2_id = cls.snapshot2['id']
Example #43
def create_test_server(clients,
                       validatable=False,
                       validation_resources=None,
                       tenant_network=None,
                       wait_until=None,
                       volume_backed=False,
                       **kwargs):
    """Common wrapper utility returning a test server.

    This method is a common wrapper returning a test server that can be
    pingable or sshable.

    :param clients: Client manager which provides OpenStack Tempest clients.
    :param validatable: Whether the server will be pingable or sshable.
    :param validation_resources: Resources created for the connection to the
    server. Include a keypair, a security group and an IP.
    :param tenant_network: Tenant network to be used for creating a server.
    :param wait_until: Server status to wait for the server to reach after
    its creation.
    :param volume_backed: Whether the instance is volume backed or not.
    :returns: a tuple
    """

    # TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE

    if 'name' in kwargs:
        name = kwargs.pop('name')
    else:
        name = data_utils.rand_name(__name__ + "-instance")

    flavor = kwargs.pop('flavor', CONF.compute.flavor_ref)
    image_id = kwargs.pop('image_id', CONF.compute.image_ref)

    kwargs = fixed_network.set_networks_kwarg(tenant_network, kwargs) or {}

    if CONF.validation.run_validation and validatable:
        # As a first implementation, multiple pingable or sshable servers will
        # not be supported
        if 'min_count' in kwargs or 'max_count' in kwargs:
            msg = ("Multiple pingable or sshable servers not supported at "
                   "this stage.")
            raise ValueError(msg)

        if 'security_groups' in kwargs:
            kwargs['security_groups'].append(
                {'name': validation_resources['security_group']['name']})
        else:
            try:
                kwargs['security_groups'] = [{
                    'name':
                    validation_resources['security_group']['name']
                }]
            except KeyError:
                LOG.debug("No security group provided.")

        if 'key_name' not in kwargs:
            try:
                kwargs['key_name'] = validation_resources['keypair']['name']
            except KeyError:
                LOG.debug("No key provided.")

        if CONF.validation.connect_method == 'floating':
            if wait_until is None:
                wait_until = 'ACTIVE'

    if volume_backed:
        volume_name = data_utils.rand_name('volume')
        volumes_client = clients.volumes_v2_client
        if CONF.volume_feature_enabled.api_v1:
            volumes_client = clients.volumes_client
        volume = volumes_client.create_volume(display_name=volume_name,
                                              imageRef=image_id)
        volumes_client.wait_for_volume_status(volume['volume']['id'],
                                              'available')

        bd_map_v2 = [{
            'uuid': volume['volume']['id'],
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'delete_on_termination': True
        }]
        kwargs['block_device_mapping_v2'] = bd_map_v2

        # Since this is boot from volume an image does not need
        # to be specified.
        image_id = ''

    body = clients.servers_client.create_server(name=name,
                                                imageRef=image_id,
                                                flavorRef=flavor,
                                                **kwargs)

    # handle the case of multiple servers
    servers = []
    if 'min_count' in kwargs or 'max_count' in kwargs:
        # Get the created servers whose names match the name param.
        body_servers = clients.servers_client.list_servers()
        servers = \
            [s for s in body_servers['servers'] if s['name'].startswith(name)]
    else:
        body = service_client.ResponseBody(body.response, body['server'])
        servers = [body]

    # The name of the method to associate a floating IP to a server is too
    # long for PEP8 compliance so:
    assoc = clients.compute_floating_ips_client.associate_floating_ip_to_server

    if wait_until:
        for server in servers:
            try:
                waiters.wait_for_server_status(clients.servers_client,
                                               server['id'], wait_until)

                # Multiple validatable servers are not supported for now. Their
                # creation will fail with the condition above (l.58).
                if CONF.validation.run_validation and validatable:
                    if CONF.validation.connect_method == 'floating':
                        assoc(floating_ip=validation_resources['floating_ip']
                              ['ip'],
                              server_id=servers[0]['id'])

            except Exception:
                with excutils.save_and_reraise_exception():
                    if ('preserve_server_on_error' not in kwargs
                            or kwargs['preserve_server_on_error'] is False):
                        for server in servers:
                            try:
                                clients.servers_client.delete_server(
                                    server['id'])
                            except Exception:
                                LOG.exception('Deleting server %s failed',
                                              server['id'])

    return body, servers
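
A hedged usage sketch for the wrapper above; `clients` is an assumed,
pre-built Tempest client manager, not something defined here. With
min_count the wrapper returns every created server whose name starts
with the generated name.

# Sketch only: boot two servers in a single request and wait for both
# to reach ACTIVE.
body, servers = create_test_server(clients, wait_until='ACTIVE',
                                   min_count=2)
assert len(servers) == 2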
 def _wait_server_status_and_check_network_connectivity(
         self, server, keypair, floating_ip):
     waiters.wait_for_server_status(self.servers_client, server['id'],
                                    'ACTIVE')
     self._check_network_connectivity(server, keypair, floating_ip)
Example #45
 def _unrescue(self, server_id):
     self.servers_client.unrescue_server(server_id)
     waiters.wait_for_server_status(self.servers_client, server_id,
                                    'ACTIVE')
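
The unrescue helper above has a natural counterpart; a hedged sketch of
the rescue half, assuming the standard servers_client rescue_server call
and an admin_pass chosen by the caller:

 def _rescue(self, server_id, admin_pass):
     # Put the server into rescue mode and wait for the RESCUE status.
     self.servers_client.rescue_server(server_id, adminPass=admin_pass)
     waiters.wait_for_server_status(self.servers_client, server_id,
                                    'RESCUE')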
    def test_create_backup(self):
        # Positive test: create backups successfully and rotate them correctly
        # create the first and the second backup
        backup1 = data_utils.rand_name('backup-1')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup1).response
        oldest_backup_exist = True

        # the oldest one should be deleted automatically in this test
        def _clean_oldest_backup(oldest_backup):
            if oldest_backup_exist:
                try:
                    self.os.image_client.delete_image(oldest_backup)
                except lib_exc.NotFound:
                    pass
                else:
                    LOG.warning("Deletion of oldest backup %s should not have "
                                "been successful as it should have been "
                                "deleted during rotation." % oldest_backup)

        image1_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(_clean_oldest_backup, image1_id)
        self.os.image_client.wait_for_image_status(image1_id, 'active')

        backup2 = data_utils.rand_name('backup-2')
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup2).response
        image2_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(self.os.image_client.delete_image, image2_id)
        self.os.image_client.wait_for_image_status(image2_id, 'active')

        # verify they have been created
        properties = {
            'image_type': 'backup',
            'backup_type': "daily",
            'instance_uuid': self.server_id,
        }
        image_list = self.os.image_client.list_images(detail=True,
                                                      properties=properties,
                                                      status='active',
                                                      sort_key='created_at',
                                                      sort_dir='asc')['images']
        self.assertEqual(2, len(image_list))
        self.assertEqual((backup1, backup2),
                         (image_list[0]['name'], image_list[1]['name']))

        # create the third one; since the rotation is 2,
        # the first one will be deleted
        backup3 = data_utils.rand_name('backup-3')
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup3).response
        image3_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(self.os.image_client.delete_image, image3_id)
        # the first backup should be deleted
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        self.os.image_client.wait_for_resource_deletion(image1_id)
        oldest_backup_exist = False
        image_list = self.os.image_client.list_images(detail=True,
                                                      properties=properties,
                                                      status='active',
                                                      sort_key='created_at',
                                                      sort_dir='asc')['images']
        self.assertEqual(
            2, len(image_list), 'Unexpected number of images for '
            'v2:test_create_backup; was the oldest backup not '
            'yet deleted? Image list: %s' %
            [image['name'] for image in image_list])
        self.assertEqual((backup2, backup3),
                         (image_list[0]['name'], image_list[1]['name']))
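
The rotation assertion above boils down to "the oldest image disappears,
the newest two survive". A hedged sketch of that check as a standalone
helper (hypothetical, not part of the test base class), using the same
image client calls as the test:

def wait_for_rotation(image_client, oldest_image_id, properties):
    # Block until rotation has deleted the oldest backup image, then
    # return the surviving backup names in creation order.
    image_client.wait_for_resource_deletion(oldest_image_id)
    images = image_client.list_images(detail=True, properties=properties,
                                      status='active',
                                      sort_key='created_at',
                                      sort_dir='asc')['images']
    return [image['name'] for image in images]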
    def test_nova_evacuate_reboot_hv(self):
        """test_nova_evacuate_reboot_hv

        Spin a VM on a hypervisor
        Bring down hypervisor
        Nova evacuate VM
        Bring up hypervisor
        Make sure connectivity remains
        """
        network = self.create_network()
        subnet = self.create_subnet(network,
                                    ip_version=4,
                                    mask_bits=24,
                                    enable_dhcp=True)
        router = self.create_router(
            external_network_id=CONF.network.public_network_id)
        self.router_attach(router, subnet)
        security_group = self.create_open_ssh_security_group()
        server1 = self.create_tenant_server(networks=[network],
                                            security_groups=[security_group],
                                            prepare_for_connectivity=True)
        server2 = self.create_tenant_server(
            networks=[network],
            security_groups=[security_group],
            prepare_for_connectivity=True,
            scheduler_hints={'different_host': server1.id})

        # Find the server that is running on the local compute
        if server1.get_hypervisor_hostname() == socket.gethostname():
            local_server = server1
            remote_server = server2
        else:
            local_server = server2
            remote_server = server1

        self.assert_ping(local_server,
                         remote_server,
                         network=network,
                         ip_version=4)

        remote_host = remote_server.get_hypervisor_hostname()
        # strip out .novalocal if necessary; str.replace returns a new
        # string, so the result must be assigned back
        remote_host = remote_host.replace('.novalocal', '')

        # connect to undercloud / controller of hypervisor
        # Requirement: clouds.yaml with undercloud defined
        connection = openstack.connect(cloud=CONF.nuage_sut.undercloud_name)
        # Find remote host, a hv in the undercloud
        remote_host_hv = connection.compute.find_server(remote_host)
        connection.compute.stop_server(remote_host_hv)

        def cleanup_stopped_remote_hv_server(hv_server):
            # Assure remote hypervisor is booted at end of test even when
            # there is a failure.
            try:
                connection.compute.start_server(hv_server)
            except SDKException:
                # Already started VM
                pass

        self.addCleanup(cleanup_stopped_remote_hv_server, remote_host_hv)
        connection.compute.wait_for_server(remote_host_hv, status='SHUTOFF')

        # Nova evacuate
        # Wait for compute service to notice outage.
        time.sleep(60)
        base_compute_client.COMPUTE_MICROVERSION = 'latest'
        enable_instance_password = (
            self.admin_manager.servers_client.enable_instance_password)
        self.admin_manager.servers_client.enable_instance_password = False
        self.admin_manager.servers_client.evacuate_server(remote_server.id)
        self.admin_manager.servers_client.enable_instance_password = (
            enable_instance_password)
        base_compute_client.COMPUTE_MICROVERSION = None
        waiters.wait_for_server_status(self.manager.servers_client,
                                       remote_server.id, 'ACTIVE')
        remote_server.waiting_for_cloudinit_completion = False
        remote_server.cloudinit_complete = False
        remote_server.wait_for_cloudinit_to_complete()

        # Assert traffic is restored
        self.assert_ping(local_server,
                         remote_server,
                         network=network,
                         ip_version=4)

        # Boot remote hv
        connection.compute.start_server(remote_host_hv)
        connection.compute.wait_for_server(remote_host_hv, status='ACTIVE')

        # Wait 60s for boot process to finish
        time.sleep(60)

        # Assert traffic is not disrupted by the remote hv booting
        self.assert_ping(local_server,
                         remote_server,
                         network=network,
                         ip_version=4)
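
The save/restore handling around COMPUTE_MICROVERSION above is easy to
get wrong on error paths. A hedged sketch of a context manager
(hypothetical, assuming the same base_compute_client module) that keeps
the toggle symmetric even when the body raises:

import contextlib

@contextlib.contextmanager
def compute_microversion(version):
    # Temporarily pin the compute microversion and always restore the
    # previous value, even on exceptions.
    saved = base_compute_client.COMPUTE_MICROVERSION
    base_compute_client.COMPUTE_MICROVERSION = version
    try:
        yield
    finally:
        base_compute_client.COMPUTE_MICROVERSION = saved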
 def test_cold_migration(self):
     server = self.create_test_server(wait_until="ACTIVE")
     with self.override_role():
         self.servers_client.migrate_server(server['id'])
     waiters.wait_for_server_status(self.servers_client,
                                    server['id'], 'VERIFY_RESIZE')
 def _resize_server(self, flavor):
     self.servers_client.resize_server(self.server_id, flavor)
     waiters.wait_for_server_status(self.os_admin.servers_client,
                                    self.server_id, 'VERIFY_RESIZE')
Example #50
    def test_assign_pci_soft_reboot_instance(self):
        # Get the PCI-related parameters and prepare the test
        pci.get_pci_config(self)
        for info in self.infoList:
            info = info.split(':')
            name = info[0]
            pciid = info[1]
            flavor_with_pci_id = pci.create_flavor_with_extra_specs(
                self, name)

            admin_pass = self.image_ssh_password

            cont = pci.gen_rc_local_dict(pci.RC_LSPCI)
            personality = [
                {'path': "/etc/rc.local",
                 'contents': cont}]

            user_data = pci.gen_user_data("\n".join(pci.CONSOLE_DATA))
            server_with_pci = self.create_test_server(
                wait_until='ACTIVE',
                user_data=user_data,
                personality=personality,
                adminPass=admin_pass,
                flavor=flavor_with_pci_id)
            self.server_id = server_with_pci['id']

            # Exactly one PCI device with the expected id should show up
            # in the console output (a list comprehension instead of
            # filter(), which returns an iterator on Python 3).
            pci_info = pci.retry_get_pci_output(
                self.client.get_console_output, self.server_id)
            expect_pci = [line for line in pci_info if pciid in line]
            self.assertEqual(1, len(expect_pci))

            # Soft reboot the server and verify the PCI device is still
            # assigned afterwards.
            self.servers_client.reboot_server(self.server_id, type='SOFT')
            waiters.wait_for_server_status(self.client, self.server_id,
                                           'ACTIVE')

            pci_info = pci.retry_get_pci_output(
                self.client.get_console_output, self.server_id,
                DELIMITER="RC LSPCI")
            expect_pci = [line for line in pci_info if pciid in line]
            self.assertEqual(1, len(expect_pci))
 def test_reboot_server(self):
     self.rbac_utils.switch_role(self, toggle_rbac_role=True)
     self.servers_client.reboot_server(self.server_id, type='HARD')
     waiters.wait_for_server_status(self.os_admin.servers_client,
                                    self.server_id, 'ACTIVE')
 def _confirm_resize_server(self):
     self.servers_client.confirm_resize_server(self.server_id)
     waiters.wait_for_server_status(self.os_admin.servers_client,
                                    self.server_id, 'ACTIVE')
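
The resize helpers above are two halves of one flow: resize puts the
server into VERIFY_RESIZE and confirm brings it back to ACTIVE on the
new flavor. A hedged sketch tying them together (assuming both helpers
live on the same test class):

 def resize_to_flavor(self, flavor):
     # Sketch only: full resize flow built from the two helpers above.
     self._resize_server(flavor)        # server goes to VERIFY_RESIZE
     self._confirm_resize_server()      # server returns to ACTIVE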
Example #53
def create_test_server(clients, validatable=False, validation_resources=None,
                       tenant_network=None, wait_until=None,
                       volume_backed=False, name=None, flavor=None,
                       image_id=None, **kwargs):
    """Common wrapper utility returning a test server.

    This method is a common wrapper returning a test server that can be
    pingable or sshable.

    :param clients: Client manager which provides OpenStack Tempest clients.
    :param validatable: Whether the server will be pingable or sshable.
    :param validation_resources: Resources created for the connection to the
        server. Include a keypair, a security group and an IP.
    :param tenant_network: Tenant network to be used for creating a server.
    :param wait_until: Server status to wait for the server to reach after
        its creation. Additionally PINGABLE and SSHABLE states are also
        accepted when the server is both validatable and has the required
        validation_resources provided.
    :param volume_backed: Whether the server is volume backed or not.
        If this is true, a volume will be created and create server will be
        requested with 'block_device_mapping_v2' populated with below values:

        .. code-block:: python

            bd_map_v2 = [{
                'uuid': volume['volume']['id'],
                'source_type': 'volume',
                'destination_type': 'volume',
                'boot_index': 0,
                'delete_on_termination': True}]
            kwargs['block_device_mapping_v2'] = bd_map_v2

        If server needs to be booted from volume with other combination of bdm
        inputs than mentioned above, then pass the bdm inputs explicitly as
        kwargs and image_id as empty string ('').
    :param name: Name of the server to be provisioned. If not defined, a
        random name containing '-instance' will be generated.
    :param flavor: Flavor of the server to be provisioned. If not defined,
        CONF.compute.flavor_ref will be used instead.
    :param image_id: ID of the image to be used to provision the server. If not
        defined, CONF.compute.image_ref will be used instead.
    :returns: a tuple (body, servers) of the create response body and the
        list of created servers.
    """

    if name is None:
        name = data_utils.rand_name(__name__ + "-instance")
    if flavor is None:
        flavor = CONF.compute.flavor_ref
    if image_id is None:
        image_id = CONF.compute.image_ref

    kwargs = fixed_network.set_networks_kwarg(
        tenant_network, kwargs) or {}

    multiple_create_request = (max(kwargs.get('min_count', 0),
                                   kwargs.get('max_count', 0)) > 1)

    if CONF.validation.run_validation and validatable:
        # As a first implementation, multiple pingable or sshable servers will
        # not be supported
        if multiple_create_request:
            msg = ("Multiple pingable or sshable servers not supported at "
                   "this stage.")
            raise ValueError(msg)

        LOG.debug("Provisioning test server with validation resources %s",
                  validation_resources)
        if 'security_groups' in kwargs:
            kwargs['security_groups'].append(
                {'name': validation_resources['security_group']['name']})
        else:
            try:
                kwargs['security_groups'] = [
                    {'name': validation_resources['security_group']['name']}]
            except KeyError:
                LOG.debug("No security group provided.")

        if 'key_name' not in kwargs:
            try:
                kwargs['key_name'] = validation_resources['keypair']['name']
            except KeyError:
                LOG.debug("No key provided.")

        if CONF.validation.connect_method == 'floating':
            if wait_until is None:
                wait_until = 'ACTIVE'

        if 'user_data' not in kwargs:
            # If nothing overrides the default user data script then run
            # a simple script on the host to print networking info. This is
            # to aid in debugging ssh failures.
            script = '''
                     #!/bin/sh
                     echo "Printing {user} user authorized keys"
                     cat ~{user}/.ssh/authorized_keys || true
                     '''.format(user=CONF.validation.image_ssh_user)
            script_clean = textwrap.dedent(script).lstrip().encode('utf8')
            script_b64 = base64.b64encode(script_clean)
            kwargs['user_data'] = script_b64

    if volume_backed:
        volume_name = data_utils.rand_name(__name__ + '-volume')
        volumes_client = clients.volumes_client_latest
        params = {'name': volume_name,
                  'imageRef': image_id,
                  'size': CONF.volume.volume_size}
        if CONF.compute.compute_volume_common_az:
            params.setdefault('availability_zone',
                              CONF.compute.compute_volume_common_az)
        volume = volumes_client.create_volume(**params)
        try:
            waiters.wait_for_volume_resource_status(volumes_client,
                                                    volume['volume']['id'],
                                                    'available')
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    volumes_client.delete_volume(volume['volume']['id'])
                    volumes_client.wait_for_resource_deletion(
                        volume['volume']['id'])
                except Exception as exc:
                    LOG.exception("Deleting volume %s failed, exception %s",
                                  volume['volume']['id'], exc)
        bd_map_v2 = [{
            'uuid': volume['volume']['id'],
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'delete_on_termination': True}]
        kwargs['block_device_mapping_v2'] = bd_map_v2

        # Since the server boots from a volume, an image does not
        # need to be specified.
        image_id = ''

    if CONF.compute.compute_volume_common_az:
        kwargs.setdefault('availability_zone',
                          CONF.compute.compute_volume_common_az)
    body = clients.servers_client.create_server(name=name, imageRef=image_id,
                                                flavorRef=flavor,
                                                **kwargs)
    request_id = body.response['x-openstack-request-id']

    # handle the case of multiple servers
    if multiple_create_request:
        # Get the created servers whose names match the name param.
        body_servers = clients.servers_client.list_servers()
        servers = \
            [s for s in body_servers['servers'] if s['name'].startswith(name)]
    else:
        body = rest_client.ResponseBody(body.response, body['server'])
        servers = [body]

    if wait_until:

        # NOTE(lyarwood): PINGABLE and SSHABLE both require the instance to
        # go ACTIVE initially before we can setup the fip(s) etc so stash
        # this additional wait state for later use.
        wait_until_extra = None
        if wait_until in ['PINGABLE', 'SSHABLE']:
            wait_until_extra = wait_until
            wait_until = 'ACTIVE'

        for server in servers:
            try:
                waiters.wait_for_server_status(
                    clients.servers_client, server['id'], wait_until,
                    request_id=request_id)
                if CONF.validation.run_validation and validatable:
                    if CONF.validation.connect_method == 'floating':
                        _setup_validation_fip(
                            server, clients, tenant_network,
                            validation_resources)
                    if wait_until_extra:
                        wait_for_ssh_or_ping(
                            server, clients, tenant_network,
                            validatable, validation_resources,
                            wait_until_extra, False)

            except Exception:
                with excutils.save_and_reraise_exception():
                    for server in servers:
                        try:
                            clients.servers_client.delete_server(
                                server['id'])
                        except Exception:
                            LOG.exception('Deleting server %s failed',
                                          server['id'])
                    for server in servers:
                        # NOTE(artom) If the servers were booted with volumes
                        # and with delete_on_termination=False we need to wait
                        # for the servers to go away before proceeding with
                        # cleanup, otherwise we'll attempt to delete the
                        # volumes while they're still attached to servers that
                        # are in the process of being deleted.
                        try:
                            waiters.wait_for_server_termination(
                                clients.servers_client, server['id'])
                        except Exception:
                            LOG.exception('Server %s failed to delete in time',
                                          server['id'])

    return body, servers
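
A hedged usage sketch for the wrapper above; `clients` and
`validation_resources` are assumed to be prepared by the caller:

# Sketch only: boot a volume-backed server and return once it answers
# over SSH.
body, servers = create_test_server(
    clients, validatable=True,
    validation_resources=validation_resources,
    volume_backed=True, wait_until='SSHABLE')
server_id = servers[0]['id']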
 def _stop_server(self):
     self.servers_client.stop_server(self.server_id)
     waiters.wait_for_server_status(self.os_admin.servers_client,
                                    self.server_id, 'SHUTOFF')
Example #55
    def create_image_from_server(cls, server_id, **kwargs):
        """Wrapper utility that returns an image created from the server.

        If compute microversion >= 2.36, the returned image response will
        be from the image service API rather than the compute image proxy API.
        """
        name = kwargs.pop('name',
                          data_utils.rand_name(cls.__name__ + "-image"))
        wait_until = kwargs.pop('wait_until', None)
        wait_for_server = kwargs.pop('wait_for_server', True)

        image = cls.compute_images_client.create_image(server_id,
                                                       name=name,
                                                       **kwargs)
        if api_version_utils.compare_version_header_to_response(
                "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
            image_id = image['image_id']
        else:
            image_id = data_utils.parse_image_id(image.response['location'])

        # The compute image proxy APIs were deprecated in 2.35 so
        # use the images client directly if the API microversion being
        # used is >=2.36.
        if not cls.is_requested_microversion_compatible('2.35'):
            client = cls.images_client
        else:
            client = cls.compute_images_client
        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                    client.delete_image, image_id)

        if wait_until is not None:
            try:
                wait_until = wait_until.upper()
                if not cls.is_requested_microversion_compatible('2.35'):
                    wait_until = wait_until.lower()
                waiters.wait_for_image_status(client, image_id, wait_until)
            except lib_exc.NotFound:
                if wait_until.upper() == 'ACTIVE':
                    # If the image is not found after create_image returned
                    # that means the snapshot failed in nova-compute and nova
                    # deleted the image. There should be a compute fault
                    # recorded with the server in that case, so get the server
                    # and dump some details.
                    server = (
                        cls.servers_client.show_server(server_id)['server'])
                    if 'fault' in server:
                        raise exceptions.SnapshotNotFoundException(
                            server['fault'], image_id=image_id)
                    else:
                        raise exceptions.SnapshotNotFoundException(
                            image_id=image_id)
                else:
                    raise
            image = client.show_image(image_id)
            # Compute image client returns response wrapped in 'image' element
            # which is not the case with Glance image client.
            if 'image' in image:
                image = image['image']

            if wait_until.upper() == 'ACTIVE':
                if wait_for_server:
                    waiters.wait_for_server_status(cls.servers_client,
                                                   server_id, 'ACTIVE')
        return image
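
A hedged usage sketch for the snapshot helper above: create an image
from a running server, wait for it to become active, then boot a new
server from it with the create_test_server wrapper documented earlier
(server_id and clients are assumptions):

# Sketch only.
image = cls.create_image_from_server(server_id, wait_until='ACTIVE')
body, servers = create_test_server(clients, image_id=image['id'],
                                   wait_until='ACTIVE')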
 def test_rebuild_server(self):
     self.rbac_utils.switch_role(self, toggle_rbac_role=True)
     self.servers_client.rebuild_server(self.server_id, self.image_ref)
     waiters.wait_for_server_status(self.os_admin.servers_client,
                                    self.server_id, 'ACTIVE')
Example #57
 def wait_for_servers_become_active(self, server_list):
     for serv in server_list:
         waiters.wait_for_server_status(self.manager.servers_client,
                                        serv['id'], 'ACTIVE')
Example #58
 def reboot_server(self, server_id, type):
     """Reboot a server and wait for it to be ACTIVE."""
     self.servers_client.reboot_server(server_id, type=type)
     waiters.wait_for_server_status(self.servers_client, server_id,
                                    'ACTIVE')
Example #59
 def reboot_instance(self, instance_id):
     """Reboot the instance with the given id."""
     self.servers_client.reboot_server(server_id=instance_id, type='soft')
     waiters.wait_for_server_status(self.servers_client, instance_id,
                                    'ACTIVE')
Example #60
 def _start_server(self, name):
     for sname, value in self.servers.items():
         if sname == name:
             self.servers_client.start_server(value)
             waiters.wait_for_server_status(self.servers_client, value,
                                            'ACTIVE')
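
Every example on this page ultimately funnels into
waiters.wait_for_server_status. Conceptually it is a poll-until-status
loop; a minimal hedged sketch follows (names and signature are
illustrative, not Tempest's real implementation):

import time


def wait_for_status(show_server, server_id, status, timeout=300,
                    interval=3):
    # Poll the server until it reaches `status`, failing fast if it
    # falls into ERROR and raising once `timeout` seconds have elapsed.
    deadline = time.time() + timeout
    while time.time() < deadline:
        server = show_server(server_id)['server']
        if server['status'] == status:
            return server
        if server['status'] == 'ERROR' and status != 'ERROR':
            raise RuntimeError('Server %s went to ERROR' % server_id)
        time.sleep(interval)
    raise TimeoutError('Server %s did not reach %s within %d seconds'
                       % (server_id, status, timeout))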