コード例 #1
0
    def test_attach_detach_volume(self):
        """Stop/start a server with a volume attached and check the volume
        stays attached; then detach it and check it is gone after another
        stop/start cycle.
        """
        self._create_and_attach()
        srv = self.server
        vol = self.volume

        def _cycle_server():
            # Full stop/start cycle; the waits block until the state sticks.
            self.servers_client.stop(srv['id'])
            self.servers_client.wait_for_server_status(srv['id'], 'SHUTOFF')
            self.servers_client.start(srv['id'])
            self.servers_client.wait_for_server_status(srv['id'], 'ACTIVE')

        def _guest_partitions():
            # Fresh ssh session each time: the server was just restarted.
            ssh = RemoteClient(srv, self.image_ssh_user, srv['adminPass'])
            return ssh.get_partitions()

        _cycle_server()
        # The attached device must survive the restart.
        self.assertIn(self.device, _guest_partitions())

        self._detach(srv['id'], vol['id'])
        self.attached = False

        _cycle_server()
        # After detach the device must no longer be visible in the guest.
        self.assertNotIn(self.device, _guest_partitions())
コード例 #2
0
    def test_attach_detach_volume(self):
        """Stop/start a server with a volume attached and check the volume
        stays attached; then detach it and check it is gone after another
        stop/start cycle.
        """
        self._create_and_attach()
        srv = self.server
        vol = self.volume

        def _cycle_server():
            # Full stop/start cycle; the waits block until the state sticks.
            self.servers_client.stop(srv['id'])
            self.servers_client.wait_for_server_status(srv['id'], 'SHUTOFF')
            self.servers_client.start(srv['id'])
            self.servers_client.wait_for_server_status(srv['id'], 'ACTIVE')

        def _guest_partitions():
            # Fresh ssh session each time: the server was just restarted.
            ssh = RemoteClient(srv, self.image_ssh_user,
                               srv['admin_password'])
            return ssh.get_partitions()

        _cycle_server()
        # The attached device must survive the restart.
        self.assertIn(self.device, _guest_partitions())

        self._detach(srv['id'], vol['id'])
        self.attached = False

        _cycle_server()
        # After detach the device must no longer be visible in the guest.
        self.assertNotIn(self.device, _guest_partitions())
コード例 #3
0
ファイル: test_attach_volume.py プロジェクト: andymg/tempest
    def test_attach_detach_volume(self):
        """Stop/start a server with a volume attached and check the volume
        stays attached; then detach it and check it is gone after another
        stop/start cycle.
        """
        # Attach before the try block so the locals used by the cleanup in
        # ``finally`` are always bound (the original could raise NameError
        # there if _create_and_attach failed).
        self._create_and_attach()
        server = self.server
        volume = self.volume
        try:
            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server,
                                        self.ssh_user, server['adminPass'])
            partitions = linux_client.get_partitions()
            # The attached device must survive the restart.
            self.assertIn(self.device, partitions)

            self._detach(server['id'], volume['id'])
            self.attached = False

            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server,
                                        self.ssh_user, server['adminPass'])
            partitions = linux_client.get_partitions()
            # After detach the device must no longer be visible.
            self.assertNotIn(self.device, partitions)
        # BUG FIX: the broad ``except Exception: self.fail(...)`` was
        # removed — it replaced every real traceback with a generic
        # (misspelled) message.  Letting exceptions propagate gives the
        # test runner the genuine failure.
        finally:
            if self.attached:
                self._detach(server['id'], volume['id'])
            # NOTE(maurosr): here we do the cleanup for volume, servers are
            # dealt on BaseComputeTest.tearDownClass
            self._delete(self.volume)
コード例 #4
0
ファイル: test_attach_volume.py プロジェクト: nateben/tempest
    def test_attach_detach_volume(self):
        """Stop/start a server with a volume attached and check the volume
        stays attached; then detach it and check it is gone after another
        stop/start cycle.
        """
        # Attach before the try block so the locals used by the cleanup in
        # ``finally`` are always bound (the original could raise NameError
        # there if _create_and_attach failed).
        self._create_and_attach()
        server = self.server
        volume = self.volume
        try:
            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server, self.ssh_user,
                                        server['adminPass'])
            partitions = linux_client.get_partitions()
            # The attached device must survive the restart.
            self.assertIn(self.device, partitions)

            self._detach(server['id'], volume['id'])
            self.attached = False

            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server, self.ssh_user,
                                        server['adminPass'])
            partitions = linux_client.get_partitions()
            # After detach the device must no longer be visible.
            self.assertNotIn(self.device, partitions)
        # BUG FIX: the broad ``except Exception: self.fail(...)`` was
        # removed — it replaced every real traceback with a generic
        # (misspelled) message.  Letting exceptions propagate gives the
        # test runner the genuine failure.
        finally:
            if self.attached:
                self._detach(server['id'], volume['id'])
            # NOTE(maurosr): here we do the cleanup for volume, servers are
            # dealt on BaseComputeTest.tearDownClass
            self._delete(self.volume)
コード例 #5
0
    def test_attach_detach_volume(self):
        """
        Stop and Start a server with an attached volume, ensuring that
        the volume remains attached; then detach it and ensure it is no
        longer visible after another stop/start cycle.
        """
        server, volume = self._create_and_attach()

        # Tracks whether the volume still needs detaching in the cleanup.
        attached = True

        try:
            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server,
                                        self.ssh_user, server['adminPass'])
            partitions = linux_client.get_partitions()
            # assertIn gives a useful failure message (shows both values),
            # unlike assertTrue(x in y).
            self.assertIn(self.device, partitions)

            self._detach(server['id'], volume['id'])
            attached = False

            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server,
                                        self.ssh_user, server['adminPass'])
            partitions = linux_client.get_partitions()
            self.assertNotIn(self.device, partitions)
        finally:
            if attached:
                self._detach(server['id'], volume['id'])
            self._delete(server['id'], volume['id'])
コード例 #6
0
    def test_attach_detach_volume(self):
        """
        Stop and Start a server with an attached volume, ensuring that
        the volume remains attached; then detach it and ensure it is no
        longer visible after another stop/start cycle.
        """
        server, volume = self._create_and_attach()

        # Tracks whether the volume still needs detaching in the cleanup.
        attached = True

        try:
            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server, self.ssh_user,
                                        server['adminPass'])
            partitions = linux_client.get_partitions()
            # assertIn gives a useful failure message (shows both values),
            # unlike assertTrue(x in y).
            self.assertIn(self.device, partitions)

            self._detach(server['id'], volume['id'])
            attached = False

            self.servers_client.stop(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')

            self.servers_client.start(server['id'])
            self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')

            linux_client = RemoteClient(server, self.ssh_user,
                                        server['adminPass'])
            partitions = linux_client.get_partitions()
            self.assertNotIn(self.device, partitions)
        finally:
            if attached:
                self._detach(server['id'], volume['id'])
            self._delete(server['id'], volume['id'])
コード例 #7
0
    def test_integration_1(self):
        # EC2 1. integration test (not strict)
        # End-to-end flow: boot an AMI instance with a security group and
        # keypair, attach a floating IP and a volume, verify the guest sees
        # the volume over ssh, then tear everything down in reverse order.
        # Every created resource registers a cleanup so failures mid-way
        # still release it.
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        # Open ICMP (all types: -1/-1) and ssh (tcp/22) from anywhere.
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))
        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        # 1 GB volume in the same availability zone as the instance.
        volume = self.ec2_client.create_volume(1, self.zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

        address = self.ec2_client.allocate_address()
        # Keep the cleanup handles so they can be cancelled once the
        # address is explicitly disassociated/released below.
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): the volume may be reported "available" slightly
        # before it actually is usable

        ssh = RemoteClient(address.public_ip,
                           CONF.compute.ssh_user,
                           pkey=self.keypair.material)
        # Write a random marker to the console and wait until it shows up
        # in the instance console output, proving ssh + console both work.
        text = data_utils.rand_name("Pattern text for console output -")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        re_search_wait(_output, text)
        # Snapshot the guest partition listing before attaching the volume.
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            volume.update(validate=True)
            return volume.status

        self.assertVolumeStatusWait(_volume_state, "in-use")
        re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): different hypervisor backends name the devices
        # differently, so we only test whether the number of partition
        # lines increased/decreased rather than looking for a device name.
        def _part_state():
            # NOTE(review): this compares the line lists lexicographically,
            # which acts as a proxy for "more/fewer lines" — confirm this
            # is the intended comparison.
            current = ssh.get_partitions().split('\n')
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'

        state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(_volume_state, "available")
        re_search_wait(_volume_state, "available")
        LOG.info("Volume %s state: %s", volume.id, volume.status)

        state_wait(_part_state, 'DECREASE')

        # Tear down: stop the instance, unhook and release the floating IP,
        # cancelling the now-redundant cleanups registered above.
        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
コード例 #8
0
    def test_verify_created_server_ephemeral_disk(self):
        """Verify that the ephemeral disk is created when creating a server.

        A server booted from a flavor with an ephemeral disk must expose
        exactly one more partition than a server booted from an otherwise
        identical flavor without one.
        """

        def flavor_clean_up(flavor_id):
            # Delete a test flavor and wait until it is really gone.
            resp, body = self.flavor_client.delete_flavor(flavor_id)
            self.assertEqual(resp.status, 202)
            self.flavor_client.wait_for_resource_deletion(flavor_id)

        def create_flavor_with_extra_specs():
            # Flavor with a 1 GB ephemeral disk (plus swap/rxtx extras).
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = 512
            vcpus = 1
            disk = 10

            resp, flavor = (self.flavor_client.create_flavor(
                flavor_with_eph_disk_name,
                ram,
                vcpus,
                disk,
                flavor_with_eph_disk_id,
                ephemeral=1,
                swap=1024,
                rxtx=1))
            self.addCleanup(flavor_clean_up, flavor['id'])
            self.assertEqual(200, resp.status)

            return flavor['id']

        def create_flavor_without_extra_specs():
            # Identical flavor but without the ephemeral disk.
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = 512
            vcpus = 1
            disk = 10

            resp, flavor = (self.flavor_client.create_flavor(
                flavor_no_eph_disk_name, ram, vcpus, disk,
                flavor_no_eph_disk_id))
            self.addCleanup(flavor_clean_up, flavor['id'])
            self.assertEqual(200, resp.status)

            return flavor['id']

        # BUG FIX: the helpers above are local functions, so they are
        # called directly instead of via ``self.`` (which raised
        # AttributeError unless same-named class methods happened to
        # exist and shadow these dead locals).
        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()

        admin_pass = self.image_ssh_password

        resp, server_no_eph_disk = (self.create_test_server(
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id))
        resp, server_with_eph_disk = (self.create_test_server(
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id))
        # Get partition number of server without extra specs.
        linux_client = RemoteClient(server_no_eph_disk, self.ssh_user,
                                    self.password)
        partition_num = len(linux_client.get_partitions())

        linux_client = RemoteClient(server_with_eph_disk, self.ssh_user,
                                    self.password)
        # BUG FIX: compare the partition COUNT on both sides; the original
        # compared an int against the raw get_partitions() result and
        # could never pass.
        self.assertEqual(partition_num + 1,
                         len(linux_client.get_partitions()))
コード例 #9
0
    def test_verify_created_server_ephemeral_disk(self):
        """Verify that the ephemeral disk is created when creating a server.

        A server booted from a flavor with an ephemeral disk must expose
        exactly one more partition than a server booted from an otherwise
        identical flavor without one.
        """

        def flavor_clean_up(flavor_id):
            # Delete a test flavor and wait until it is really gone.
            resp, body = self.flavor_client.delete_flavor(flavor_id)
            self.assertEqual(resp.status, 202)
            self.flavor_client.wait_for_resource_deletion(flavor_id)

        def create_flavor_with_extra_specs():
            # Minimal flavor plus a 1 GB ephemeral disk.
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = 64
            vcpus = 1
            disk = 0

            resp, flavor = (self.flavor_client.
                            create_flavor(flavor_with_eph_disk_name,
                                          ram, vcpus, disk,
                                          flavor_with_eph_disk_id,
                                          ephemeral=1))
            self.addCleanup(flavor_clean_up, flavor['id'])
            self.assertEqual(200, resp.status)

            return flavor['id']

        def create_flavor_without_extra_specs():
            # Identical flavor but without the ephemeral disk.
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
            ram = 64
            vcpus = 1
            disk = 0

            resp, flavor = (self.flavor_client.
                            create_flavor(flavor_no_eph_disk_name,
                                          ram, vcpus, disk,
                                          flavor_no_eph_disk_id))
            self.addCleanup(flavor_clean_up, flavor['id'])
            self.assertEqual(200, resp.status)

            return flavor['id']

        # BUG FIX: the helpers above are local functions, so they are
        # called directly instead of via ``self.`` (which raised
        # AttributeError unless same-named class methods happened to
        # exist and shadow these dead locals).
        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()

        admin_pass = self.image_ssh_password

        resp, server_no_eph_disk = (self.
                                    create_test_server(
                                    wait_until='ACTIVE',
                                    adminPass=admin_pass,
                                    flavor=flavor_no_eph_disk_id))
        resp, server_with_eph_disk = (self.create_test_server(
                                      wait_until='ACTIVE',
                                      adminPass=admin_pass,
                                      flavor=flavor_with_eph_disk_id))
        # Get partition number of server without extra specs.
        linux_client = RemoteClient(server_no_eph_disk,
                                    self.ssh_user, self.password)
        partition_num = len(linux_client.get_partitions())

        linux_client = RemoteClient(server_with_eph_disk,
                                    self.ssh_user, self.password)
        # BUG FIX: compare the partition COUNT on both sides; the original
        # compared an int against the raw get_partitions() result and
        # could never pass.
        self.assertEqual(partition_num + 1,
                         len(linux_client.get_partitions()))
コード例 #10
0
ファイル: test_minimum_basic.py プロジェクト: monroid/tempest
class TestMinimumBasicScenario(manager.OfficialClientTest):

    """
    This is a basic minimum scenario test.

    It exercises multiple components end to end as a regular user, with
    and without optional parameters, and checks command outputs:
    glance image registration, nova boot/list/show/reboot, cinder volume
    create/attach/detach, floating IPs, a security group rule and an ssh
    partition check.
    """

    def _wait_for_server_status(self, status):
        """Block until self.server reaches ``status`` (or time out)."""
        server_id = self.server.id
        self.status_timeout(
            self.compute_client.servers, server_id, status)

    def _wait_for_volume_status(self, status):
        """Block until self.volume reaches ``status`` (or time out)."""
        volume_id = self.volume.id
        self.status_timeout(
            self.volume_client.volumes, volume_id, status)

    def _image_create(self, name, fmt, path, properties=None):
        """Register an image with glance and return its id.

        :param name: prefix for the randomized image name
        :param fmt: used as both container and disk format
        :param path: local file holding the image data
        :param properties: optional extra glance creation parameters
        """
        # BUG FIX: mutable default argument ({}) replaced with None so a
        # single dict instance is not shared across calls.
        if properties is None:
            properties = {}
        name = rand_name('%s-' % name)
        image_file = open(path, 'rb')
        self.addCleanup(image_file.close)
        params = {
            'name': name,
            'container_format': fmt,
            'disk_format': fmt,
            'is_public': 'True',
        }
        params.update(properties)
        image = self.image_client.images.create(**params)
        self.addCleanup(self.image_client.images.delete, image)
        self.assertEqual("queued", image.status)
        image.update(data=image_file)
        return image.id

    def glance_image_create(self):
        """Register the aki/ari/ami image triple; store the ami id."""
        aki_img_path = self.config.scenario.img_dir + "/" + \
            self.config.scenario.aki_img_file
        ari_img_path = self.config.scenario.img_dir + "/" + \
            self.config.scenario.ari_img_file
        ami_img_path = self.config.scenario.img_dir + "/" + \
            self.config.scenario.ami_img_file
        LOG.debug("paths: ami: %s, ari: %s, aki: %s"
                  % (ami_img_path, ari_img_path, aki_img_path))
        kernel_id = self._image_create('scenario-aki', 'aki', aki_img_path)
        ramdisk_id = self._image_create('scenario-ari', 'ari', ari_img_path)
        # The ami image references the kernel/ramdisk registered above.
        properties = {
            'properties': {'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id}
        }
        self.image = self._image_create('scenario-ami', 'ami',
                                        path=ami_img_path,
                                        properties=properties)

    def nova_keypair_add(self):
        """Create a keypair and verify its name round-trips."""
        name = rand_name('scenario-keypair-')

        self.keypair = self.compute_client.keypairs.create(name=name)
        self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
        self.assertEqual(name, self.keypair.name)

    def nova_boot(self):
        """Boot a server from self.image and wait until it is ACTIVE."""
        name = rand_name('scenario-server-')
        client = self.compute_client
        flavor_id = self.config.compute.flavor_ref
        self.server = client.servers.create(name=name, image=self.image,
                                            flavor=flavor_id,
                                            key_name=self.keypair.name)
        self.addCleanup(self.compute_client.servers.delete, self.server)
        self.assertEqual(name, self.server.name)
        self._wait_for_server_status('ACTIVE')

    def nova_list(self):
        """Check the booted server shows up in the server list."""
        servers = self.compute_client.servers.list()
        LOG.debug("server_list:%s" % servers)
        self.assertIn(self.server, servers)

    def nova_show(self):
        """Check GET on the server returns the same server."""
        got_server = self.compute_client.servers.get(self.server)
        LOG.debug("got server:%s" % got_server)
        self.assertEqual(self.server, got_server)

    def cinder_create(self):
        """Create a 1 GB volume and wait until it is available."""
        name = rand_name('scenario-volume-')
        LOG.debug("volume display-name:%s" % name)
        self.volume = self.volume_client.volumes.create(size=1,
                                                        display_name=name)
        LOG.debug("volume created:%s" % self.volume.display_name)
        self._wait_for_volume_status('available')

        self.addCleanup(self.volume_client.volumes.delete, self.volume)
        self.assertEqual(name, self.volume.display_name)

    def cinder_list(self):
        """Check the created volume shows up in the volume list."""
        volumes = self.volume_client.volumes.list()
        self.assertIn(self.volume, volumes)

    def cinder_show(self):
        """Check GET on the volume returns the same volume."""
        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual(self.volume, volume)

    def nova_volume_attach(self):
        """Attach self.volume to self.server as /dev/vdb."""
        attach_volume_client = self.compute_client.volumes.create_server_volume
        volume = attach_volume_client(self.server.id,
                                      self.volume.id,
                                      '/dev/vdb')
        self.assertEqual(self.volume.id, volume.id)
        self._wait_for_volume_status('in-use')

    def nova_reboot(self):
        """Reboot the server and wait until it is ACTIVE again."""
        self.server.reboot()
        self._wait_for_server_status('ACTIVE')

    def nova_floating_ip_create(self):
        """Allocate a floating IP (released on cleanup)."""
        self.floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(self.floating_ip.delete)

    def nova_floating_ip_add(self):
        """Associate the floating IP with the server."""
        self.server.add_floating_ip(self.floating_ip)

    def nova_security_group_rule_create(self):
        """Open tcp/22 from anywhere on the 'default' security group."""
        sgs = self.compute_client.security_groups.list()
        # BUG FIX: fail explicitly when no 'default' group exists instead
        # of raising NameError on an unbound local.
        secgroup = next((sg for sg in sgs if sg.name == 'default'), None)
        self.assertIsNotNone(secgroup, "No 'default' security group found")

        ruleset = {
            # ssh
            'ip_protocol': 'tcp',
            'from_port': 22,
            'to_port': 22,
            'cidr': '0.0.0.0/0',
            'group_id': None
        }
        sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
                                                                  **ruleset)
        self.addCleanup(self.compute_client.security_group_rules.delete,
                        sg_rule.id)

    def ssh_to_server(self):
        """Open an ssh session to the server via its floating IP."""
        username = self.config.scenario.ssh_user
        self.linux_client = RemoteClient(self.floating_ip.ip,
                                         username,
                                         pkey=self.keypair.private_key)

    def check_partitions(self):
        """The attached /dev/vdb must appear exactly once in the guest."""
        partitions = self.linux_client.get_partitions()
        self.assertEqual(1, partitions.count('vdb'))

    def nova_volume_detach(self):
        """Detach the volume and verify it returns to 'available'."""
        detach_volume_client = self.compute_client.volumes.delete_server_volume
        detach_volume_client(self.server.id, self.volume.id)
        self._wait_for_volume_status('available')

        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual('available', volume.status)

    def test_minimum_basic_scenario(self):
        """Run the whole scenario; order matters (each step depends on
        state set up by the previous ones)."""
        self.glance_image_create()
        self.nova_keypair_add()
        self.nova_boot()
        self.nova_list()
        self.nova_show()
        self.cinder_create()
        self.cinder_list()
        self.cinder_show()
        self.nova_volume_attach()
        self.cinder_show()
        self.nova_reboot()

        self.nova_floating_ip_create()
        self.nova_floating_ip_add()
        self.nova_security_group_rule_create()
        self.ssh_to_server()
        self.check_partitions()

        self.nova_volume_detach()
コード例 #11
0
ファイル: test_minimum_basic.py プロジェクト: rockyg/tempest
class TestMinimumBasicScenario(manager.OfficialClientTest):

    """
    This is a basic minimum scenario test.

    It exercises multiple components end to end as a regular user, with
    and without optional parameters, and checks command outputs:
    glance image registration, nova boot/list/show/reboot, cinder volume
    create/attach/detach, floating IPs, a security group rule and an ssh
    partition check.
    """

    def _wait_for_server_status(self, status):
        """Block until self.server reaches ``status`` (or time out)."""
        server_id = self.server.id
        self.status_timeout(self.compute_client.servers, server_id, status)

    def _wait_for_volume_status(self, status):
        """Block until self.volume reaches ``status`` (or time out)."""
        volume_id = self.volume.id
        self.status_timeout(self.volume_client.volumes, volume_id, status)

    def _image_create(self, name, fmt, path, properties=None):
        """Register an image with glance and return its id."""
        # BUG FIX: mutable default argument ({}) replaced with None so a
        # single dict instance is not shared across calls.
        if properties is None:
            properties = {}
        name = rand_name("%s-" % name)
        image_file = open(path, "rb")
        self.addCleanup(image_file.close)
        params = {"name": name, "container_format": fmt, "disk_format": fmt, "is_public": "True"}
        params.update(properties)
        image = self.image_client.images.create(**params)
        self.addCleanup(self.image_client.images.delete, image)
        self.assertEqual("queued", image.status)
        image.update(data=image_file)
        return image.id

    def glance_image_create(self):
        """Register the aki/ari/ami image triple; store the ami id."""
        aki_img_path = self.config.scenario.img_dir + "/" + self.config.scenario.aki_img_file
        ari_img_path = self.config.scenario.img_dir + "/" + self.config.scenario.ari_img_file
        ami_img_path = self.config.scenario.img_dir + "/" + self.config.scenario.ami_img_file
        LOG.debug("paths: ami: %s, ari: %s, aki: %s" % (ami_img_path, ari_img_path, aki_img_path))
        kernel_id = self._image_create("scenario-aki", "aki", aki_img_path)
        ramdisk_id = self._image_create("scenario-ari", "ari", ari_img_path)
        # The ami image references the kernel/ramdisk registered above.
        properties = {"properties": {"kernel_id": kernel_id, "ramdisk_id": ramdisk_id}}
        self.image = self._image_create("scenario-ami", "ami", path=ami_img_path, properties=properties)

    def nova_keypair_add(self):
        """Create a keypair and verify its name round-trips."""
        name = rand_name("scenario-keypair-")

        self.keypair = self.compute_client.keypairs.create(name=name)
        self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
        self.assertEqual(name, self.keypair.name)

    def nova_boot(self):
        """Boot a server from self.image and wait until it is ACTIVE."""
        name = rand_name("scenario-server-")
        client = self.compute_client
        flavor_id = self.config.compute.flavor_ref
        self.server = client.servers.create(name=name, image=self.image, flavor=flavor_id, key_name=self.keypair.name)
        self.addCleanup(self.compute_client.servers.delete, self.server)
        self.assertEqual(name, self.server.name)
        self._wait_for_server_status("ACTIVE")

    def nova_list(self):
        """Check the booted server shows up in the server list."""
        servers = self.compute_client.servers.list()
        LOG.debug("server_list:%s" % servers)
        self.assertIn(self.server, servers)

    def nova_show(self):
        """Check GET on the server returns the same server."""
        got_server = self.compute_client.servers.get(self.server)
        LOG.debug("got server:%s" % got_server)
        self.assertEqual(self.server, got_server)

    def cinder_create(self):
        """Create a 1 GB volume and wait until it is available."""
        name = rand_name("scenario-volume-")
        LOG.debug("volume display-name:%s" % name)
        self.volume = self.volume_client.volumes.create(size=1, display_name=name)
        LOG.debug("volume created:%s" % self.volume.display_name)
        self._wait_for_volume_status("available")

        self.addCleanup(self.volume_client.volumes.delete, self.volume)
        self.assertEqual(name, self.volume.display_name)

    def cinder_list(self):
        """Check the created volume shows up in the volume list."""
        volumes = self.volume_client.volumes.list()
        self.assertIn(self.volume, volumes)

    def cinder_show(self):
        """Check GET on the volume returns the same volume."""
        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual(self.volume, volume)

    def nova_volume_attach(self):
        """Attach self.volume to self.server as /dev/vdb."""
        attach_volume_client = self.compute_client.volumes.create_server_volume
        volume = attach_volume_client(self.server.id, self.volume.id, "/dev/vdb")
        self.assertEqual(self.volume.id, volume.id)
        self._wait_for_volume_status("in-use")

    def nova_reboot(self):
        """Reboot the server and wait until it is ACTIVE again."""
        self.server.reboot()
        self._wait_for_server_status("ACTIVE")

    def nova_floating_ip_create(self):
        """Allocate a floating IP (released on cleanup)."""
        self.floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(self.floating_ip.delete)

    def nova_floating_ip_add(self):
        """Associate the floating IP with the server."""
        self.server.add_floating_ip(self.floating_ip)

    def nova_security_group_rule_create(self):
        """Open tcp/22 from anywhere on the 'default' security group."""
        sgs = self.compute_client.security_groups.list()
        # BUG FIX: fail explicitly when no 'default' group exists instead
        # of raising NameError on an unbound local.
        secgroup = next((sg for sg in sgs if sg.name == "default"), None)
        self.assertIsNotNone(secgroup, "No 'default' security group found")

        ruleset = {
            # ssh
            "ip_protocol": "tcp",
            "from_port": 22,
            "to_port": 22,
            "cidr": "0.0.0.0/0",
            "group_id": None,
        }
        sg_rule = self.compute_client.security_group_rules.create(secgroup.id, **ruleset)
        self.addCleanup(self.compute_client.security_group_rules.delete, sg_rule.id)

    def ssh_to_server(self):
        """Open an ssh session to the server via its floating IP."""
        username = self.config.scenario.ssh_user
        self.linux_client = RemoteClient(self.floating_ip.ip, username, pkey=self.keypair.private_key)

    def check_partitions(self):
        """The attached /dev/vdb must appear exactly once in the guest."""
        partitions = self.linux_client.get_partitions()
        self.assertEqual(1, partitions.count("vdb"))

    def nova_volume_detach(self):
        """Detach the volume and verify it returns to 'available'."""
        detach_volume_client = self.compute_client.volumes.delete_server_volume
        detach_volume_client(self.server.id, self.volume.id)
        self._wait_for_volume_status("available")

        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual("available", volume.status)

    def test_minimum_basic_scenario(self):
        """Run the whole scenario; order matters (each step depends on
        state set up by the previous ones)."""
        self.glance_image_create()
        self.nova_keypair_add()
        self.nova_boot()
        self.nova_list()
        self.nova_show()
        self.cinder_create()
        self.cinder_list()
        self.cinder_show()
        self.nova_volume_attach()
        self.cinder_show()
        self.nova_reboot()

        self.nova_floating_ip_create()
        self.nova_floating_ip_add()
        self.nova_security_group_rule_create()
        self.ssh_to_server()
        self.check_partitions()

        self.nova_volume_detach()
コード例 #12
0
class TestMinimumBasicScenario(manager.OfficialClientTest):

    """
    This is a basic minimum scenario test.

    This test below:
    * across the multiple components
    * as a regular user
    * with and without optional parameters
    * check command outputs

    """

    def _wait_for_server_status(self, status):
        # Poll nova until self.server reaches the requested status.
        server_id = self.server.id
        self.status_timeout(
            self.compute_client.servers, server_id, status)

    def _wait_for_volume_status(self, status):
        # Poll cinder until self.volume reaches the requested status.
        volume_id = self.volume.id
        self.status_timeout(
            self.volume_client.volumes, volume_id, status)

    def _image_create(self, name, fmt, path, properties=None):
        """Register and upload an image in glance, returning its id.

        The image and its open file handle are cleaned up automatically.
        ``properties`` holds extra creation parameters merged over the
        defaults (e.g. kernel/ramdisk ids for an AMI image).
        """
        # Use None as the sentinel instead of a mutable {} default,
        # which is shared between calls (classic Python pitfall).
        if properties is None:
            properties = {}
        name = rand_name('%s-' % name)
        image_file = open(path, 'rb')
        self.addCleanup(image_file.close)
        params = {
            'name': name,
            'container_format': fmt,
            'disk_format': fmt,
            'is_public': 'True',
        }
        params.update(properties)
        image = self.image_client.images.create(**params)
        self.addCleanup(self.image_client.images.delete, image)
        self.assertEqual("queued", image.status)
        image.update(data=image_file)
        return image.id

    def glance_image_create(self):
        """Upload the aki/ari/ami image triple; remember the ami image id."""
        aki_img_path = self.config.scenario.img_dir + "/" + \
            self.config.scenario.aki_img_file
        ari_img_path = self.config.scenario.img_dir + "/" + \
            self.config.scenario.ari_img_file
        ami_img_path = self.config.scenario.img_dir + "/" + \
            self.config.scenario.ami_img_file
        LOG.debug("paths: ami: %s, ari: %s, aki: %s"
                  % (ami_img_path, ari_img_path, aki_img_path))
        kernel_id = self._image_create('scenario-aki', 'aki', aki_img_path)
        ramdisk_id = self._image_create('scenario-ari', 'ari', ari_img_path)
        # The machine image references the kernel/ramdisk created above.
        properties = {
            'properties': {'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id}
        }
        self.image = self._image_create('scenario-ami', 'ami',
                                        path=ami_img_path,
                                        properties=properties)

    def nova_keypair_add(self):
        """Create an ssh keypair and schedule its removal."""
        name = rand_name('scenario-keypair-')

        self.keypair = self.compute_client.keypairs.create(name=name)
        self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
        self.assertEqual(name, self.keypair.name)

    def nova_boot(self):
        """Boot a server from the uploaded image with our keypair."""
        name = rand_name('scenario-server-')
        client = self.compute_client
        flavor_id = self.config.compute.flavor_ref
        self.server = client.servers.create(name=name, image=self.image,
                                            flavor=flavor_id,
                                            key_name=self.keypair.name)
        self.addCleanup(self.compute_client.servers.delete, self.server)
        self.assertEqual(name, self.server.name)
        self._wait_for_server_status('ACTIVE')

    def nova_list(self):
        """Check the booted server appears in the server listing."""
        servers = self.compute_client.servers.list()
        LOG.debug("server_list:%s" % servers)
        self.assertIn(self.server, servers)

    def nova_show(self):
        """Check a GET on the server returns the same server."""
        got_server = self.compute_client.servers.get(self.server)
        LOG.debug("got server:%s" % got_server)
        self.assertEqual(self.server, got_server)

    def cinder_create(self):
        """Create a 1 GB volume and wait for it to become available."""
        name = rand_name('scenario-volume-')
        LOG.debug("volume display-name:%s" % name)
        self.volume = self.volume_client.volumes.create(size=1,
                                                        display_name=name)
        LOG.debug("volume created:%s" % self.volume.display_name)
        self._wait_for_volume_status('available')

        self.addCleanup(self.volume_client.volumes.delete, self.volume)
        self.assertEqual(name, self.volume.display_name)

    def cinder_list(self):
        """Check the created volume appears in the volume listing."""
        volumes = self.volume_client.volumes.list()
        self.assertIn(self.volume, volumes)

    def cinder_show(self):
        """Check a GET on the volume returns the same volume."""
        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual(self.volume, volume)

    def nova_volume_attach(self):
        """Attach the volume to the server as /dev/vdb."""
        attach_volume_client = self.compute_client.volumes.create_server_volume
        volume = attach_volume_client(self.server.id,
                                      self.volume.id,
                                      '/dev/vdb')
        self.assertEqual(self.volume.id, volume.id)
        self._wait_for_volume_status('in-use')

    def nova_reboot(self):
        """Reboot the server and wait for it to come back ACTIVE."""
        self.server.reboot()
        self._wait_for_server_status('ACTIVE')

    def nova_floating_ip_create(self):
        """Allocate a floating IP and schedule its release."""
        self.floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(self.floating_ip.delete)

    def nova_floating_ip_add(self):
        """Associate the floating IP with the server."""
        self.server.add_floating_ip(self.floating_ip)

    def nova_security_group_rule_create(self):
        """Open TCP port 22 (ssh) on the tenant's default security group.

        The rule is removed again via addCleanup.
        """
        sgs = self.compute_client.security_groups.list()
        # Fail fast with a readable message; the original loop left
        # `secgroup` unbound when no 'default' security group existed.
        secgroup = next((sg for sg in sgs if sg.name == 'default'), None)
        if secgroup is None:
            self.fail("No 'default' security group found")

        ruleset = {
            # ssh
            'ip_protocol': 'tcp',
            'from_port': 22,
            'to_port': 22,
            'cidr': '0.0.0.0/0',
            'group_id': None
        }
        sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
                                                                  **ruleset)
        self.addCleanup(self.compute_client.security_group_rules.delete,
                        sg_rule.id)

    def ssh_to_server(self):
        """Open an ssh connection to the server through its floating IP."""
        username = self.config.scenario.ssh_user
        self.linux_client = RemoteClient(self.floating_ip.ip,
                                         username,
                                         pkey=self.keypair.private_key)

    def check_partitions(self):
        """Verify the attached volume shows up exactly once as vdb."""
        partitions = self.linux_client.get_partitions()
        self.assertEqual(1, partitions.count('vdb'))

    def nova_volume_detach(self):
        """Detach the volume and confirm it returns to 'available'."""
        detach_volume_client = self.compute_client.volumes.delete_server_volume
        detach_volume_client(self.server.id, self.volume.id)
        self._wait_for_volume_status('available')

        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual('available', volume.status)

    def test_minimum_basic_scenario(self):
        """Exercise the image/server/volume lifecycle end to end."""
        self.glance_image_create()
        self.nova_keypair_add()
        self.nova_boot()
        self.nova_list()
        self.nova_show()
        self.cinder_create()
        self.cinder_list()
        self.cinder_show()
        self.nova_volume_attach()
        self.cinder_show()
        self.nova_reboot()

        self.nova_floating_ip_create()
        self.nova_floating_ip_add()
        self.nova_security_group_rule_create()
        self.ssh_to_server()
        self.check_partitions()

        self.nova_volume_detach()