Example #1
    def destroy_volume_wait(cls, volume):
        """Delete volume, tries to detach first.

           Use just for teardown!
        """
        exc_num = 0
        snaps = volume.snapshots()
        if snaps:
            LOG.critical("Volume %s has %s snapshot(s)", volume.id,
                         [snap.id for snap in snaps])

        # NOTE(afazekas): detaching/attaching not valid EC2 status
        def _volume_state():
            volume.update(validate=True)
            try:
                # NOTE(gmann): Make sure volume is attached.
                # Checking status as 'not "available"' is not enough to make
                # sure volume is attached as it can be in "error" state
                if volume.status == "in-use":
                    volume.detach(force=True)
            except BaseException:
                LOG.exception("Failed to detach volume %s" % volume)
                # exc_num += 1 "nonlocal" not in python2
            return volume.status

        try:
            wait.re_search_wait(_volume_state, "available")
            # does not validate the status
            LOG.info(_volume_state())
            volume.delete()
        except BaseException:
            LOG.exception("Failed to delete volume %s" % volume)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
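For context, a minimal sketch of the polling helper this teardown relies on, assuming wait.re_search_wait(lfunction, regexp) repeatedly calls lfunction and returns once the regexp matches its result; the timeout and interval defaults below are illustrative, not tempest's actual values:

    import re
    import time


    def re_search_wait(lfunction, regexp, timeout=196, interval=1):
        """Poll lfunction until its string result matches regexp."""
        start = time.time()
        while True:
            text = str(lfunction())
            if re.search(regexp, text):
                return text
            if time.time() - start >= timeout:
                raise RuntimeError("Timed out waiting for %r" % regexp)
            time.sleep(interval)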
Example #2
    def destroy_reservation(cls, reservation):
        """Terminate instances in a reservation, just for teardown."""
        exc_num = 0

        def _instance_state():
            try:
                instance.update(validate=True)
            except ValueError:
                return "_GONE"
            except exception.EC2ResponseError as exc:
                # Any error other than InvalidInstanceID.NotFound is
                # treated as the instance being gone
                if cls.ec2_error_code.\
                        client.InvalidInstanceID.NotFound.match(exc) is None:
                    return "_GONE"
                # NOTE(afazekas): incorrect code,
                # but the resource must be destroyed
                if exc.error_code == "InstanceNotFound":
                    return "_GONE"

            return instance.state

        for instance in reservation.instances:
            try:
                instance.terminate()
                wait.re_search_wait(_instance_state, "_GONE")
            except BaseException:
                LOG.exception("Failed to terminate instance %s " % instance)
                exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
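Both helpers follow the same failure-aggregation idiom: attempt every cleanup, count failures, and raise one summary exception at the end so a single bad resource does not mask the rest. A self-contained sketch of that pattern (TearDownException below stands in for tempest's exceptions.TearDownException):

    import logging

    LOG = logging.getLogger(__name__)


    class TearDownException(Exception):
        def __init__(self, num):
            super(TearDownException, self).__init__(
                "%d cleanup(s) failed" % num)


    def teardown_all(cleanups):
        """Run every cleanup; report a single summary failure at the end."""
        exc_num = 0
        for cleanup in cleanups:
            try:
                cleanup()
            except BaseException:
                LOG.exception("Cleanup %s failed", cleanup)
                exc_num += 1
        if exc_num:
            raise TearDownException(num=exc_num)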
Example #3
    def test_compute_with_volumes(self):
        # EC2 integration test #1 (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))

        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)

        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
                                               self.zone)
        LOG.debug("Volume created - status: %s", volume.status)

        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report "available" before it is
        # actually available

        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output")
        try:
            resp = ssh.write_to_console(text)
        except Exception:
            if not CONF.compute_feature_enabled.console_output:
                LOG.debug('Console output not supported, cannot log')
            else:
                console_output = instance.get_console_output().output
                LOG.debug('Console output for %s\nbody=\n%s',
                          instance.id, console_output)
            raise

        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status

        wait.re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so we just test whether the partition count
        # increased/decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            if len(current) > len(part_lines):
                return 'INCREASE'
            if len(current) < len(part_lines):
                return 'DECREASE'
            return 'EQUAL'

        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(volume, "available")

        wait.state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDisassociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
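The _part_state check above is really about partition counts, not the textual order of the listing. A standalone sketch of the comparison, assuming get_partitions() returns /proc/partitions-style text:

    def partition_change(before_text, after_text):
        """Compare two partition listings by line count."""
        before = [line for line in before_text.split('\n') if line.strip()]
        after = [line for line in after_text.split('\n') if line.strip()]
        if len(after) > len(before):
            return 'INCREASE'
        if len(after) < len(before):
            return 'DECREASE'
        return 'EQUAL'

    # e.g. partition_change(before, ssh.get_partitions()) returns
    # 'INCREASE' once the attached volume shows up in the guest.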