Example #1
    def destroy_reservation(cls, reservation):
        """Terminate instances in a reservation, just for teardown."""
        exc_num = 0

        def _instance_state():
            try:
                instance.update(validate=True)
            except ValueError:
                return "_GONE"
            except exception.EC2ResponseError as exc:
                if cls.ec2_error_code.\
                        client.InvalidInstanceID.NotFound.match(exc) is None:
                    return "_GONE"
                # NOTE(afazekas): incorrect code,
                # but the resource must be destroyed
                if exc.error_code == "InstanceNotFound":
                    return "_GONE"

            return instance.state

        for instance in reservation.instances:
            try:
                instance.terminate()
                wait.re_search_wait(_instance_state, "_GONE")
            except BaseException:
                LOG.exception("Failed to terminate instance %s " % instance)
                exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
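
A note on the helper: wait.re_search_wait is referenced throughout these
examples but not shown on this page. A minimal sketch of such a
poll-until-regex-match helper, assuming a fixed polling interval and timeout
(the real tempest helper may differ in signature and backoff), could look
like this:

    import re
    import time

    def re_search_wait(state_func, regexp, timeout=120, interval=5):
        # Sketch: poll state_func() until its string result matches
        # regexp, or fail once the timeout elapses.
        deadline = time.time() + timeout
        while True:
            state = state_func()
            if re.search(regexp, state):
                return state
            if time.time() > deadline:
                raise AssertionError(
                    "state %r never matched %r" % (state, regexp))
            time.sleep(interval)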
Example #2
    def destroy_volume_wait(cls, volume):
        """Delete volume, tries to detach first.
           Use just for teardown!
        """
        exc_num = 0
        snaps = volume.snapshots()
        if len(snaps):
            LOG.critical("%s Volume has %s snapshot(s)", volume.id,
                         [snap.id for snap in snaps])

        # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
        def _volume_state():
            volume.update(validate=True)
            try:
                if volume.status != "available":
                    volume.detach(force=True)
            except BaseException:
                LOG.exception("Failed to detach volume %s" % volume)
                # exc_num += 1 "nonlocal" not in python2
            return volume.status

        try:
            wait.re_search_wait(_volume_state, "available")
            # does not validate the status
            LOG.info(_volume_state())
            volume.delete()
        except BaseException:
            LOG.exception("Failed to delete volume %s" % volume)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
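
The commented-out exc_num += 1 inside _volume_state points at a Python 2
limitation: an inner function cannot rebind a counter in its enclosing scope
without "nonlocal", which only exists in Python 3. An illustrative sketch of
both idioms (not part of the test code):

    # Python 3: "nonlocal" lets the inner function rebind the counter.
    def count_failures_py3(attempts):
        exc_num = 0

        def _run(fn):
            nonlocal exc_num
            try:
                fn()
            except BaseException:
                exc_num += 1

        for fn in attempts:
            _run(fn)
        return exc_num

    # Python 2 workaround: mutate a one-element list instead of rebinding.
    def count_failures_py2(attempts):
        exc_num = [0]

        def _run(fn):
            try:
                fn()
            except BaseException:
                exc_num[0] += 1

        for fn in attempts:
            _run(fn)
        return exc_num[0]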
Example #3
    def destroy_reservation(cls, reservation):
        """Terminate instances in a reservation, just for teardown."""
        exc_num = 0

        def _instance_state():
            try:
                instance.update(validate=True)
            except ValueError:
                return "_GONE"
            except exception.EC2ResponseError as exc:
                if cls.ec2_error_code.\
                        client.InvalidInstanceID.NotFound.match(exc):
                    return "_GONE"
                # NOTE(afazekas): incorrect code,
                # but the resource must be destroyed
                if exc.error_code == "InstanceNotFound":
                    return "_GONE"

            return instance.state

        for instance in reservation.instances:
            try:
                instance.terminate()
                re_search_wait(_instance_state, "_GONE")
            except BaseException as exc:
                LOG.exception(exc)
                exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
Example #4
    def destroy_volume_wait(cls, volume):
        """Delete volume, tryies to detach first.
           Use just for teardown!
        """
        exc_num = 0
        snaps = volume.snapshots()
        if len(snaps):
            LOG.critical("%s Volume has %s snapshot(s)", volume.id,
                         [snap.id for snap in snaps])

        # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
        def _volume_state():
            volume.update(validate=True)
            try:
                if volume.status != "available":
                    volume.detach(force=True)
            except BaseException as exc:
                LOG.exception(exc)
                # exc_num += 1 "nonlocal" not in python2
            return volume.status

        try:
            re_search_wait(_volume_state, "available")  # does not validate the status
            LOG.info(_volume_state())
            volume.delete()
        except BaseException as exc:
            LOG.exception(exc)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
Example #5
    def destroy_volume_wait(cls, volume):
        """Delete volume, tries to detach first.
           Use just for teardown!
        """
        exc_num = 0
        snaps = volume.snapshots()
        if len(snaps):
            LOG.critical("%s Volume has %s snapshot(s)", volume.id,
                         [snap.id for snap in snaps])

        # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
        def _volume_state():
            volume.update(validate=True)
            try:
                # NOTE(gmann): Make sure volume is attached.
                # Checking status as 'not "available"' is not enough to make
                # sure volume is attached as it can be in "error" state
                if volume.status == "in-use":
                    volume.detach(force=True)
            except BaseException:
                LOG.exception("Failed to detach volume %s" % volume)
                # exc_num += 1 "nonlocal" not in python2
            return volume.status

        try:
            wait.re_search_wait(_volume_state, "available")
            # does not validate the status
            LOG.info(_volume_state())
            volume.delete()
        except BaseException:
            LOG.exception("Failed to delete volume %s" % volume)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
Example #6
    def destroy_volume_wait(cls, volume):
        """Delete volume, tries to detach first.
           Use just for teardown!
        """
        exc_num = 0
        snaps = volume.snapshots()
        if len(snaps):
            LOG.critical("%s Volume has %s snapshot(s)", volume.id,
                         [snap.id for snap in snaps])

        # NOTE(afazekas): detaching/attaching are not valid EC2 statuses
        def _volume_state():
            volume.update(validate=True)
            try:
                # NOTE(gmann): Make sure volume is attached.
                # Checking status as 'not "available"' is not enough to make
                # sure volume is attached as it can be in "error" state
                if volume.status == "in-use":
                    volume.detach(force=True)
            except BaseException:
                LOG.exception("Failed to detach volume %s" % volume)
                # exc_num += 1 "nonlocal" not in python2
            return volume.status

        try:
            wait.re_search_wait(_volume_state, "available")
            # does not validate the status
            LOG.info(_volume_state())
            volume.delete()
        except BaseException:
            LOG.exception("Failed to delete volume %s" % volume)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
Example #7
    def test_001_attach_volume(self):
        """Attach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")

        self._start_test()

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = self.ctx.instance.placement
        volume = self.ec2_client.create_volume(self.volume_size, zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        self.ctx.volume = volume
        # NOTE(apavlov): wait for it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it becomes available
        self.ctx.part_lines = self.ctx.ssh.get_partitions().split('\n')
        volume.attach(self.ctx.instance.id, "/dev/" + self.volume_attach_name)

        # NOTE(apavlov): "attaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "in-use")
        boto_wait.re_search_wait(self._volume_state, "in-use")

        boto_wait.state_wait(self._part_state, 1)
        part_lines_new = self.ctx.ssh.get_partitions().split('\n')
        volume_name = utils.detect_new_volume(self.ctx.part_lines,
                                              part_lines_new)
        self.ctx.part_lines = part_lines_new

        self._end_test("Create and attach volume")

        self.ctx.ssh.exec_command("PATH=$PATH:/usr/sbin:/usr/bin "
                                  "&& sudo mkfs.ext3 /dev/" + volume_name)
        self.ctx.ssh.exec_command("sudo mkdir -m 777 /vol "
                                  "&& sudo mount /dev/" + volume_name +
                                  " /vol")
        self.ctx.volume_ready = True

        self._check_test()
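
The utils.detect_new_volume helper used above compares the partition listings
captured before and after the attach to find the new device. A plausible
sketch, assuming /proc/partitions-style lines with the device name in the
last column (the real helper's parsing may differ):

    def detect_new_volume(old_lines, new_lines):
        # Sketch: return the device name (last column) of the first
        # partition line that appears only in the post-attach listing.
        added = set(new_lines) - set(old_lines)
        for line in added:
            fields = line.split()
            if fields:
                return fields[-1]
        return None

With the listings above, detect_new_volume(self.ctx.part_lines,
part_lines_new) would return a device name such as "vdb".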
Example #9
    def test_005_detach_volume(self):
        """Detach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_ready:
            raise self.skipException("Volume preparation failed")

        self._start_test()

        self.ctx.ssh.exec_command("sudo umount /vol")

        self.ctx.volume.detach()

        # NOTE(apavlov): "detaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "available")
        boto_wait.re_search_wait(self._volume_state, "available")

        self._end_test("Detach volume")

        boto_wait.state_wait(self._part_state, -1)

        self._check_test()
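
boto_wait.state_wait is the non-regex counterpart of re_search_wait: it polls
until the state function returns the expected value (here -1, meaning the
partition count decreased). A minimal sketch under the same fixed-interval
assumption:

    import time

    def state_wait(state_func, expected, timeout=120, interval=5):
        # Sketch: poll state_func() until it returns the expected value,
        # failing once the timeout elapses.
        deadline = time.time() + timeout
        while True:
            state = state_func()
            if state == expected:
                return state
            if time.time() > deadline:
                raise AssertionError(
                    "state %r never reached %r" % (state, expected))
            time.sleep(interval)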
Example #11
    def test_integration_1(self):
        # EC2 integration test #1 (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))
        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(1, self.zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report 'available' before it is
        # actually available

        ssh = RemoteClient(address.public_ip,
                           CONF.compute.ssh_user,
                           pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output -")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            volume.update(validate=True)
            return volume.status

        self.assertVolumeStatusWait(_volume_state, "in-use")
        re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so here we just test whether the partition count
        # increased or decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'

        state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(_volume_state, "available")
        re_search_wait(_volume_state, "available")
        LOG.info("Volume %s state: %s", volume.id, volume.status)

        state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
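
Note that _part_state above compares the partition line lists themselves
(current > part_lines), which orders them lexicographically and only
incidentally tracks the number of partitions. The len()-based comparison in
Example #14 is the more direct form; wrapped as a factory it would read:

    def make_part_state(ssh, baseline_lines):
        # Returns a poller reporting whether the partition count grew,
        # shrank, or stayed the same relative to the baseline listing.
        def _part_state():
            current = ssh.get_partitions().split('\n')
            if len(current) > len(baseline_lines):
                return 'INCREASE'
            if len(current) < len(baseline_lines):
                return 'DECREASE'
            return 'EQUAL'
        return _part_state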
Example #12
    def test_compute_with_volumes(self):
        # EC2 integration test #1 (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))

        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)

        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
                                               self.zone)
        LOG.debug("Volume created - status: %s", volume.status)

        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report 'available' before it is
        # actually available

        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output")
        try:
            resp = ssh.write_to_console(text)
        except Exception:
            if not CONF.compute_feature_enabled.console_output:
                LOG.debug('Console output not supported, cannot log')
            else:
                console_output = instance.get_console_output().output
                LOG.debug('Console output for %s\nbody=\n%s',
                          instance.id, console_output)
            raise

        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status

        wait.re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so here we just test whether the partition count
        # increased or decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'

        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(volume, "available")

        wait.state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
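
The _volume_state closure above is the key trick in this example: Nova folds
EC2's separate volume and attachment statuses into a single 'in-use', so the
test also consults attach_data.status. Distilled into a standalone predicate
(a sketch assuming a boto-style volume object):

    def effective_volume_status(volume):
        # Sketch: report 'attaching' until the attachment itself reports
        # 'attached', so that 'in-use' really means the device is usable.
        volume.update(validate=True)
        if (volume.status == 'in-use'
                and volume.attach_data.status != 'attached'):
            return 'attaching'
        return volume.status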
Example #13
    def test_compute_with_volumes(self):
        # EC2 integration test #1 (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(
            sec_group_name, group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        self.assertTrue(
            self.ec2_client.authorize_security_group(sec_group_name,
                                                     ip_protocol="icmp",
                                                     cidr_ip="0.0.0.0/0",
                                                     from_port=-1,
                                                     to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(sec_group_name,
                                                     ip_protocol="tcp",
                                                     cidr_ip="0.0.0.0/0",
                                                     from_port=22,
                                                     to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name, ))

        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)

        self.addResourceCleanUp(self.destroy_reservation, reservation)
        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
                                               self.zone)
        LOG.debug("Volume created - status: %s", volume.status)

        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report 'available' before it is
        # actually available

        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output")
        try:
            resp = ssh.write_to_console(text)
        except Exception:
            if not CONF.compute_feature_enabled.console_output:
                LOG.debug('Console output not supported, cannot log')
            else:
                console_output = instance.get_console_output().output
                LOG.debug('Console output for %s\nbody=\n%s', instance.id,
                          console_output)
            raise

        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status

        wait.re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so here we just test whether the partition count
        # increased or decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'

        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(volume, "available")

        wait.state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
Example #14
    def _run_scenario(self, scenario_func, snapshot=None):
        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = self.ec2_client.run_instances(
            self.image_id,
            instance_type=self.instance_type,
            key_name=self.keypair.name,
            security_groups=(self.sec_group_name, ))
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = instance.placement
        volume = self.ec2_client.create_volume(1, zone, snapshot=snapshot)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        # NOTE(apavlov): wait for it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        ip_address = self._prepare_public_ip(instance)
        ssh = remote_client.RemoteClient(ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it becomes available
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/" + self.volume_attach_name)

        def _volume_state():
            volume.update(validate=True)
            return volume.status

        self.assertVolumeStatusWait(_volume_state, "in-use")
        boto_wait.re_search_wait(_volume_state, "in-use")

        def _part_state():
            current = ssh.get_partitions().split('\n')
            if len(current) > len(part_lines):
                return 1
            if len(current) < len(part_lines):
                return -1
            return 0

        boto_wait.state_wait(_part_state, 1)
        part_lines_new = ssh.get_partitions().split('\n')
        self.volume_name = utils.detect_new_volume(part_lines, part_lines_new)
        part_lines = part_lines_new

        self._correct_ns_if_needed(ssh)

        snapshot = scenario_func(ssh, volume.id)

        # NOTE(apavlov): stop this instance (imagine that it will be used)
        instance.stop()
        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")

        return snapshot
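
_run_scenario boots an instance, attaches a fresh (or snapshot-backed)
volume, runs the supplied callable with the SSH client and the volume id, and
finally stops the instance. A hypothetical usage sketch (the test name and
scenario function are illustrative, not from the suite):

    def test_format_volume(self):
        # Hypothetical scenario: format the newly attached device; the
        # device name is stored in self.volume_name by _run_scenario
        # before the callable runs.
        def _format_volume(ssh, volume_id):
            ssh.exec_command("PATH=$PATH:/usr/sbin:/usr/bin "
                             "&& sudo mkfs.ext3 /dev/" + self.volume_name)
            return None

        self._run_scenario(_format_volume)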