Example #1
0
    def test_register_get_deregister_ami_image(self):
        # Register, retrieve and deregister an ami image, verifying the
        # EC2 API response at each step.
        image = {
            "name": rand_name("ami-name-"),
            "location": self.bucket_name + "/" + self.ami_manifest,
            "type": "ami"
        }
        image["image_id"] = self.images_client.register_image(
            name=image["name"], image_location=image["location"])
        # NOTE(afazekas): delete_snapshot=True might trigger boto lib? bug
        image["cleanUp"] = self.addResourceCleanUp(
            self.images_client.deregister_image, image["image_id"])
        # EC2 image ids carry their type as prefix, e.g. "ami-..."
        self.assertEqual(image["image_id"][0:3], image["type"])
        retrieved_image = self.images_client.get_image(image["image_id"])
        # assertEqual reports both values on failure, unlike assertTrue(==)
        self.assertEqual(retrieved_image.name, image["name"])
        self.assertEqual(retrieved_image.id, image["image_id"])
        state = retrieved_image.state
        if state != "available":

            def _state():
                retr = self.images_client.get_image(image["image_id"])
                return retr.state

            state = state_wait(_state, "available")
        self.assertEqual("available", state)
        self.images_client.deregister_image(image["image_id"])
        self.assertNotIn(image["image_id"],
                         str(self.images_client.get_all_images()))
        self.cancelResourceCleanUp(image["cleanUp"])
Example #2
0
 def test_register_get_deregister_ami_image(self):
     # Register, retrieve and deregister an ami image, checking the
     # EC2 response at each step.
     image = {"name": rand_name("ami-name-"),
              "location": self.bucket_name + "/" + self.ami_manifest,
              "type": "ami"}
     image["image_id"] = self.images_client.register_image(
         name=image["name"],
         image_location=image["location"])
     # NOTE(afazekas): delete_snapshot=True might trigger boto lib? bug
     image["cleanUp"] = self.addResourceCleanUp(
         self.images_client.deregister_image,
         image["image_id"])
     # EC2 image ids carry their type as prefix, e.g. "ami-..."
     self.assertEqual(image["image_id"][0:3], image["type"])
     retrieved_image = self.images_client.get_image(image["image_id"])
     # assertEqual reports both values on failure, unlike assertTrue(==)
     self.assertEqual(retrieved_image.name, image["name"])
     self.assertEqual(retrieved_image.id, image["image_id"])
     state = retrieved_image.state
     if state != "available":
         def _state():
             retr = self.images_client.get_image(image["image_id"])
             return retr.state
         state = state_wait(_state, "available")
     self.assertEqual("available", state)
     self.images_client.deregister_image(image["image_id"])
     self.assertNotIn(image["image_id"], str(
         self.images_client.get_all_images()))
     self.cancelResourceCleanUp(image["cleanUp"])
Example #3
0
 def state_wait_gone(self, lfunction, final_set, valid_set):
     """Wait for lfunction to reach a state in final_set (or for the
     resource to disappear) and assert the outcome is acceptable.
     """
     if not isinstance(final_set, set):
         final_set = {final_set}
     final_set |= self.gone_set
     checker = self.get_lfunction_gone(lfunction)
     state = state_wait(checker, final_set, valid_set)
     self.assertIn(state, valid_set | self.gone_set)
     return state
Example #4
0
 def state_wait_gone(self, lfunction, final_set, valid_set):
     """Wait until the resource enters final_set or goes away entirely,
     asserting the resulting state is valid; return that state.
     """
     if not isinstance(final_set, set):
         final_set = {final_set}
     final_set |= self.gone_set
     gone_checker = self.get_lfunction_gone(lfunction)
     state = wait.state_wait(gone_checker, final_set, valid_set)
     self.assertIn(state, valid_set | self.gone_set)
     return state
Example #5
0
    def resource_setup(cls):
        """Upload s3 materials and register the ami/aki/ari images.

        Registers aki/ari first, then the ami referencing them, and waits
        for every image to reach the "available" state.  All created
        resources are scheduled for cleanup; skips when manifests are
        missing.
        """
        super(InstanceRunTest, cls).resource_setup()
        if not cls.conclusion['A_I_IMAGES_READY']:
            raise cls.skipException("".join(("EC2 ", cls.__name__,
                                    ": requires ami/aki/ari manifest")))
        cls.zone = CONF.boto.aws_zone
        cls.materials_path = CONF.boto.s3_materials_path
        ami_manifest = CONF.boto.ami_manifest
        aki_manifest = CONF.boto.aki_manifest
        ari_manifest = CONF.boto.ari_manifest
        cls.instance_type = CONF.boto.instance_type
        cls.bucket_name = data_utils.rand_name("s3bucket")
        cls.keypair_name = data_utils.rand_name("keypair")
        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                               cls.keypair_name)
        bucket = cls.s3_client.create_bucket(cls.bucket_name)
        cls.addResourceCleanUp(cls.destroy_bucket,
                               cls.s3_client.connection_data,
                               cls.bucket_name)
        s3.s3_upload_dir(bucket, cls.materials_path)
        cls.images = {"ami":
                      {"name": data_utils.rand_name("ami-name"),
                       "location": cls.bucket_name + "/" + ami_manifest},
                      "aki":
                      {"name": data_utils.rand_name("aki-name"),
                       "location": cls.bucket_name + "/" + aki_manifest},
                      "ari":
                      {"name": data_utils.rand_name("ari-name"),
                       "location": cls.bucket_name + "/" + ari_manifest}}
        # Kernel (aki) and ramdisk (ari) must be registered before the
        # ami that references their image ids.
        for image_type in ("aki", "ari"):
            image = cls.images[image_type]
            image["image_id"] = cls.ec2_client.register_image(
                name=image["name"],
                image_location=image["location"])
            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                                   image["image_id"])
        image = cls.images["ami"]
        image["image_id"] = cls.ec2_client.register_image(
            name=image["name"],
            image_location=image["location"],
            kernel_id=cls.images["aki"]["image_id"],
            ramdisk_id=cls.images["ari"]["image_id"])
        cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                               image["image_id"])

        # itervalues: Python 2 only.  _state is rebound each iteration
        # but also called within the same iteration, so the late-binding
        # closure over `image` is safe here.
        for image in cls.images.itervalues():
            def _state():
                retr = cls.ec2_client.get_image(image["image_id"])
                return retr.state
            state = wait.state_wait(_state, "available")
            if state != "available":
                # Deregister everything on failure before bailing out.
                for _image in cls.images.itervalues():
                    cls.ec2_client.deregister_image(_image["image_id"])
                raise exceptions.EC2RegisterImageException(
                    image_id=image["image_id"])
Example #6
0
    def resource_setup(cls):
        """Upload s3 materials and register the ami/aki/ari images.

        Registers aki/ari first, then the ami referencing them, and waits
        for every image to become "available".  All created resources are
        scheduled for cleanup; skips when manifests are missing.
        """
        super(InstanceRunTest, cls).resource_setup()
        if not cls.conclusion['A_I_IMAGES_READY']:
            raise cls.skipException("".join(("EC2 ", cls.__name__,
                                    ": requires ami/aki/ari manifest")))
        cls.zone = CONF.boto.aws_zone
        cls.materials_path = CONF.boto.s3_materials_path
        ami_manifest = CONF.boto.ami_manifest
        aki_manifest = CONF.boto.aki_manifest
        ari_manifest = CONF.boto.ari_manifest
        cls.instance_type = CONF.boto.instance_type
        cls.bucket_name = data_utils.rand_name("s3bucket")
        cls.keypair_name = data_utils.rand_name("keypair")
        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                               cls.keypair_name)
        bucket = cls.s3_client.create_bucket(cls.bucket_name)
        cls.addResourceCleanUp(cls.destroy_bucket,
                               cls.s3_client.connection_data,
                               cls.bucket_name)
        s3.s3_upload_dir(bucket, cls.materials_path)
        cls.images = {"ami":
                      {"name": data_utils.rand_name("ami-name"),
                       "location": cls.bucket_name + "/" + ami_manifest},
                      "aki":
                      {"name": data_utils.rand_name("aki-name"),
                       "location": cls.bucket_name + "/" + aki_manifest},
                      "ari":
                      {"name": data_utils.rand_name("ari-name"),
                       "location": cls.bucket_name + "/" + ari_manifest}}
        # aki/ari must exist before the ami that references their ids.
        for image_type in ("aki", "ari"):
            image = cls.images[image_type]
            image["image_id"] = cls.ec2_client.register_image(
                name=image["name"],
                image_location=image["location"])
            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                                   image["image_id"])
        image = cls.images["ami"]
        image["image_id"] = cls.ec2_client.register_image(
            name=image["name"],
            image_location=image["location"],
            kernel_id=cls.images["aki"]["image_id"],
            ramdisk_id=cls.images["ari"]["image_id"])
        cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                               image["image_id"])

        # itervalues: Python 2 only.  _state's closure over `image` is
        # safe because it is called before the loop variable is rebound.
        for image in cls.images.itervalues():
            def _state():
                retr = cls.ec2_client.get_image(image["image_id"])
                return retr.state
            state = wait.state_wait(_state, "available")
            if state != "available":
                # Deregister everything on failure before raising.
                for _image in cls.images.itervalues():
                    cls.ec2_client.deregister_image(_image["image_id"])
                raise exceptions.EC2RegisterImageException(
                    image_id=image["image_id"])
Example #7
0
    def setUpClass(cls):
        """Register the ami/aki/ari images from the s3 materials and wait
        until each becomes "available"; skip when manifests are missing.
        """
        super(InstanceRunTest, cls).setUpClass()
        if not cls.conclusion['A_I_IMAGES_READY']:
            raise cls.skipException("".join(
                ("EC2 ", cls.__name__, ": requires ami/aki/ari manifest")))
        cls.os = clients.Manager()
        cls.s3_client = cls.os.s3_client
        cls.ec2_client = cls.os.ec2api_client
        cls.zone = cls.ec2_client.get_good_zone()
        config = cls.config
        cls.materials_path = config.boto.s3_materials_path
        ami_manifest = config.boto.ami_manifest
        aki_manifest = config.boto.aki_manifest
        ari_manifest = config.boto.ari_manifest
        cls.instance_type = config.boto.instance_type
        cls.bucket_name = rand_name("s3bucket-")
        cls.keypair_name = rand_name("keypair-")
        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                               cls.keypair_name)
        bucket = cls.s3_client.create_bucket(cls.bucket_name)
        cls.addResourceCleanUp(cls.destroy_bucket,
                               cls.s3_client.connection_data, cls.bucket_name)
        s3_upload_dir(bucket, cls.materials_path)
        cls.images = {
            "ami": {
                "name": rand_name("ami-name-"),
                "location": cls.bucket_name + "/" + ami_manifest
            },
            "aki": {
                "name": rand_name("aki-name-"),
                "location": cls.bucket_name + "/" + aki_manifest
            },
            "ari": {
                "name": rand_name("ari-name-"),
                "location": cls.bucket_name + "/" + ari_manifest
            }
        }
        # Register all three image types; cleanups ensure deregistration.
        # itervalues: Python 2 only.
        for image in cls.images.itervalues():
            image["image_id"] = cls.ec2_client.register_image(
                name=image["name"], image_location=image["location"])
            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                                   image["image_id"])

        for image in cls.images.itervalues():

            # _state closes over the loop variable but is called within
            # the same iteration, so late binding is harmless here.
            def _state():
                retr = cls.ec2_client.get_image(image["image_id"])
                return retr.state

            state = state_wait(_state, "available")
            if state != "available":
                # Deregister everything on failure before raising.
                for _image in cls.images.itervalues():
                    cls.ec2_client.deregister_image(_image["image_id"])
                raise exceptions.EC2RegisterImageException(
                    image_id=image["image_id"])
Example #8
0
    def assertAddressReleasedWait(self, address):
        """Wait until *address* no longer appears in the address list,
        then assert it was actually released.
        """
        def _address_delete():
            # NOTE(afazekas): the filter gives back IP
            # even if it is not associated to my tenant
            allocated = [a.public_ip
                         for a in self.ec2_client.get_all_addresses()]
            if address.public_ip not in allocated:
                return "DELETED"
            return "NOTDELETED"

        state = wait.state_wait(_address_delete, "DELETED")
        self.assertEqual(state, "DELETED")
Example #9
0
 def _wait_instance_state(cls, instance, final_set):
     """Wait for *instance* to reach one of final_set (or be gone)."""
     if not isinstance(final_set, set):
         final_set = {final_set}
     final_set |= cls.gone_set
     checker = cls.get_lfunction_gone(instance)
     state = boto_wait.state_wait(checker, final_set,
                                  cls.valid_instance_state)
     if state not in final_set:
         msg = "Error in waiting for instance(state = '%s')" % state
         raise base.TestCasePreparationError(msg)
Example #10
0
 def _wait_instance_state(cls, instance, final_set):
     """Block until *instance* reaches a state in final_set or is gone;
     raise TestCasePreparationError on any other terminal state.
     """
     if not isinstance(final_set, set):
         final_set = {final_set}
     final_set |= cls.gone_set
     lfunc = cls.get_lfunction_gone(instance)
     reached = boto_wait.state_wait(lfunc, final_set,
                                    cls.valid_instance_state)
     if reached not in final_set:
         raise base.TestCasePreparationError(
             "Error in waiting for instance(state = '%s')" % reached)
Example #11
0
    def test_001_attach_volume(self):
        """Attach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")

        self._start_test()

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = self.ctx.instance.placement
        volume = self.ec2_client.create_volume(self.volume_size, zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        self.ctx.volume = volume
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it will be available
        # Snapshot the guest's partition table so the new device can be
        # detected after attach.
        self.ctx.part_lines = self.ctx.ssh.get_partitions().split('\n')
        volume.attach(self.ctx.instance.id, "/dev/" + self.volume_attach_name)

        # NOTE(apavlov): "attaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "in-use")
        boto_wait.re_search_wait(self._volume_state, "in-use")

        # Wait until the guest sees an extra partition (sentinel 1 from
        # _part_state -- presumably defined on the class; TODO confirm).
        boto_wait.state_wait(self._part_state, 1)
        part_lines_new = self.ctx.ssh.get_partitions().split('\n')
        volume_name = utils.detect_new_volume(self.ctx.part_lines,
                                              part_lines_new)
        self.ctx.part_lines = part_lines_new

        self._end_test("Create and attach volume")

        # Format and mount the new device; later tests rely on /vol and
        # on ctx.volume_ready.
        self.ctx.ssh.exec_command("PATH=$PATH:/usr/sbin:/usr/bin "
                                  "&& sudo mkfs.ext3 /dev/" + volume_name)
        self.ctx.ssh.exec_command("sudo mkdir -m 777 /vol "
                                  "&& sudo mount /dev/" + volume_name +
                                  " /vol")
        self.ctx.volume_ready = True

        self._check_test()
Example #12
0
    def assertAddressReleasedWait(self, address):
        """Wait until *address* disappears from get_all_addresses and
        assert it was released.
        """
        def _address_delete():
            # NOTE(afazekas): the filter gives back IP
            # even if it is not associated to my tenant
            current_ips = [a.public_ip
                           for a in self.ec2_client.get_all_addresses()]
            if address.public_ip in current_ips:
                return "NOTDELETED"
            return "DELETED"

        state = wait.state_wait(_address_delete, "DELETED")
        self.assertEqual(state, "DELETED")
    def test_001_attach_volume(self):
        """Attach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")

        self._start_test()

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = self.ctx.instance.placement
        volume = self.ec2_client.create_volume(self.volume_size, zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        self.ctx.volume = volume
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it will be available
        # Record the guest's partition table so the new device can be
        # detected after attach.
        self.ctx.part_lines = self.ctx.ssh.get_partitions().split('\n')
        volume.attach(self.ctx.instance.id, "/dev/" + self.volume_attach_name)

        # NOTE(apavlov): "attaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "in-use")
        boto_wait.re_search_wait(self._volume_state, "in-use")

        # Wait until the guest sees an extra partition (sentinel 1 from
        # _part_state -- presumably defined on the class; TODO confirm).
        boto_wait.state_wait(self._part_state, 1)
        part_lines_new = self.ctx.ssh.get_partitions().split('\n')
        volume_name = utils.detect_new_volume(self.ctx.part_lines,
                                              part_lines_new)
        self.ctx.part_lines = part_lines_new

        self._end_test("Create and attach volume")

        # Format and mount the new device; later tests rely on /vol and
        # on ctx.volume_ready.
        self.ctx.ssh.exec_command("PATH=$PATH:/usr/sbin:/usr/bin "
            "&& sudo mkfs.ext3 /dev/" + volume_name)
        self.ctx.ssh.exec_command("sudo mkdir -m 777 /vol "
            "&& sudo mount /dev/" + volume_name + " /vol")
        self.ctx.volume_ready = True

        self._check_test()
Example #14
0
    def assertAddressDissasociatedWait(self, address):
        """Wait for *address* to be disassociated from any instance and
        assert the final observed state is DISASSOCIATED.
        """
        def _disassociate():
            matches = self.ec2_client.get_all_addresses(
                addresses=(address.public_ip,))
            if len(matches) != 1:
                return "INVALID"
            instance_id = matches[0].instance_id
            if not instance_id:
                return "DISASSOCIATED"
            LOG.info("%s associated to %s", address.public_ip, instance_id)
            return "ASSOCIATED"

        valid = set(("ASSOCIATED", "DISASSOCIATED"))
        state = wait.state_wait(_disassociate, "DISASSOCIATED", valid)
        self.assertEqual(state, "DISASSOCIATED")
    def setUpClass(cls):
        """Register the ami/aki/ari images from the s3 materials and wait
        until each becomes "available"; skip when manifests are missing.
        """
        super(InstanceRunTest, cls).setUpClass()
        if not cls.conclusion['A_I_IMAGES_READY']:
            raise cls.skipException("".join(("EC2 ", cls.__name__,
                                    ": requires ami/aki/ari manifest")))
        cls.os = clients.Manager()
        cls.s3_client = cls.os.s3_client
        cls.ec2_client = cls.os.ec2api_client
        cls.zone = cls.ec2_client.get_good_zone()
        config = cls.config
        cls.materials_path = config.boto.s3_materials_path
        ami_manifest = config.boto.ami_manifest
        aki_manifest = config.boto.aki_manifest
        ari_manifest = config.boto.ari_manifest
        cls.instance_type = config.boto.instance_type
        cls.bucket_name = rand_name("s3bucket-")
        cls.keypair_name = rand_name("keypair-")
        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                               cls.keypair_name)
        bucket = cls.s3_client.create_bucket(cls.bucket_name)
        cls.addResourceCleanUp(cls.destroy_bucket,
                               cls.s3_client.connection_data,
                               cls.bucket_name)
        s3_upload_dir(bucket, cls.materials_path)
        cls.images = {"ami":
                      {"name": rand_name("ami-name-"),
                       "location": cls.bucket_name + "/" + ami_manifest},
                      "aki":
                      {"name": rand_name("aki-name-"),
                       "location": cls.bucket_name + "/" + aki_manifest},
                      "ari":
                      {"name": rand_name("ari-name-"),
                       "location": cls.bucket_name + "/" + ari_manifest}}
        # Register all three image types; cleanups ensure deregistration.
        # itervalues: Python 2 only.
        for image in cls.images.itervalues():
            image["image_id"] = cls.ec2_client.register_image(
                name=image["name"],
                image_location=image["location"])
            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                                   image["image_id"])

        # _state's closure over `image` is safe: it is called before the
        # loop variable is rebound.
        for image in cls.images.itervalues():
            def _state():
                retr = cls.ec2_client.get_image(image["image_id"])
                return retr.state
            state = state_wait(_state, "available")
            if state != "available":
                # Deregister everything on failure before raising.
                for _image in cls.images.itervalues():
                    cls.ec2_client.deregister_image(_image["image_id"])
                raise exceptions.EC2RegisterImageException(image_id=
                                                           image["image_id"])
Example #16
0
    def test_005_detach_volume(self):
        """Detach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_ready:
            raise self.skipException("Volume preparation failed")

        self._start_test()

        # Unmount inside the guest before detaching the block device.
        self.ctx.ssh.exec_command("sudo umount /vol")

        self.ctx.volume.detach()

        # NOTE(apavlov): "detaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "available")
        boto_wait.re_search_wait(self._volume_state, "available")

        self._end_test("Detach volume")

        # Wait until the guest sees one fewer partition (sentinel -1 from
        # _part_state -- presumably defined on the class; TODO confirm).
        boto_wait.state_wait(self._part_state, -1)

        self._check_test()
    def test_005_detach_volume(self):
        """Detach volume"""

        if self.ctx.ssh is None:
            raise self.skipException("Booting failed")
        if not self.ctx.volume_ready:
            raise self.skipException("Volume preparation failed")

        self._start_test()

        # Unmount in the guest before detaching the device.
        self.ctx.ssh.exec_command("sudo umount /vol")

        self.ctx.volume.detach()

        # NOTE(apavlov): "detaching" invalid EC2 status #1074901
        self.assertVolumeStatusWait(self._volume_state, "available")
        boto_wait.re_search_wait(self._volume_state, "available")

        self._end_test("Detach volume")

        # Wait until the guest reports one fewer partition (sentinel -1
        # from _part_state -- presumably defined on the class).
        boto_wait.state_wait(self._part_state, -1)

        self._check_test()
Example #18
0
    def assertAddressDissasociatedWait(self, address):
        """Wait until *address* has no associated instance and assert the
        final state is DISASSOCIATED.
        """
        def _disassociate():
            found = self.ec2_client.get_all_addresses(
                addresses=(address.public_ip, ))
            if len(found) != 1:
                return "INVALID"
            if found[0].instance_id:
                LOG.info("%s associated to %s", address.public_ip,
                         found[0].instance_id)
                return "ASSOCIATED"
            return "DISASSOCIATED"

        valid_states = set(("ASSOCIATED", "DISASSOCIATED"))
        state = wait.state_wait(_disassociate, "DISASSOCIATED",
                                valid_states)
        self.assertEqual(state, "DISASSOCIATED")
 def _sync(self):
     """Wait until tcpdump on the remote host has created its log file."""
     def _tcpdump_ready():
         out = self.ssh.exec_command("test -f tcpdump.log && echo 1 "
                                     "|| echo 0")
         return int(out) == 1
     boto_wait.state_wait(_tcpdump_ready, True)
 def _sync(self):
     """Block until the tcpdump.log file exists on the remote host."""
     def check_tcpdump_is_ready():
         flag = self.ssh.exec_command("test -f tcpdump.log && echo 1 "
                                      "|| echo 0")
         return int(flag) == 1
     boto_wait.state_wait(check_tcpdump_is_ready, True)
Example #21
0
    def test_integration_1(self):
        # EC2 1. integration test (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        # Allow ICMP (ping) and SSH ingress from anywhere.
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))
        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(1, self.zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may report available before it is available

        ssh = RemoteClient(address.public_ip,
                           CONF.compute.ssh_user,
                           pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output -")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            volume.update(validate=True)
            return volume.status

        self.assertVolumeStatusWait(_volume_state, "in-use")
        re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so we just test whether the number of partition
        # lines increased/decreased.

        def _part_state():
            current = ssh.get_partitions().split('\n')
            # Compare line counts; comparing the raw lists (the previous
            # `current > part_lines`) is a lexicographic comparison and
            # can misreport a change when contents differ.
            if len(current) > len(part_lines):
                return 'INCREASE'
            if len(current) < len(part_lines):
                return 'DECREASE'
            return 'EQUAL'

        state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(_volume_state, "available")
        re_search_wait(_volume_state, "available")
        LOG.info("Volume %s state: %s", volume.id, volume.status)

        state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
    def _run_scenario(self, scenario_func, snapshot=None):
        """Boot an instance, attach a fresh volume (optionally created
        from *snapshot*), run scenario_func(ssh, volume_id) against it,
        stop the instance and return whatever scenario_func returned.
        """
        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = self.ec2_client.run_instances(self.image_id,
            instance_type=self.instance_type,
            key_name=self.keypair.name,
            security_groups=(self.sec_group_name,))
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        zone = instance.placement
        volume = self.ec2_client.create_volume(1, zone, snapshot=snapshot)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(volume, "available")

        ip_address = self._prepare_public_ip(instance)
        ssh = remote_client.RemoteClient(ip_address,
                                         self.ssh_user,
                                         pkey=self.keypair.material)

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it will be available
        # Record the guest's partition table so the new device can be
        # detected after attach.
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/" + self.volume_attach_name)

        def _volume_state():
            volume.update(validate=True)
            return volume.status

        self.assertVolumeStatusWait(_volume_state, "in-use")
        boto_wait.re_search_wait(_volume_state, "in-use")

        # Returns 1/-1/0 as the guest partition count grows/shrinks/stays.
        def _part_state():
            current = ssh.get_partitions().split('\n')
            if len(current) > len(part_lines):
                return 1
            if len(current) < len(part_lines):
                return -1
            return 0

        boto_wait.state_wait(_part_state, 1)
        part_lines_new = ssh.get_partitions().split('\n')
        self.volume_name = utils.detect_new_volume(part_lines, part_lines_new)
        part_lines = part_lines_new

        self._correct_ns_if_needed(ssh)

        snapshot = scenario_func(ssh, volume.id)

        # NOTE(apavlov): stop this instance(imagine that it will be used)
        instance.stop()
        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")

        return snapshot
Example #23
0
    def test_compute_with_volumes(self):
        """EC2 integration test (not strict).

        Boots an instance behind a fresh security group, associates a
        floating address, attaches and detaches a volume, and verifies the
        guest observes the block device appearing and disappearing via its
        partition table over SSH.
        """
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        # Open ICMP and SSH from anywhere so the test can reach the guest.
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))

        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)

        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
                                               self.zone)
        LOG.debug("Volume created - status: %s", volume.status)

        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may be reports available before it is available

        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output")
        try:
            resp = ssh.write_to_console(text)
        except Exception:
            if not CONF.compute_feature_enabled.console_output:
                LOG.debug('Console output not supported, cannot log')
            else:
                console_output = instance.get_console_output().output
                LOG.debug('Console output for %s\nbody=\n%s',
                          instance.id, console_output)
            raise

        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status

        wait.re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas):  Different Hypervisor backends names
        # differently the devices,
        # now we just test is the partition number increased/decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            # Compare partition *counts*, not the raw line lists:
            # lexicographic list comparison could misreport a change when
            # device names merely reorder between reads.
            if len(current) > len(part_lines):
                return 'INCREASE'
            if len(current) < len(part_lines):
                return 'DECREASE'
            return 'EQUAL'

        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(volume, "available")

        wait.state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
    def test_compute_with_volumes(self):
        """EC2 integration test (not strict).

        Boots an instance behind a fresh security group, associates a
        floating address, attaches and detaches a volume, and verifies the
        guest observes the block device appearing and disappearing via its
        partition table over SSH.
        """
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(
            sec_group_name, group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        # Open ICMP and SSH from anywhere so the test can reach the guest.
        self.assertTrue(
            self.ec2_client.authorize_security_group(sec_group_name,
                                                     ip_protocol="icmp",
                                                     cidr_ip="0.0.0.0/0",
                                                     from_port=-1,
                                                     to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(sec_group_name,
                                                     ip_protocol="tcp",
                                                     cidr_ip="0.0.0.0/0",
                                                     from_port=22,
                                                     to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name, ))

        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)

        self.addResourceCleanUp(self.destroy_reservation, reservation)
        volume = self.ec2_client.create_volume(CONF.volume.volume_size,
                                               self.zone)
        LOG.debug("Volume created - status: %s", volume.status)

        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may be reports available before it is available

        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output")
        try:
            resp = ssh.write_to_console(text)
        except Exception:
            if not CONF.compute_feature_enabled.console_output:
                LOG.debug('Console output not supported, cannot log')
            else:
                console_output = instance.get_console_output().output
                LOG.debug('Console output for %s\nbody=\n%s', instance.id,
                          console_output)
            raise

        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status

        wait.re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas):  Different Hypervisor backends names
        # differently the devices,
        # now we just test is the partition number increased/decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            # Compare partition *counts*, not the raw line lists:
            # lexicographic list comparison could misreport a change when
            # device names merely reorder between reads.
            if len(current) > len(part_lines):
                return 'INCREASE'
            if len(current) < len(part_lines):
                return 'DECREASE'
            return 'EQUAL'

        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        self.assertVolumeStatusWait(volume, "available")

        wait.state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
Example #25
0
    def _run_scenario(self, scenario_func, snapshot=None):
        """Boot an instance, attach a new volume, and run a scenario on it.

        Creates (optionally from *snapshot*) and attaches a 1 GB volume,
        detects the new device from the guest's partition table, invokes
        ``scenario_func(ssh, volume_id)``, then stops the instance.
        Returns whatever snapshot the scenario callback produced.
        """
        # NOTE(apavlov): ec2-run-instances --key KEYPAIR IMAGE
        reservation = self.ec2_client.run_instances(
            self.image_id,
            instance_type=self.instance_type,
            key_name=self.keypair.name,
            security_groups=(self.sec_group_name, ))
        self.addResourceCleanUp(self.destroy_reservation, reservation)
        instance = reservation.instances[0]
        LOG.info("state: %s", instance.state)
        # NOTE(apavlov): wait until it runs (ec2-describe-instances INSTANCE)
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")

        # NOTE(apavlov): ec2-create-volume -z ZONE -s SIZE_GB
        placement_zone = instance.placement
        vol = self.ec2_client.create_volume(1, placement_zone,
                                            snapshot=snapshot)
        self.addResourceCleanUp(self.destroy_volume_wait, vol)
        # NOTE(apavlov): wait it (ec2-describe-volumes VOLUME)
        self.assertVolumeStatusWait(vol, "available")

        public_ip = self._prepare_public_ip(instance)
        shell = remote_client.RemoteClient(public_ip,
                                           self.ssh_user,
                                           pkey=self.keypair.material)

        # NOTE(apavlov): ec2-attach-volume -d /dev/XXX -i INSTANCE VOLUME
        # and wait until it will be available
        parts_before = shell.get_partitions().split('\n')
        vol.attach(instance.id, "/dev/" + self.volume_attach_name)

        def _vol_status():
            # Refresh from the API and report the current volume status.
            vol.update(validate=True)
            return vol.status

        self.assertVolumeStatusWait(_vol_status, "in-use")
        boto_wait.re_search_wait(_vol_status, "in-use")

        def _partition_delta():
            # Sign of the change in partition count since attach:
            # 1 = grew, -1 = shrank, 0 = unchanged.
            now = shell.get_partitions().split('\n')
            return ((len(now) > len(parts_before)) -
                    (len(now) < len(parts_before)))

        boto_wait.state_wait(_partition_delta, 1)
        parts_after = shell.get_partitions().split('\n')
        self.volume_name = utils.detect_new_volume(parts_before, parts_after)
        parts_before = parts_after

        self._correct_ns_if_needed(shell)

        snapshot = scenario_func(shell, vol.id)

        # NOTE(apavlov): stop this instance(imagine that it will be used)
        instance.stop()
        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")

        return snapshot