def destroy_volume_wait(cls, volume):
    """Delete a volume, trying to detach it first.

    Use just for teardown!  Errors are accumulated and reported via
    TearDownException instead of aborting on the first failure.
    """
    exc_num = 0
    snaps = volume.snapshots()
    if snaps:
        # A volume with snapshots cannot be deleted; log loudly so the
        # teardown failure is easy to diagnose.
        # NOTE(review): fixed — original used map(snaps.id, snaps), which
        # reads '.id' on the *list* and raises AttributeError.
        LOG.critical("%s Volume has %s snapshot(s)", volume.id,
                     [snap.id for snap in snaps])

    # NOTE(afazekas): detaching/attaching not valid EC2 status
    def _volume_state():
        volume.update(validate=True)
        try:
            # Force-detach whenever the volume is not yet "available";
            # detach errors are logged but must not abort the teardown.
            if volume.status != "available":
                volume.detach(force=True)
        except BaseException as exc:
            LOG.exception(exc)
            # exc_num += 1 "nonlocal" not in python2
        return volume.status

    try:
        re_search_wait(_volume_state, "available")  # not validates status
        LOG.info(_volume_state())
        volume.delete()
    except BaseException as exc:
        LOG.exception(exc)
        exc_num += 1
    if exc_num:
        raise exceptions.TearDownException(num=exc_num)
def destroy_reservation(cls, reservation):
    """Terminate instances in a reservation, just for teardown."""
    failures = 0

    def _current_state():
        # Reports "_GONE" once the instance no longer exists server-side.
        # Closes over the loop variable below; it is only invoked while
        # that instance is current, so late binding is intentional.
        try:
            instance.update(validate=True)
        except ValueError:
            return "_GONE"
        except exception.EC2ResponseError as exc:
            not_found = cls.ec2_error_code.client.\
                InvalidInstanceID.NotFound.match(exc)
            if not_found:
                return "_GONE"
            # NOTE(afazekas): incorrect code,
            # but the resource must be destoreyd
            if exc.error_code == "InstanceNotFound":
                return "_GONE"
        return instance.state

    for instance in reservation.instances:
        try:
            instance.terminate()
            re_search_wait(_current_state, "_GONE")
        except BaseException as exc:
            LOG.exception(exc)
            failures += 1
    if failures:
        raise exceptions.TearDownException(num=failures)
def destroy_reservation(cls, reservation):
    """Terminate instances in a reservation, just for teardown."""
    errors = 0

    def _state():
        # Closes over the loop variable; called only while that
        # instance is current.
        try:
            instance.update(validate=True)
        except ValueError:
            # The instance record vanished; treat it as terminated.
            return "terminated"
        return instance.state

    for instance in reservation.instances:
        try:
            instance.terminate()
            re_search_wait(_state, "terminated")
        except BaseException as exc:
            LOG.exception(exc)
            errors += 1
    if errors:
        raise TearDownException(num=errors)
def test_integration_1(self):
    """EC2 1. integration test (not strict).

    Boots an instance with a security group, associates a floating
    address, attaches/detaches a volume while checking the guest's
    partition table, then stops the instance.
    """
    image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
    sec_group_name = rand_name("securitygroup-")
    group_desc = sec_group_name + " security group description "
    security_group = self.ec2_client.create_security_group(sec_group_name,
                                                           group_desc)
    self.addResourceCleanUp(self.destroy_security_group_wait,
                            security_group)
    # Allow ICMP (ping) and SSH (22/tcp) from anywhere.
    self.ec2_client.authorize_security_group(sec_group_name,
                                             ip_protocol="icmp",
                                             cidr_ip="0.0.0.0/0",
                                             from_port=-1,
                                             to_port=-1)
    self.ec2_client.authorize_security_group(sec_group_name,
                                             ip_protocol="tcp",
                                             cidr_ip="0.0.0.0/0",
                                             from_port=22,
                                             to_port=22)
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type,
                                key_name=self.keypair_name,
                                security_groups=(sec_group_name,))
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    volume = self.ec2_client.create_volume(1, self.zone)
    self.addResourceCleanUp(self.destroy_volume_wait, volume)
    instance = reservation.instances[0]

    def _instance_state():
        instance.update(validate=True)
        return instance.state

    def _volume_state():
        volume.update(validate=True)
        return volume.status

    LOG.info("state: %s", instance.state)
    if instance.state != "running":
        self.assertInstanceStateWait(_instance_state, "running")

    address = self.ec2_client.allocate_address()
    rcuk_a = self.addResourceCleanUp(address.delete)
    address.associate(instance.id)
    rcuk_da = self.addResourceCleanUp(address.disassociate)
    # TODO(afazekas): ping test. dependecy/permission ?

    self.assertVolumeStatusWait(_volume_state, "available")
    # NOTE(afazekas): it may be reports available before it is available

    ssh = RemoteClient(address.public_ip,
                       self.os.config.compute.ssh_user,
                       pkey=self.keypair.material)
    text = rand_name("Pattern text for console output -")
    resp = ssh.write_to_console(text)
    self.assertFalse(resp)

    def _output():
        output = instance.get_console_output()
        return output.output

    re_search_wait(_output, text)
    part_lines = ssh.get_partitions().split('\n')
    # "attaching" invalid EC2 state ! #1074901
    volume.attach(instance.id, "/dev/vdh")
    # self.assertVolumeStatusWait(_volume_state, "in-use")  # #1074901
    re_search_wait(_volume_state, "in-use")

    # NOTE(afazekas): Different hypervisor backends name the devices
    # differently, so we just test whether the number of partitions
    # increased/decreased.
    def _part_state():
        # Compare line *counts*, not the lists themselves — list
        # comparison is lexicographic and device-name dependent, so a
        # newly attached device could otherwise be misreported.
        current = len(ssh.get_partitions().split('\n'))
        if current > len(part_lines):
            return 'INCREASE'
        if current < len(part_lines):
            return 'DECREASE'
        return 'EQUAL'

    state_wait(_part_state, 'INCREASE')
    part_lines = ssh.get_partitions().split('\n')

    # TODO(afazekas): Resource compare to the flavor settings

    volume.detach()  # "detaching" invalid EC2 status #1074901
    # self.assertVolumeStatusWait(_volume_state, "available")
    re_search_wait(_volume_state, "available")
    LOG.info("Volume %s state: %s", volume.id, volume.status)

    state_wait(_part_state, 'DECREASE')

    instance.stop()
    address.disassociate()
    self.assertAddressDissasociatedWait(address)
    self.cancelResourceCleanUp(rcuk_da)
    address.release()
    self.assertAddressReleasedWait(address)
    self.cancelResourceCleanUp(rcuk_a)

    LOG.info("state: %s", instance.state)
    if instance.state != "stopped":
        self.assertInstanceStateWait(_instance_state, "stopped")
def test_integration_1(self):
    """EC2 1. integration test (not strict).

    Boots an instance with a security group, associates a floating
    address, attaches/detaches a volume while checking the guest's
    partition table, then stops the instance.
    """
    image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
    sec_group_name = rand_name("securitygroup-")
    group_desc = sec_group_name + " security group description "
    security_group = self.ec2_client.create_security_group(
        sec_group_name, group_desc)
    self.addResourceCleanUp(self.destroy_security_group_wait,
                            security_group)
    # Allow ICMP (ping) and SSH (22/tcp) from anywhere.
    self.ec2_client.authorize_security_group(sec_group_name,
                                             ip_protocol="icmp",
                                             cidr_ip="0.0.0.0/0",
                                             from_port=-1,
                                             to_port=-1)
    self.ec2_client.authorize_security_group(sec_group_name,
                                             ip_protocol="tcp",
                                             cidr_ip="0.0.0.0/0",
                                             from_port=22,
                                             to_port=22)
    reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                ramdisk_id=self.images["ari"]["image_id"],
                                instance_type=self.instance_type,
                                key_name=self.keypair_name,
                                security_groups=(sec_group_name, ))
    self.addResourceCleanUp(self.destroy_reservation, reservation)
    volume = self.ec2_client.create_volume(1, self.zone)
    self.addResourceCleanUp(self.destroy_volume_wait, volume)
    instance = reservation.instances[0]

    def _instance_state():
        instance.update(validate=True)
        return instance.state

    def _volume_state():
        volume.update(validate=True)
        return volume.status

    LOG.info("state: %s", instance.state)
    if instance.state != "running":
        self.assertInstanceStateWait(_instance_state, "running")

    address = self.ec2_client.allocate_address()
    rcuk_a = self.addResourceCleanUp(address.delete)
    address.associate(instance.id)
    rcuk_da = self.addResourceCleanUp(address.disassociate)
    # TODO(afazekas): ping test. dependecy/permission ?

    self.assertVolumeStatusWait(_volume_state, "available")
    # NOTE(afazekas): it may be reports available before it is available

    ssh = RemoteClient(address.public_ip,
                       self.os.config.compute.ssh_user,
                       pkey=self.keypair.material)
    text = rand_name("Pattern text for console output -")
    resp = ssh.write_to_console(text)
    self.assertFalse(resp)

    def _output():
        output = instance.get_console_output()
        return output.output

    re_search_wait(_output, text)
    part_lines = ssh.get_partitions().split('\n')
    # "attaching" invalid EC2 state ! #1074901
    volume.attach(instance.id, "/dev/vdh")
    # self.assertVolumeStatusWait(_volume_state, "in-use")  # #1074901
    re_search_wait(_volume_state, "in-use")

    # NOTE(afazekas): Different hypervisor backends name the devices
    # differently, so we just test whether the number of partitions
    # increased/decreased.
    def _part_state():
        # Compare line *counts*, not the lists themselves — list
        # comparison is lexicographic and device-name dependent, so a
        # newly attached device could otherwise be misreported.
        current = len(ssh.get_partitions().split('\n'))
        if current > len(part_lines):
            return 'INCREASE'
        if current < len(part_lines):
            return 'DECREASE'
        return 'EQUAL'

    state_wait(_part_state, 'INCREASE')
    part_lines = ssh.get_partitions().split('\n')

    # TODO(afazekas): Resource compare to the flavor settings

    volume.detach()  # "detaching" invalid EC2 status #1074901
    # self.assertVolumeStatusWait(_volume_state, "available")
    re_search_wait(_volume_state, "available")
    LOG.info("Volume %s state: %s", volume.id, volume.status)

    state_wait(_part_state, 'DECREASE')

    instance.stop()
    address.disassociate()
    self.assertAddressDissasociatedWait(address)
    self.cancelResourceCleanUp(rcuk_da)
    address.release()
    self.assertAddressReleasedWait(address)
    self.cancelResourceCleanUp(rcuk_a)

    LOG.info("state: %s", instance.state)
    if instance.state != "stopped":
        self.assertInstanceStateWait(_instance_state, "stopped")