import random
import string

from eucaops import Eucaops
from eutester.eutestcase import EutesterTestCase


class ResourceGeneration(EutesterTestCase):
    def __init__(self, credpath):
        self.tester = Eucaops(credpath=credpath)

    def CreateResources(self):
        users = self.tester.get_all_users()
        testers = []
        for user in users:
            keys = self.tester.create_access_key(user_name=user['user_name'],
                                                 delegate_account=user['account_name'])
            testers.append(Eucaops(aws_access_key_id=keys['access_key_id'],
                                   aws_secret_access_key=keys['secret_access_key'],
                                   ec2_ip=self.tester.ec2.host,
                                   s3_ip=self.tester.s3.host))
        for tester in testers:
            # Create the resources as each user; the original looped over the
            # per-user testers but then created everything through self.tester.
            zone = random.choice(tester.get_zones())
            volume = tester.create_volume(size=1, azone=zone)
            snapshot = tester.create_snapshot(volume_id=volume.id)
            volume_from_snap = tester.create_volume(snapshot=snapshot, azone=zone)
            bucket = tester.create_bucket(
                tester.id_generator(12, string.ascii_lowercase + string.digits))
            key = tester.upload_object(
                bucket_name=bucket.name,
                key_name=tester.id_generator(12, string.ascii_lowercase + string.digits),
                contents=tester.id_generator(200))
            keypair = tester.add_keypair(tester.id_generator())
            group = tester.add_group(tester.id_generator())

    def run_suite(self):
        self.testlist = []
        testlist = self.testlist
        testlist.append(self.create_testcase_from_method(self.CreateResources))
        self.run_test_case_list(testlist)
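# Hedged runner sketch (not from the original source): one way this suite might
# be driven from the command line. The --credpath flag name is an assumption.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Generate per-user cloud resources")
    parser.add_argument("--credpath", default=None,
                        help="path to directory containing eucarc")
    args = parser.parse_args()
    ResourceGeneration(credpath=args.credpath).run_suite()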
import os
import random
import time

from eucaops import Eucaops


class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self, config_file="cloud.conf", password="******"):
        self.tester = Eucaops(config_file=config_file, password=password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" + self.start_time)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        self.standing_reservation = self.tester.run_instance(
            keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(self.standing_bucket_name,
                                                      self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name,
                                                              self.standing_key_name)

    def run_testcase(self, testcase_callback, **kwargs):
        poll_count = 20
        poll_interval = 20
        max_tries = poll_count
        while poll_count > 0:
            try:
                testcase_callback(**kwargs)
                break
            except Exception as e:
                self.tester.debug("Attempt failed due to: " + str(e) +
                                  "\nRetrying testcase in " + str(poll_interval))
                self.tester.sleep(poll_interval)
                poll_count -= 1
        if poll_count == 0:  # original used "is 0", which tests identity, not value
            # original message interpolated poll_count, which is 0 by this point
            self.fail("Could not run an instance after " + str(max_tries) +
                      " tries with " + str(poll_interval) + "s sleep in between")
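# Hedged usage sketch (assumption, not from the original source): run_testcase
# wraps any callable in the retry loop above, so a flaky post-failover check can
# be retried without extra plumbing. Constructor arguments are illustrative.
ha = HAtests(config_file="cloud.conf", password="mypassword")
ha.run_testcase(ha.tester.run_instance, keypair=ha.keypair.name,
                group=ha.group.name, zone=ha.zone)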
import os
import random
import time
import unittest

from eucaops import Eucaops


class InstanceBasics(unittest.TestCase):
    def setUp(self, credpath=None):
        # Setup basic eutester object
        if credpath is None:
            credpath = arg_credpath
        self.tester = Eucaops(credpath=credpath)
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def create_attach_volume(self, instance, size):
        self.volume = self.tester.create_volume(instance.placement, size)
        device_path = "/dev/" + instance.block_device_prefix + "j"
        before_attach = instance.get_dev_dir()
        try:
            self.assertTrue(self.tester.attach_volume(instance, self.volume, device_path),
                            "Failure attaching volume")
        except AssertionError as e:
            self.assertTrue(self.tester.delete_volume(self.volume))
            return False
        after_attach = instance.get_dev_dir()
        new_devices = self.tester.diff(after_attach, before_attach)
        if len(new_devices) == 0:  # original used "is 0" (identity, not equality)
            return False
        self.volume_device = "/dev/" + new_devices[0].strip()
        instance.assertFilePresent(self.volume_device)
        return True
import os
import random
import time

from eucaops import Eucaops


class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" + self.start_time)
            self.tester.authorize_group_by_name(group_name=self.group.name)
            self.tester.authorize_group_by_name(group_name=self.group.name,
                                                port=-1, protocol="icmp")
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair("keypair-" + self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            self.image = self.tester.get_emi(root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name
            self.tester.clc = self.tester.service_manager.get_enabled_clc().machine
            self.version = self.tester.clc.sys(
                "cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            self.standing_reservation = self.tester.run_instance(
                keypair=self.keypair.name, group=self.group.name, zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(self.standing_bucket_name,
                                                          self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(
                self.standing_bucket_name, self.standing_key_name)
        except Exception as e:
            self.clean_method()
import os
import random
import re
import time
import unittest

from eucaops import Eucaops


class InstanceBasics(unittest.TestCase):
    def setUp(self):
        # Setup basic eutester object
        eucarc_regex = re.compile("eucarc-")
        eucarc_dirs = [path for path in os.listdir(".") if eucarc_regex.search(path)]
        eucarc_path = None
        if len(eucarc_dirs) > 0:
            eucarc_path = eucarc_dirs[0]
        self.tester = Eucaops(config_file="../input/2b_tested.lst",
                              password="******",
                              credpath=eucarc_path)
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def create_attach_volume(self, instance, size):
        self.volume = self.tester.create_volume(instance.placement, size)
        device_path = "/dev/" + instance.block_device_prefix + "j"
        before_attach = instance.get_dev_dir()
        try:
            self.assertTrue(self.tester.attach_volume(instance, self.volume, device_path),
                            "Failure attaching volume")
        except AssertionError as e:
            self.assertTrue(self.tester.delete_volume(self.volume))
            return False
        after_attach = instance.get_dev_dir()
        new_devices = self.tester.diff(after_attach, before_attach)
        if len(new_devices) == 0:  # original used "is 0" (identity, not equality)
            return False
        self.volume_device = "/dev/" + new_devices[0].strip()
        instance.assertFilePresent(self.volume_device)
        return True
import os
import random
import time

import boto
from eucaops import Eucaops


class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        if not boto.config.has_section('Boto'):
            boto.config.add_section('Boto')
        boto.config.set('Boto', 'num_retries', '1')
        boto.config.set('Boto', 'http_socket_timeout', '20')
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.tester.ec2.connection.timeout = 30
        self.servman = self.tester.service_manager
        self.instance_timeout = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" + self.start_time)
            self.tester.authorize_group_by_name(group_name=self.group.name)
            self.tester.authorize_group_by_name(group_name=self.group.name,
                                                port=-1, protocol="icmp")
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair("keypair-" + self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            if self.args.emi:
                self.image = self.tester.get_emi(self.args.emi)
            else:
                self.image = self.tester.get_emi(root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name
            self.tester.clc = self.tester.service_manager.get_enabled_clc().machine
            self.version = self.tester.clc.sys(
                "cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            self.standing_reservation = self.tester.run_instance(
                image=self.image, keypair=self.keypair.name,
                group=self.group.name, zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
            for instance in self.standing_reservation.instances:
                instance.sys("echo " + instance.id + " > " + self.device)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(self.standing_bucket_name,
                                                          self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(
                self.standing_bucket_name, self.standing_key_name)
            self.run_instance_params = {'image': self.image,
                                        'keypair': self.keypair.name,
                                        'group': self.group.name,
                                        'zone': self.zone,
                                        'timeout': self.instance_timeout}
        except Exception as e:
            self.clean_method()
            raise Exception("Init for testcase failed. Reason: " + str(e))
from eucaops import Eucaops, EC2ops
from eutester.eutestcase import EutesterTestCase


class ConsoleCleanUp(EutesterTestCase):
    def __init__(self, extra_args=None, **kwargs):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        for kwarg in kwargs:
            self.args[kwarg] = kwargs[kwarg]  # original indexed kwarg[kwarg], a typo
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

    def populate_resources_for_console_test(self):
        '''This method creates resources in the cloud.'''
        zone = self.tester.ec2.get_all_zones()[0].name
        volume = self.tester.ec2.create_volume(1, zone)
        self.tester.wait_for_volume(volume)
        snapshot = self.tester.create_snapshot_from_volume(volume)
        self.tester.create_volume(zone=zone, snapshot=snapshot)
        keypair = self.tester.ec2.create_key_pair("test-key").name
        s_group = self.tester.ec2.create_security_group(
            "mygroup", "Security group for console test.").name
        image = self.tester.get_images()[0]
        image_id = self.tester.get_images()[0].id
        instance = self.tester.run_image(image=image, keypair="test-key",
                                         group="mygroup", auto_connect=False, zone=zone)
        instance_id = self.tester.get_instances('running')[0].id
        ip = self.tester.allocate_address().public_ip
        self.tester.allocate_address()
        self.tester.ec2.associate_address(instance_id, ip)
        self.tester.create_launch_config("LC1", image_id, keypair, [s_group],
                                         instance_type="m1.small")
        self.tester.create_as_group("ASG1", "LC1", self.tester.get_zones(),
                                    min_size=1, max_size=8, desired_capacity=2)
        instance = self.tester.get_instances('running')[0]
        self.tester.attach_volume(instance, volume, "vdb")
import shutil
import unittest
from datetime import date, timedelta

from eucaops import Eucaops


class Euca5338(unittest.TestCase):
    def setUp(self):
        self.conf = "cloud.conf"
        self.tester = Eucaops(config_file=self.conf, password="******")
        self.doAuth()
        self.startDate = date.today()
        self.endDate = date.today() + timedelta(days=1)
        self.dates = "-s " + str(self.startDate) + " -e " + str(self.endDate)
        self.cmd = ("eureport-generate-report " + self.dates +
                    " --time-unit=seconds --format=csv -t volume")
        self.source = "source " + self.tester.credpath + "/eucarc && "

    def tearDown(self):
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        self.tester.local("rm " + self.keypair.name + ".pem")
        shutil.rmtree(self.tester.credpath)

    def doAuth(self):
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        self.tester.authorize_group(self.group)

    def testName(self):
        # Get availability zone
        self.zone = self.tester.get_zones().pop()
        # Create and delete a volume
        self.volume = self.tester.create_volume(self.zone)
        self.tester.delete_volume(self.volume)
        # Run the report, save the output, and wait 10 seconds
        self.out1 = self.tester.sys(self.source + self.cmd)
        self.tester.sleep(10)
        # Run the report again
        self.out2 = self.tester.sys(self.source + self.cmd)
        # Find the index of the volume-specific information in the report
        self.index = str(self.out1).find(self.volume.id)
        # Pull the newly created/deleted volume's fields from the two reports
        self.volStringOne = str(self.out1)[self.index:self.index + 21]
        self.volStringTwo = str(self.out2)[self.index:self.index + 21]
        # Compare the strings to make sure GB-Secs is not increasing
        # for the deleted volume
        self.tester.debug("Report 1 = " + self.volStringOne +
                          " Report 2 = " + self.volStringTwo)
        if self.volStringOne == self.volStringTwo:
            self.tester.debug("SUCCESS the GB-Secs did not increase for deleted volume " +
                              self.volume.id)
        else:
            self.fail("FAIL GB-Secs increased for deleted volume " + self.volume.id)
import shutil
import unittest

from eucaops import Eucaops


class Euca2366(unittest.TestCase):
    def setUp(self):
        self.conf = "cloud.conf"
        self.tester = Eucaops(config_file=self.conf, password="******")
        self.cond = 1
        self.doAuth()
        self.props = EucaProperties(self.tester)

    def tearDown(self):
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        self.tester.local("rm " + self.keypair.name + ".pem")
        shutil.rmtree(self.tester.credpath)

    def doAuth(self):
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        self.tester.authorize_group(self.group)

    def testName(self):
        # Get availability zone
        self.zone = self.tester.get_zones().pop()
        # Default max volume size
        self.max_volume_size = int(self.props.get_property('storage.maxvolumesizeingb')[0])
        # Try to create a volume larger than max_volume_size
        try:
            self.tester.create_volume(self.zone, self.max_volume_size + 1, timeout=10)
        except Exception as detail:
            print detail
            self.cond = str(detail).count('Max Volume Size Limit Exceeded')
        if self.cond >= 1:
            self.tester.debug("SUCCESS")
        else:
            self.fail("FAIL")
import os
import random
import time

from eucaops import Eucaops


class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" + self.start_time)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.tester.clc = self.tester.service_manager.get_enabled_clc().machine
        self.old_version = self.tester.clc.sys(
            "cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0]
        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        self.standing_reservation = self.tester.run_instance(
            keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(self.standing_bucket_name,
                                                      self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name,
                                                              self.standing_key_name)

    def clean_method(self):
        try:
            self.tester.terminate_instances()
        except Exception as e:
            self.tester.critical("Unable to terminate all instances")
        self.servman.start_all()
import random
import unittest

from eucaops import Eucaops


class LoadGenerator(unittest.TestCase):
    def setUp(self):
        # Setup basic eutester object
        self.tester = Eucaops(config_file="../input/2b_tested.lst", password="******")
        self.tester.poll_count = 40
        ### Determine whether virtio drivers are being used
        self.device_prefix = "sd"
        if self.tester.get_hypervisor() == "kvm":
            self.device_prefix = "vd"
        self.ephemeral = "/dev/" + self.device_prefix + "a2"
        ### Pick a random availability zone
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    def tearDown(self):
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def GenerateKeypairs(self, count=10):
        """Create and delete keypairs in series"""
        for i in xrange(count):
            key_name = "key-generator-" + str(i)
            # original built key_name but never passed it to add_keypair
            keypair = self.tester.add_keypair(key_name)
            self.tester.delete_keypair(keypair)

    def GenerateVolumes(self, count=10):
        """Create and delete volumes in series"""
        for i in xrange(count):
            volume = self.tester.create_volume(self.zone)
            self.tester.delete_volume(volume)
import shutil
import unittest

from eucaops import Eucaops


class Euca2411(unittest.TestCase):
    def setUp(self):
        self.conf = "cloud.conf"
        self.device = "/dev/sda12"
        self.tester = Eucaops(config_file=self.conf, password="******")
        self.doAuth()

    def tearDown(self):
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        self.tester.local("rm " + self.keypair.name + ".pem")
        shutil.rmtree(self.tester.credpath)

    def doAuth(self):
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        self.tester.authorize_group(self.group)

    def testDetachEBS(self):
        # Get the existing EBS emi
        self.emi = self.tester.get_emi(root_device_type='ebs')
        # Start instance
        self.reservation = self.tester.run_instance(self.emi,
                                                    keypair=self.keypair.name,
                                                    group=self.group,
                                                    is_reachable=False)
        # Make sure the instance is running, then set instance variables
        for instance in self.reservation.instances:
            if instance.state == "running":
                self.instance = instance
                self.zone = instance.placement
        # Run test
        self.volume = self.tester.create_volume(self.zone, 2)
        self.tester.attach_volume(self.instance, self.volume, self.device)
        self.tester.stop_instances(self.reservation)
        # EBS instance is now in the stopped state; try to detach the volume.
        self.tester.detach_volume(self.volume)
import os
import random
import time

from eucaops import Eucaops
from eutester.eutestcase import EutesterTestCase
from eutester.euinstance import EuInstance
from eutester.euvolume import EuVolume


class MigrationTest(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.parser.add_argument('--imgurl', help="BFEBS Image to splat down",
                                 default=None)
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config, password=self.args.password)
        self.clusters = self.tester.service_manager.get_all_cluster_controllers()
        for cluster in self.clusters:
            self.nodes = self.tester.service_manager.get_all_node_controllers(
                part_name=cluster.partition)
            if len(self.nodes) < 2:
                self.tester.debug("Not enough NCs in partition '" + cluster.partition +
                                  "' to test instance migration.")
                exit(0)
        # TODO
        if len(self.clusters) > 1:
            self.tester.debug("TBD: handle multiple clusters during instance "
                              "migration tests")
            exit(0)
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.numberOfResources = 3
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        try:
            self.tester.get_emi(root_device_type="ebs")
        except:
            bfebs = self.do_with_args(BFEBSBasics)
            bfebs.RegisterImage()

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def MigrationBasic(self, volume=None):
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.reservation = self.tester.run_instance(self.image,
                                                    username=self.args.instance_user,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=self.zone)
        instance = self.reservation.instances[0]
        assert isinstance(instance, EuInstance)
        volume_device = None
        if volume is not None:
            volume_device = instance.attach_euvolume(volume)
        self.tester.service_manager.populate_nodes()
        source_nc = self.tester.service_manager.get_all_node_controllers(
            instance_id=instance.id)[0]
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc &&" +
                        " euserv-migrate-instances -i " + instance.id, code=0)

        def wait_for_new_nc():
            self.tester.service_manager.populate_nodes()
            destination_nc = self.tester.service_manager.get_all_node_controllers(
                instance_id=instance.id)[0]
            return source_nc.hostname == destination_nc.hostname

        self.tester.wait_for_result(wait_for_new_nc, False, timeout=600, poll_wait=60)
        self.assertTrue(self.tester.ping(instance.public_dns_name),
                        'Could not ping instance')
        if volume_device:
            instance.sys("ls " + volume_device, code=0)
        destination_nc = self.tester.service_manager.get_all_node_controllers(
            instance_id=instance.id)[0]
        # original used "is not", which compares identity, not string equality
        if destination_nc.machine.distro.name != "vmware":
            destination_nc.machine.sys("virsh list | grep " + instance.id, code=0)
        else:
            destination_nc.machine.sys("esxcli vm process list | grep " + instance.id,
                                       code=0)
        self.tester.terminate_instances(reservation=self.reservation)
        if volume is not None:
            self.tester.delete_volume(volume)

    def MigrationInstanceStoreWithVol(self):
        volume = self.tester.create_volume(zone=self.zone)
        assert isinstance(volume, EuVolume)
        self.MigrationBasic(volume)

    def MigrationBasicEBSBacked(self, volume=None):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.MigrationBasic(volume)

    def MigrationBasicEBSBackedWithVol(self):
        volume = self.tester.create_volume(zone=self.zone)
        assert isinstance(volume, EuVolume)
        self.MigrationBasicEBSBacked(volume)

    def MigrateToDest(self):
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.reservation = self.tester.run_instance(self.image,
                                                    username=self.args.instance_user,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=self.zone)
        instance = self.reservation.instances[0]
        self.tester.service_manager.populate_nodes()
        self.source_nc = self.tester.service_manager.get_all_node_controllers(
            instance_id=instance.id)[0]
        all_nc = self.tester.service_manager.get_all_node_controllers()
        self.destination_nc = None
        for nc in all_nc:
            if nc.machine.hostname != self.source_nc.machine.hostname:
                self.destination_nc = nc
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                        " euserv-migrate-instances -i " + instance.id +
                        " --include-dest " + self.destination_nc.machine.hostname,
                        code=0)

        def wait_for_new_nc():
            self.tester.service_manager.populate_nodes()
            self.instance_node = self.tester.service_manager.get_all_node_controllers(
                instance_id=instance.id)[0]
            return self.instance_node.hostname == self.destination_nc.hostname

        self.tester.wait_for_result(wait_for_new_nc, True, timeout=600, poll_wait=60)
        self.assertTrue(self.tester.ping(instance.public_dns_name),
                        'Could not ping instance')
        # migrate the instance back to its original source node
        self.destination_nc = self.source_nc
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                        " euserv-migrate-instances -i " + instance.id +
                        " --include-dest " + self.destination_nc.machine.hostname,
                        code=0)
        self.tester.wait_for_result(wait_for_new_nc, True, timeout=600, poll_wait=60)
        self.assertTrue(self.tester.ping(instance.public_dns_name),
                        'Could not ping instance')
        self.tester.terminate_instances(reservation=self.reservation)

    def MigrationToDestEBSBacked(self):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.MigrateToDest()

    def EvacuateNC(self, volume_list=[]):
        instance_list = []
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.nodes = self.tester.service_manager.populate_nodes()
        # pop out one NC to fill in
        self.source_nc = self.nodes.pop()

        def set_state(node, state):
            # retrying, see EUCA-6389
            while node.state != state:
                self.tester.debug(node.hostname + ": SET STATE TO " + state)
                enabled_clc.sys("euca-modify-service -s " + state + " " + node.hostname,
                                code=0)
                self.tester.sleep(10)
                tmpnodes = self.tester.service_manager.populate_nodes()
                for tmpnode in tmpnodes:
                    if tmpnode.hostname == node.hostname:
                        node = tmpnode

        # stop all the NCs
        for node in self.nodes:
            set_state(node, "STOPPED")
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = self.tester.run_instance(self.image, min=3, max=3,
                                                    username=self.args.instance_user,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=self.zone)
        for i in xrange(3):
            instance = self.reservation.instances[i]
            instance_list.append(instance)
            assert isinstance(instance, EuInstance)
            volume_device = None
            if volume_list:
                volume_device = instance.attach_euvolume(volume_list[i])
        self.nodes = self.tester.service_manager.populate_nodes()
        # start all the NCs
        for node in self.nodes:
            if node.hostname != self.source_nc.hostname:  # original used "is not"
                set_state(node, "ENABLED")
        self.nodes = self.tester.service_manager.populate_nodes()
        # evacuate source NC
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                        " euserv-migrate-instances -s " +
                        self.source_nc.machine.hostname, code=0)

        def wait_for_evacuation():
            self.tester.service_manager.populate_nodes()
            if self.source_nc.machine.distro.name == "vmware":  # original used "is"
                emptyNC = self.source_nc.sys(
                    "esxcli vm process list | grep 'Display Name' | awk '{print $3}'")
            else:
                emptyNC = self.source_nc.get_virsh_list()
            return len(emptyNC) == 0

        self.tester.wait_for_result(wait_for_evacuation, True, timeout=600, poll_wait=60)
        for inst in instance_list:
            self.assertTrue(self.tester.ping(inst.public_dns_name),
                            'Could not ping instance')
        self.tester.terminate_instances(reservation=self.reservation)
        if volume_list:
            self.tester.delete_volumes(volume_list)

    def EvacuateNCWithVol(self):
        volume_list = []
        for i in xrange(self.numberOfResources):
            volume = self.tester.create_volume(zone=self.zone)
            assert isinstance(volume, EuVolume)
            volume_list.append(volume)
        self.EvacuateNC(volume_list)

    def EvacuateNCAllEBS(self):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.EvacuateNC()
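# Hedged runner sketch (assumption, not from the original source), following the
# EutesterTestCase pattern used elsewhere in this collection
# (create_testcase_from_method plus run_test_case_list); the chosen test names
# are just examples.
if __name__ == "__main__":
    testcase = MigrationTest()
    tests = ["MigrationBasic", "MigrationInstanceStoreWithVol"]
    unit_list = [testcase.create_testcase_from_method(getattr(testcase, name))
                 for name in tests]
    testcase.run_test_case_list(unit_list)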
import datetime
import time

from boto.ec2.cloudwatch.metric import Metric
from eucaops import Eucaops
from eutester.eutestcase import EutesterTestCase


class CloudWatchBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument('--clean_on_exit', action='store_true', default=True,
                                 help='Boolean, used to flag whether to run clean up '
                                      'method after running test list')
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        ### Setup basic eutester object
        if self.args.region:
            self.tester = CWops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config,
                                  password=self.args.password,
                                  credpath=self.args.credpath)
        self.start_time = str(int(time.time()))
        self.zone = self.tester.get_zones()
        self.namespace = 'Namespace-' + self.start_time
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        ### Setup AutoScaling
        self.setUpAutoscaling()
        ### Create Dimensions used in tests
        self.instanceDimension = newDimension('InstanceId', self.instanceid)
        self.volumeDimension = newDimension('VolumeId', self.volume.id)
        self.autoScalingDimension = newDimension('AutoScalingGroupName',
                                                 self.auto_scaling_group_name)
        ### Setup Alarms
        self.setUpAlarms()
        ### Wait for metrics to populate, timeout 30 minutes
        self.tester.wait_for_result(self.IsMetricsListPopulated, result=True,
                                    timeout=1800)

    def clean_method(self):
        self.tester.cleanup_artifacts()
        self.cleanUpAutoscaling()
        self.tester.delete_keypair(self.keypair)

    def get_time_window(self, end=None, **kwargs):
        if not end:
            end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(**kwargs)
        return (start, end)

    def print_timeseries_for_graphite(self, timeseries):
        for datapoint in timeseries:
            print 'graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' + \
                  str((datapoint['Timestamp'] -
                       datetime.datetime(1970, 1, 1)).total_seconds())

    def PutDataGetStats(self):
        assert self.testAwsReservedNamespaces()
        seconds_to_put_data = 120
        metric_data = 1
        time_string = str(int(time.time()))
        metric_name = "Metric-" + time_string
        incrementing = True
        while datetime.datetime.now().second != 0:
            self.tester.debug("Waiting for minute edge")
            self.tester.sleep(1)
        start = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=seconds_to_put_data)
        for i in xrange(seconds_to_put_data):
            timestamp = start + datetime.timedelta(seconds=i)
            self.tester.debug("Adding metric: {metric} to namespace: {namespace} "
                              "with value {value} at {timestamp}".format(
                                  metric=metric_name, namespace=self.namespace,
                                  value=metric_data, timestamp=timestamp))
            self.tester.cw.put_metric_data(self.namespace, [metric_name],
                                           [metric_data], timestamp=timestamp)
            if metric_data == 600 or metric_data == 0:
                incrementing = not incrementing
            if incrementing:
                metric_data += 1
            else:
                metric_data -= 1
        end = start + datetime.timedelta(seconds=seconds_to_put_data)
        self.tester.sleep(60)
        metric = self.tester.cw.list_metrics(namespace=self.namespace)[0]
        assert isinstance(metric, Metric)
        stats_array = metric.query(start_time=start, end_time=end,
                                   statistics=['Average', 'Sum', 'Maximum',
                                               'Minimum', 'SampleCount'])
        assert len(stats_array) == 2
        if stats_array[0]['Minimum'] == 1:
            first_sample = stats_array[0]
            second_sample = stats_array[1]
        else:
            second_sample = stats_array[0]
            first_sample = stats_array[1]
        print stats_array
        ## Check sample 1
        assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0
        assert first_sample['Average'] < 34 and first_sample['Average'] > 26
        assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500
        assert first_sample['SampleCount'] > 50
        ## Check sample 2
        assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50
        assert second_sample['Average'] < 95 and second_sample['Average'] > 80
        assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600
        assert second_sample['SampleCount'] > 50
        assert first_sample['Average'] < second_sample['Average']
        assert first_sample['Sum'] < second_sample['Sum']
        assert first_sample['Maximum'] < second_sample['Maximum']
        assert first_sample['Minimum'] < second_sample['Minimum']

    def ListMetrics(self, metricNames, dimension):
        self.debug('Get Metric list')
        metricList = self.tester.list_metrics(dimensions=dimension)
        self.debug('Checking to see if list is populated at all.')
        assert len(metricList) > 0
        self.debug('Make sure dimensions are listed.')
        found = False
        for metric in metricList:
            self.debug(metric.dimensions)
            if str(metric.dimensions).count(dimension[dimension.keys().pop()]):
                self.debug('Dimension ' + dimension[dimension.keys().pop()])
                found = True
                break
        assert found
        self.debug('Checking to see if we get all the expected instance metrics.')
        for metric in metricNames:
            assert str(metricList).count(metric['name']) > 0
            self.debug('Metric ' + metric['name'])

    def checkMetricFilters(self):
        self.debug('Check list_metrics filtering parameters')
        metricList = self.tester.list_metrics(namespace='AWS/EC2')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='AWS/EBS')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='NonExistent-NameSpace')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(metric_name='CPUUtilization')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(metric_name='NonExistent-Metric-Name')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.instanceDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('InstanceId', 'NonExistent-InstanceId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('ImageId', self.image.id))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('ImageId', 'NonExistent-imageId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('InstanceType', self.instance_type))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('InstanceType', 'NonExistent-InstanceType'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.autoScalingDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('AutoScalingGroupName',
                                    'NonExistent-AutoScalingGroupName'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0

    def IsMetricsListPopulated(self):
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        metrics1 = self.tester.cw.get_metric_statistics(
            60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average',
            dimensions=self.instanceDimension, unit='Percent')
        metrics2 = self.tester.cw.get_metric_statistics(
            60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average',
            dimensions=self.volumeDimension, unit='Bytes')
        if len(metrics1) > 0 and len(metrics2) > 0:
            return True
        else:
            return False

    def GetMetricStatistics(self, metricNames, namespace, dimension):
        period = 60
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        stats = self.tester.get_stats_array()
        ### Check to make sure we are getting all namespace metrics and statistics
        for i in range(len(metricNames)):
            values = []
            for j in range(len(stats)):
                metricName = metricNames[i]['name']
                statisticName = stats[j]
                unitType = metricNames[i]['unit']
                metrics = self.tester.get_metric_statistics(period, start, end,
                                                            metricName, namespace,
                                                            statisticName,
                                                            dimensions=dimension,
                                                            unit=unitType)
                ### This assures we are getting all statistics for all dimension metrics.
                assert int(len(metrics)) > 0
                statisticValue = str(metrics[0][statisticName])
                self.debug(metricName + ' : ' + statisticName + '=' + statisticValue +
                           ' ' + unitType)
                values.append(statisticValue)
            self.tester.validateStats(values)

    def setUpAutoscaling(self):
        ### setup autoscaling variables
        self.debug('Setting up AutoScaling, starting 1 instance')
        self.instance_type = 'm1.small'
        self.image = self.tester.get_emi(root_device_type='instance-store')
        self.launch_config_name = 'ASConfig'
        self.auto_scaling_group_name = 'ASGroup'
        self.exact = 'ExactCapacity'
        self.change = 'ChangeInCapacity'
        self.percent = 'PercentChangeInCapacity'
        self.cleanUpAutoscaling()
        diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &'
        diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &'
        ### create launch configuration
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         instance_type=self.instance_type,
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name],
                                         instance_monitoring=True,
                                         user_data=diskWrite + ' ' + diskRead)
        ### create auto scale group
        self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                    availability_zones=self.zone,
                                    launch_config=self.launch_config_name,
                                    min_size=0, max_size=5, desired_capacity=1)
        ### create auto scale policies
        self.tester.create_as_policy(name=self.exact, adjustment_type=self.exact,
                                     scaling_adjustment=0,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.change, adjustment_type=self.change,
                                     scaling_adjustment=1,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.percent, adjustment_type=self.percent,
                                     scaling_adjustment=-50,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        ## Wait for the last instance to go to running state.
        state = None
        while not str(state).endswith('running'):
            self.debug('Waiting for AutoScaling instance to go to running state ...')
            self.tester.sleep(15)
            self.instanceid = self.tester.get_last_instance_id()
            instance_list = self.tester.get_instances(idstring=self.instanceid)
            self.instance = instance_list.pop()
            state = self.instance.state
        self.debug(self.instanceid + ' is now running.')
        ### Create and attach a volume
        self.volume = self.tester.create_volume(self.zone.pop())
        self.tester.attach_volume(self.instance, self.volume, '/dev/sdf')
        ### Get the newly created policies.
        self.policy_exact = self.tester.autoscale.get_all_policies(
            policy_names=[self.exact])
        self.policy_change = self.tester.autoscale.get_all_policies(
            policy_names=[self.change])
        self.policy_percent = self.tester.autoscale.get_all_policies(
            policy_names=[self.percent])
        self.debug('AutoScaling setup Complete')

    def cleanUpAutoscaling(self):
        self.tester.delete_all_alarms()
        self.tester.delete_all_policies()
        self.tester.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.delete_launch_config(self.launch_config_name)

    def isInService(self):
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.metric_alarm(
            'exact', metric, comparison, threshold, period, evaluation_periods,
            statistic, description='TEST', namespace='AWS/EC2',
            dimensions=self.instanceDimension,
            alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.metric_alarm(
            'change', metric, comparison, threshold, period, evaluation_periods,
            statistic, description='TEST', namespace='AWS/EC2',
            dimensions=self.instanceDimension,
            alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.metric_alarm(
            'percent', metric, comparison, threshold, period, evaluation_periods,
            statistic, description='TEST', namespace='AWS/EC2',
            dimensions=self.instanceDimension,
            alarm_actions=self.policy_percent.pop().policy_arn)
        ### put all the alarms
        self.tester.put_metric_alarm(alarm_change)
        self.tester.put_metric_alarm(alarm_percent)
        self.tester.put_metric_alarm(alarm_exact)

    def testDescribeAlarms(self):  # name fixed from "testDesribeAlarms"
        self.debug(self.tester.describe_alarms())
        assert len(self.tester.describe_alarms()) >= 3
        ### test describe_alarms_for_metric for created alarms
        assert len(self.tester.describe_alarms_for_metric(
            'CPUUtilization', 'AWS/EC2', dimensions=self.instanceDimension)) == 3
        ### There should not be any alarms created for 'DiskReadOps'
        assert len(self.tester.describe_alarms_for_metric(
            'DiskReadOps', 'AWS/EC2', dimensions=self.instanceDimension)) == 0
        ### test describe_alarm_history
        self.debug(self.tester.describe_alarm_history())
        assert len(self.tester.describe_alarm_history()) >= 3

    def testAlarms(self):
        ### The number of running instances should equal the desired_capacity
        ### for the auto_scaling_group = (1)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.disable_alarm_actions('exact')
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name +
                   ' instances = 1')
        ### The number of running instances should equal
        ### desired_capacity + scaling_adjustment = (2)
        self.tester.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(len(group.instances))
        assert len(group.instances) == 2
        self.debug('Success the number of running ' + self.auto_scaling_group_name +
                   ' instances changed to 2')
        ### The number of running instances should equal the total from the
        ### previous scaling_adjustment (2) - 50% = (1)
        self.tester.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success the number of running ' + self.auto_scaling_group_name +
                   ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances is None
        self.debug('Success the number of running ' + self.auto_scaling_group_name +
                   ' instances is exactly 0')

    def testAwsReservedNamespaces(self):  # name fixed from "testAwsReservedNamspaces"
        try:
            self.tester.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception as e:
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamespaces generated expected '
                                  'InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamespaces did not throw expected '
                          'InvalidParameterValue error.')
        return False
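# Hedged usage sketch (assumption, not from the original source): how the
# get_time_window helper could feed print_timeseries_for_graphite, reusing the
# same get_metric_statistics call shape as IsMetricsListPopulated. This would
# live inside CloudWatchBasics; GraphCpuAverages is a hypothetical method name.
def GraphCpuAverages(self):
    start, end = self.get_time_window(minutes=20)
    series = self.tester.cw.get_metric_statistics(
        60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average',
        dimensions=self.instanceDimension, unit='Percent')
    self.print_timeseries_for_graphite(series)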
class Instances(unittest.TestCase): def setUp(self): # Setup basic eutester object self.tester = Eucaops( config_file="../input/2b_tested.lst", password="******", credpath="../credentials") self.tester.poll_count = 240 self.tester.start_euca_logs() ### Determine whether virtio drivers are being used self.device_prefix = "sd" if self.tester.hypervisor == "kvm": self.device_prefix = "vd" self.ephemeral = "/dev/" + self.device_prefix + "a2" ### Adda and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) ### Generate a keypair for the instance self.keypair = self.tester.add_keypair( "keypair-" + str(time.time())) self.keypath = os.curdir + "/" + self.keypair.name + ".pem" self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name) self.tester.sleep(10) def tearDown(self): """Stop Euca logs""" self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") self.tester.delete_group(self.group) self.tester.delete_keypair(self.keypair) os.remove(self.keypath) self.tester.stop_euca_logs() self.tester.save_euca_logs() self.reservation = None self.group = None self.keypair = None self.tester = None self.ephemeral = None def test1_Instance(self): """Instance checks including reachability and ephemeral storage""" for instance in self.reservation.instances: self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Instance did not go to running') self.assertNotEqual( instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same') self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance') instance_ssh = Eucaops( hostname=instance.public_dns_name, keypath= self.keypath) self.assertTrue( instance_ssh.found("ls -1 " + self.ephemeral, self.ephemeral), 'Did not find ephemeral storage at ' + self.ephemeral) self.assertTrue( self.tester.terminate_instances(self.reservation), 'Failure when terminating instance') def test2_ElasticIps(self): """ Basic test for elastic IPs""" for instance in self.reservation.instances: address = self.tester.allocate_address() self.assertTrue(address,'Unable to allocate address') self.assertTrue(self.tester.associate_address(instance, address)) self.tester.sleep(30) instance.update() self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP") address.disassociate() self.tester.sleep(30) instance.update() self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP") self.tester.release_address() def test3_MaxInstances(self): """Run the maximum m1.smalls available""" self.assertTrue(self.tester.terminate_instances(self.reservation), "Was not able to terminate original instance") available_small = self.tester.get_available_vms() self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,min=available_small, max=available_small) self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances went to running') def test4_LargeInstance(self): """Run 1 of the largest instance c1.xlarge""" self.assertTrue(self.tester.terminate_instances(self.reservation), "Was not able to terminate original instance") self.reservation = self.tester.run_instance(keypair=self.keypair.name, 
group=self.group.name,type="c1.xlarge") self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances went to running') def test5_MetaData(self): """Check metadata for consistency""" # Missing nodes # ['block-device-mapping/', 'ami-manifest-path' , 'hostname', 'placement/'] for instance in self.reservation.instances: instance_ssh = Eucaops( hostname=instance.public_dns_name, keypath= self.keypath) ### Check metadata service self.assertTrue(re.search(instance_ssh.get_metadata("public-keys/0/")[0], self.keypair.name)) self.assertTrue(re.search(instance_ssh.get_metadata("security-groups")[0], self.group)) #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) self.assertTrue(re.search(instance_ssh.get_metadata("instance-id")[0], instance.id)) self.assertTrue(re.search(instance_ssh.get_metadata("local-ipv4")[0] , instance.private_ip_address)) self.assertTrue(re.search(instance_ssh.get_metadata("public-ipv4")[0] , instance.ip_address)) self.assertTrue(re.search(instance_ssh.get_metadata("ami-id")[0], instance.image_id)) self.assertTrue(re.search(instance_ssh.get_metadata("ami-launch-index")[0], instance.ami_launch_index)) self.assertTrue(re.search(instance_ssh.get_metadata("reservation-id")[0], self.reservation.id)) self.assertTrue(re.search(instance_ssh.get_metadata("kernel-id")[0], instance.kernel)) self.assertTrue(re.search(instance_ssh.get_metadata("public-hostname")[0], instance.public_dns_name)) self.assertTrue(re.search(instance_ssh.get_metadata("ramdisk-id")[0], instance.ramdisk )) #instance-type self.assertTrue(re.search(instance_ssh.get_metadata("instance-type")[0], instance.instance_type )) def test6_Reboot(self): """Reboot instance ensure IP connectivity and volumes stay attached""" for instance in self.reservation.instances: ### Create 1GB volume in first AZ volume = self.tester.create_volume(self.tester.ec2.get_all_zones()[0].name) ### Pass in check the devices on the instance before the attachment device_path = "/dev/" + self.device_prefix +"j" instance_ssh = Eucaops( hostname=instance.public_dns_name, keypath= self.keypath) before_attach = instance_ssh.sys("ls -1 /dev/ | grep " + self.device_prefix) ### Attach the volume to the instance self.assertTrue(self.tester.attach_volume(instance, volume, device_path), "Failure attaching volume") ### Check devices after attachment after_attach = instance_ssh.sys("ls -1 /dev/ | grep " + self.device_prefix) new_devices = self.tester.diff(after_attach, before_attach) ### Check for device in instance self.assertTrue(instance_ssh.check_device("/dev/" + new_devices[0]), "Did not find device on instance before reboot") ### Reboot instance instance.reboot() self.tester.sleep(30) ### Check for device in instance instance_ssh = Eucaops( hostname=instance.public_dns_name, keypath= self.keypath) self.assertTrue(instance_ssh.check_device("/dev/" + new_devices[0]), "Did not find device on instance after reboot") self.assertTrue(self.tester.detach_volume(volume), "Unable to detach volume") self.assertTrue(self.tester.delete_volume(volume), "Unable to delete volume") def suite(): tests = ['test1_Instance', 'test2_ElasticIps', 'test3_MaxInstances', 'test4_LargeInstance','test5_MetaData', 'test6_Reboot'] return unittest.TestSuite(map(Instances, tests))
class InstanceBasics(EutesterTestCase): def __init__(self, name="InstanceBasics", credpath=None, region=None, config_file=None, password=None, emi=None, zone=None, user_data=None, instance_user=None, **kwargs): """ EC2 API tests focused on instance store instances :param credpath: Path to directory containing eucarc file :param region: EC2 Region to run testcase in :param config_file: Configuration file path :param password: SSH password for bare metal machines if config is passed and keys aren't synced :param emi: Image id to use for test :param zone: Availability Zone to run test in :param user_data: User Data to pass to instance :param instance_user: User to login to instance as :param kwargs: Additional arguments """ super(InstanceBasics, self).__init__(name=name) if region: self.tester = EC2ops(credpath=credpath, region=region) else: self.tester = Eucaops(config_file=config_file, password=password, credpath=credpath) self.instance_timeout = 600 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp") ### Generate a keypair for the instance self.keypair = self.tester.add_keypair("keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) if emi: self.image = emi else: self.image = self.tester.get_emi(root_device_type="instance-store", not_platform="windows") self.address = None self.volume = None self.private_addressing = False if not zone: zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name else: self.zone = zone self.reservation = None self.reservation_lock = threading.Lock() self.run_instance_params = { 'image': self.image, 'user_data': user_data, 'username': instance_user, 'keypair': self.keypair.name, 'group': self.group.name, 'zone': self.zone, 'timeout': self.instance_timeout } self.managed_network = True ### If I have access to the underlying infrastructure I can look ### at the network mode and only run certain tests where it makes sense if hasattr(self.tester, "service_manager"): cc = self.tester.get_component_machines("cc")[0] network_mode = cc.sys( "cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus.conf | grep MODE")[0] if re.search("(SYSTEM|STATIC)", network_mode): self.managed_network = False def set_reservation(self, reservation): self.reservation_lock.acquire() self.reservation = reservation self.reservation_lock.release() def clean_method(self): self.tester.cleanup_artifacts() def BasicInstanceChecks(self): """ This case was developed to run through a series of basic instance tests. The tests are as follows: - execute run_instances command - make sure that public DNS name and private IP aren't the same (This is for Managed/Managed-NOVLAN networking modes) - test to see if instance is ping-able - test to make sure that instance is accessible via ssh (ssh into instance and run basic ls command) If any of these tests fail, the test case will error out, logging the results. 
""" reservation = self.tester.run_instance(**self.run_instance_params) for instance in reservation.instances: self.assertTrue(self.tester.wait_for_reservation(reservation), 'Instance did not go to running') self.assertTrue(self.tester.ping(instance.ip_address), 'Could not ping instance') if self.image.virtualization_type == "paravirtual": paravirtual_ephemeral = "/dev/" + instance.rootfs_device + "2" self.assertFalse( instance.found("ls -1 " + paravirtual_ephemeral, "No such file or directory"), "Did not find ephemeral storage at " + paravirtual_ephemeral) elif self.image.virtualization_type == "hvm": hvm_ephemeral = "/dev/" + instance.block_device_prefix + "b" self.assertFalse( instance.found("ls -1 " + hvm_ephemeral, "No such file or directory"), "Did not find ephemeral storage at " + hvm_ephemeral) self.set_reservation(reservation) return reservation def ElasticIps(self): """ This case was developed to test elastic IPs in Eucalyptus. This test case does not test instances that are launched using private-addressing option. The test case executes the following tests: - allocates an IP, associates the IP to the instance, then pings the instance. - disassociates the allocated IP, then pings the instance. - releases the allocated IP address If any of the tests fail, the test case will error out, logging the results. """ if not self.reservation: reservation = self.tester.run_instance(**self.run_instance_params) else: reservation = self.reservation for instance in reservation.instances: if instance.ip_address == instance.private_ip_address: self.tester.debug( "WARNING: System or Static mode detected, skipping ElasticIps" ) return reservation self.address = self.tester.allocate_address() self.assertTrue(self.address, 'Unable to allocate address') self.tester.associate_address(instance, self.address) instance.update() self.assertTrue(self.tester.ping(instance.ip_address), "Could not ping instance with new IP") self.tester.disassociate_address_from_instance(instance) self.tester.release_address(self.address) self.address = None assert isinstance(instance, EuInstance) self.tester.sleep(5) instance.update() self.assertTrue(self.tester.ping(instance.ip_address), "Could not ping after dissassociate") self.set_reservation(reservation) return reservation def MultipleInstances(self): """ This case was developed to test the maximum number of m1.small vm types a configured cloud can run. The test runs the maximum number of m1.small vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. """ if self.reservation: self.tester.terminate_instances(self.reservation) self.set_reservation(None) reservation = self.tester.run_instance(min=2, max=2, **self.run_instance_params) self.assertTrue(self.tester.wait_for_reservation(reservation), 'Not all instances went to running') self.set_reservation(reservation) return reservation def LargestInstance(self): """ This case was developed to test the maximum number of c1.xlarge vm types a configured cloud can run. The test runs the maximum number of c1.xlarge vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. 
""" if self.reservation: self.tester.terminate_instances(self.reservation) self.set_reservation(None) reservation = self.tester.run_instance(type="c1.xlarge", **self.run_instance_params) self.assertTrue(self.tester.wait_for_reservation(reservation), 'Not all instances went to running') self.set_reservation(reservation) return reservation def MetaData(self): """ This case was developed to test the metadata service of an instance for consistency. The following meta-data attributes are tested: - public-keys/0/openssh-key - security-groups - instance-id - local-ipv4 - public-ipv4 - ami-id - ami-launch-index - reservation-id - placement/availability-zone - kernel-id - public-hostname - local-hostname - hostname - ramdisk-id - instance-type - any bad metadata that shouldn't be present. Missing nodes ['block-device-mapping/', 'ami-manifest-path'] If any of these tests fail, the test case will error out; logging the results. """ if not self.reservation: reservation = self.tester.run_instance(**self.run_instance_params) else: reservation = self.reservation for instance in reservation.instances: ## Need to verify the public key (could just be checking for a string of a certain length) self.assertTrue( re.match( instance.get_metadata("public-keys/0/openssh-key") [0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata') self.assertTrue( re.match( instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata') # Need to validate block device mapping #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) self.assertTrue( re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata') self.assertTrue( re.match( instance.get_metadata("local-ipv4")[0], instance.private_ip_address), 'Incorrect private ip in metadata') self.assertTrue( re.match( instance.get_metadata("public-ipv4")[0], instance.ip_address), 'Incorrect public ip in metadata') self.assertTrue( re.match( instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata') self.assertTrue( re.match( instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata') self.assertTrue( re.match( instance.get_metadata("reservation-id")[0], reservation.id), 'Incorrect reservation in metadata') self.assertTrue( re.match( instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata') if self.image.virtualization_type == "paravirtual": self.assertTrue( re.match( instance.get_metadata("kernel-id")[0], instance.kernel), 'Incorrect kernel id in metadata') self.assertTrue( re.match( instance.get_metadata("ramdisk-id")[0], instance.ramdisk), 'Incorrect ramdisk in metadata') self.assertTrue( re.match( instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata') self.assertTrue( re.match( instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata') self.assertTrue( re.match( instance.get_metadata("hostname")[0], instance.private_dns_name), 'Incorrect host name in metadata') self.assertTrue( re.match( instance.get_metadata("instance-type")[0], instance.instance_type), 'Incorrect instance type in metadata') bad_meta_data_keys = ['foobar'] for key in bad_meta_data_keys: self.assertTrue( re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node') 
self.set_reservation(reservation) return reservation def DNSResolveCheck(self): """ This case was developed to test DNS resolution information for public/private DNS names and IP addresses. The tested DNS resolution behavior is expected to follow AWS EC2. The following tests are run using the associated meta-data attributes: - check to see if Eucalyptus Dynamic DNS is configured - nslookup on hostname; checks to see if it matches local-ipv4 - nslookup on local-hostname; check to see if it matches local-ipv4 - nslookup on local-ipv4; check to see if it matches local-hostname - nslookup on public-hostname; check to see if it matches local-ipv4 - nslookup on public-ipv4; check to see if it matches public-hostname If any of these tests fail, the test case will error out; logging the results. """ if not self.reservation: reservation = self.tester.run_instance(**self.run_instance_params) else: reservation = self.reservation for instance in reservation.instances: if not re.search("internal", instance.private_dns_name): self.tester.debug( "Did not find instance DNS enabled, skipping test") self.set_reservation(reservation) return reservation # Test to see if Dynamic DNS has been configured # Per AWS standard, resolution should have private hostname or private IP as a valid response # Perform DNS resolution against public IP and public DNS name # Perform DNS resolution against private IP and private DNS name # Check to see if nslookup was able to resolve assert isinstance(instance, EuInstance) # Check nslookup to resolve public DNS Name to local-ipv4 address self.assertTrue( instance.found("nslookup " + instance.public_dns_name, instance.private_ip_address), "Incorrect DNS resolution for hostname.") # Check nslookup to resolve public-ipv4 address to public DNS name if self.managed_network: self.assertTrue( instance.found("nslookup " + instance.ip_address, instance.public_dns_name), "Incorrect DNS resolution for public IP address") # Check nslookup to resolve private DNS Name to local-ipv4 address if self.managed_network: self.assertTrue( instance.found("nslookup " + instance.private_dns_name, instance.private_ip_address), "Incorrect DNS resolution for private hostname.") # Check nslookup to resolve local-ipv4 address to private DNS name self.assertTrue( instance.found("nslookup " + instance.private_ip_address, instance.private_dns_name), "Incorrect DNS resolution for private IP address") self.assertTrue(self.tester.ping(instance.public_dns_name)) self.set_reservation(reservation) return reservation def Reboot(self): """ This case was developed to test IP connectivity and volume attachment after instance reboot. The following tests are done for this test case: - creates a 1 gig EBS volume, then attaches it - reboots the instance - attempts to connect to instance via ssh - checks to see if EBS volume is attached - detaches volume - deletes volume If any of these tests fail, the test case will error out; logging the results. 
""" if not self.reservation: reservation = self.tester.run_instance(**self.run_instance_params) else: reservation = self.reservation for instance in reservation.instances: ### Create 1GB volume in first AZ volume = self.tester.create_volume(instance.placement, size=1, timepergig=180) instance.attach_volume(volume) ### Reboot instance instance.reboot_instance_and_verify(waitconnect=20) instance.detach_euvolume(volume) self.tester.delete_volume(volume) self.set_reservation(reservation) return reservation def Churn(self): """ This case was developed to test robustness of Eucalyptus by starting instances, stopping them before they are running, and increase the time to terminate on each iteration. This test case leverages the BasicInstanceChecks test case. The following steps are ran: - runs BasicInstanceChecks test case 5 times, 10 second apart. - While each test is running, run and terminate instances with a 10sec sleep in between. - When a test finishes, rerun BasicInstanceChecks test case. If any of these tests fail, the test case will error out; logging the results. """ if self.reservation: self.tester.terminate_instances(self.reservation) self.set_reservation(None) try: available_instances_before = self.tester.get_available_vms( zone=self.zone) if available_instances_before > 4: count = 4 else: count = available_instances_before except IndexError, e: self.debug("Running as non-admin, defaulting to 4 VMs") available_instances_before = count = 4 future_instances = [] with ThreadPoolExecutor(max_workers=count) as executor: ## Start asynchronous activity ## Run 5 basic instance check instances 10s apart for i in xrange(count): future_instances.append( executor.submit(self.BasicInstanceChecks)) self.tester.sleep(10) with ThreadPoolExecutor(max_workers=count) as executor: ## Start asynchronous activity ## Terminate all instances for future in future_instances: executor.submit(self.tester.terminate_instances, future.result()) def available_after_greater(): return self.tester.get_available_vms( zone=self.zone) >= available_instances_before self.tester.wait_for_result(available_after_greater, result=True, timeout=360)
else: raise Exception("Failed to run an instance using emi:"+image.id) except Exception, e: pmsg("Doh, error while trying to run instance using emi:"+image.id) raise e try: keypath = os.getcwd() + "/" + keypair.name + ".pem" pmsg('Getting contents from /dev...') before_attach = instance.get_dev_dir() except Exception, ie: raise Exception("Failed to retrieve contents of /dev dir from instance, Error:"+str(ie)) pmsg("Got snapshot of /dev, now creating a volume of "+str(rfsize)+" to attach to our instance...") volume=tester.create_volume(zone, rfsize) dev = "/dev/sdf" pmsg("Attaching Volume ("+volume.id+") to instance("+instance.id+") trying dev("+dev+")") try: volume.attach(instance.id, "/dev/sdf") except Exception, ve: raise Exception("Error attaching volume:"+str(volume.id)+", Error:"+str(ve)) pmsg("Sleeping and waiting for volume to attach fully to instance") tester.sleep(20) for x in range(0,10): #after_attach = instance.sys('ls -1 /dev/| grep "sd\|vd"') after_attach = instance.get_dev_dir() #The attached device should be the difference in our /dev snapshots
### Check that the ephemeral is available to the VM if options.device_prefix + "a2\n" in before_attach: print "Found ephemeral device" else: instance.terminate() tester.fail("Did not find ephemeral mounted from /dev/" + options.device_prefix + "a2"+ " to /mnt on " + str(instance)) tester.tee("\n".join(tester.grep_euca_log(component="nc00",regex=instance.id)) + "\n".join(tester.grep_euca_log(regex=instance.id)) ) options.runs -= 1 continue ### If the --ebs flag was passed to the script, attach a volume and verify it can be used if options.ebs == True: ## Create the volume try: volume = tester.create_volume(options.zone, size=1 ) except Exception, e: tester.fail("Something went wrong when creating the volume") tester.tee( "Volume error\n".join(tester.grep_euca_log(regex=volume.id)) ) options.runs -= 1 continue ### Attach the volume (need to write a routine to validate the attachment) try: tester.tee("Attaching " + str(volume) + " as /dev/sdj") volume.attach(instance.id, "/dev/sdj") except Exception, e: volume.delete() tester.fail("Something went wrong when attaching " + str(volume) + " to " + str(instance) ) tester.tee( "Volume error\n".join(tester.grep_euca_log(regex=volume.id)) ) options.runs -= 1
class PopulateUpgrade(EutesterTestCase): def __init__(self, extra_args=None): self.setuptestcase() self.setup_parser() if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.get_args() # Setup basic eutester object self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password) self.tester.poll_count = 120 self.security_groups = [] ### Generate a keypair for the instance self.keypair = self.tester.add_keypair( "keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.args.emi if not self.image: self.image = self.tester.get_emi(root_device_type="instance-store") self.address = None self.volume = None self.snapshot = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.reservation = None def clean_method(self): pass def Instances(self, type="instance-store"): """ This case was developed to run through a series of basic instance tests. The tests are as follows: - execute run_instances command - make sure that public DNS name and private IP aren't the same (This is for Managed/Managed-NOVLAN networking modes) - test to see if instance is ping-able - test to make sure that instance is accessible via ssh (ssh into instance and run basic ls command) If any of these tests fail, the test case will error out, logging the results. """ test_image = self.tester.get_emi(root_device_type=type) ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) self.security_groups.append(self.group) # Test: INSTANCESTORE VOLATTACH:no ADDR:user instance_1 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0] instance_1_address = self.tester.allocate_address() self.tester.associate_address(instance=instance_1, address=instance_1_address) # Test: INSTANCESTORE VOLATTACH:no ADDR:system instance_2 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0] # Test: INSTANCESTORE VOLATTACH:no ADDR:private instance_3 = self.tester.run_instance(test_image, group=self.group.name, private_addressing=True, is_reachable=False).instances[0] # Test: INSTANCESTORE VOLATTACH:yes ADDR:user instance_4 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0] instance_4_address = self.tester.allocate_address() self.tester.associate_address(instance=instance_4, address=instance_4_address) volume = self.tester.create_volume(zone=self.zone) instance_4.attach_volume(volume=volume) # Test: INSTANCESTORE VOLATTACH:yes ADDR:system instance_5 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0] volume = self.tester.create_volume(zone=self.zone) instance_5.attach_volume(volume=volume) self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) self.security_groups.append(self.group) # Test: INSTANCESTORE VOLATTACH:no ADDR:private instance_6 = self.tester.run_instance(test_image, group=self.group.name, private_addressing=True, is_reachable=False).instances[0] def PopulateAll(self): 
self.Instances("instance-store") self.Instances("ebs")
class MigrationTest(EutesterTestCase): def __init__(self, extra_args=None): self.setuptestcase() self.setup_parser() if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.parser.add_argument('--imgurl', help="BFEBS Image to splat down", default=None) self.get_args() self.tester = Eucaops( config_file=self.args.config, password=self.args.password) self.numberOfNodes = self.tester.service_manager.get_all_node_controllers() if len(self.numberOfNodes) < 2: exit("Not enough NCs to test instance migration.") self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) self.keypair = self.tester.add_keypair( "keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.args.emi if not self.image: self.image = self.tester.get_emi(root_device_type="instance-store") self.numberOfResources = 3 zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name try: self.tester.get_emi(root_device_type="ebs") except: bfebs = self.do_with_args(BFEBSBasics) bfebs.RegisterImage() def clean_method(self): self.tester.cleanup_artifacts() def MigrationBasic(self, volume=None): enabled_clc = self.tester.service_manager.get_enabled_clc().machine self.reservation = self.tester.run_instance(self.image, username=self.args.instance_user, keypair=self.keypair.name, group=self.group.name, zone=self.zone) instance = self.reservation.instances[0] assert isinstance(instance, EuInstance) volume_device = None if volume is not None: volume_device = instance.attach_euvolume(volume) self.tester.service_manager.populate_nodes() source_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0] enabled_clc.sys( "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-migrate-instances -i " + instance.id ) def wait_for_new_nc(): self.tester.service_manager.populate_nodes() destination_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0] return source_nc.hostname == destination_nc.hostname self.tester.wait_for_result(wait_for_new_nc, False, timeout=600, poll_wait=60) self.assertTrue(self.tester.ping(instance.public_dns_name), 'Could not ping instance') if volume_device: instance.sys("ls " + volume_device, code=0) destination_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0] if destination_nc.machine.distro.name != "vmware": destination_nc.machine.sys("virsh list | grep " + instance.id, code=0) else: destination_nc.machine.sys("esxcli vm process list | grep " + instance.id, code=0) self.tester.terminate_instances(reservation=self.reservation) if volume is not None: self.tester.delete_volume(volume) def MigrationInstanceStoreWithVol(self): volume = self.tester.create_volume(zone=self.zone) assert isinstance(volume, EuVolume) self.MigrationBasic(volume) def MigrationBasicEBSBacked(self, volume=None): self.image = self.tester.get_emi(root_device_type="ebs") self.MigrationBasic(volume) def MigrationBasicEBSBackedWithVol(self): volume = self.tester.create_volume(zone=self.zone) assert isinstance(volume, EuVolume) self.MigrationBasicEBSBacked(volume) def MigrateToDest(self): enabled_clc = self.tester.service_manager.get_enabled_clc().machine self.reservation = self.tester.run_instance(self.image, username=self.args.instance_user, keypair=self.keypair.name, 
group=self.group.name, zone=self.zone) instance = self.reservation.instances[0] self.tester.service_manager.populate_nodes() self.source_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0] all_nc = self.tester.service_manager.get_all_node_controllers() self.destination_nc = None for nc in all_nc: if nc.machine.hostname != self.source_nc.machine.hostname: self.destination_nc = nc enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-migrate-instances -i " + instance.id + " --dest " + self.destination_nc.machine.hostname) def wait_for_new_nc(): self.tester.service_manager.populate_nodes() self.instance_node = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0] return self.instance_node.hostname == self.destination_nc.hostname self.tester.wait_for_result(wait_for_new_nc, True, timeout=600, poll_wait=60) self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance') # migrate the instance back to its original source node self.destination_nc = self.source_nc enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-migrate-instances -i " + instance.id + " --dest " + self.destination_nc.machine.hostname) self.tester.wait_for_result(wait_for_new_nc, True, timeout=600, poll_wait=60) self.assertTrue(self.tester.ping(instance.public_dns_name), 'Could not ping instance') self.tester.terminate_instances(reservation=self.reservation) def MigrationToDestEBSBacked(self): self.image = self.tester.get_emi(root_device_type="ebs") self.MigrateToDest() def EvacuateNC(self, volume_list=None): instance_list = [] enabled_clc = self.tester.service_manager.get_enabled_clc().machine self.nodes = self.tester.service_manager.populate_nodes() # pop out one NC to fill in self.source_nc = self.nodes.pop() def set_state(node, state): # retrying, see EUCA-6389 while node.state != state: self.tester.debug(node.hostname + ": SET STATE TO " + state) enabled_clc.sys("euca-modify-service -s " + state + " " + node.hostname) self.tester.sleep(10) tmpnodes = self.tester.service_manager.populate_nodes() for tmpnode in tmpnodes: if tmpnode.hostname == node.hostname: node = tmpnode # stop all the NCs for node in self.nodes: set_state(node, "STOPPED") self.image = self.tester.get_emi(root_device_type="instance-store") self.reservation = self.tester.run_instance(self.image, min=3, max=3, username=self.args.instance_user, keypair=self.keypair.name, group=self.group.name, zone=self.zone) for i in xrange(3): instance = self.reservation.instances[i] instance_list.append(instance) assert isinstance(instance, EuInstance) volume_device = None if volume_list: volume_device = instance.attach_euvolume(volume_list[i]) self.nodes = self.tester.service_manager.populate_nodes() # start all the NCs for node in self.nodes: if node.hostname != self.source_nc.hostname: set_state(node, "ENABLED") self.nodes = self.tester.service_manager.populate_nodes() # evacuate source NC enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-migrate-instances --source " + self.source_nc.machine.hostname) def wait_for_evacuation(): self.tester.service_manager.populate_nodes() if self.source_nc.machine.distro.name == "vmware": emptyNC = self.source_nc.sys("esxcli vm process list | grep 'Display Name' | awk '{print $3}'") else: emptyNC = self.source_nc.get_virsh_list() return len(emptyNC) == 0 
self.tester.wait_for_result(wait_for_evacuation, True, timeout=600, poll_wait=60) for inst in instance_list: self.assertTrue(self.tester.ping(inst.public_dns_name), 'Could not ping instance') self.tester.terminate_instances(reservation=self.reservation) if volume_list: self.tester.delete_volumes(volume_list) def EvacuateNCWithVol(self): volume_list = [] for i in xrange(self.numberOfResources): volume = self.tester.create_volume(zone=self.zone) assert isinstance(volume, EuVolume) volume_list.append(volume) self.EvacuateNC(volume_list) def EvacuateNCAllEBS(self): self.image = self.tester.get_emi(root_device_type="ebs") self.EvacuateNC()
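# MigrateToDest picks its target NC with an inline loop; the same
# selection factored into a small helper for clarity (a sketch written
# against the service_manager node objects used above):
def pick_destination_nc(all_ncs, source_nc):
    """Return any node controller other than the one hosting the instance."""
    for nc in all_ncs:
        if nc.machine.hostname != source_nc.machine.hostname:
            return nc
    return None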
class LoadGenerator(unittest.TestCase): def setUp(self): # Setup basic eutester object if options.config_file: self.tester = Eucaops(config_file=options.config_file, password=options.clc_password) else: print "\tNeed to pass --config_file option. Try --help for more information\n" exit(1) ### Grab zone for volume tests zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.volumes = [] self.statuses = [] def tearDown(self): """ If extra debugging is set, print additional CLC and SC information """ if options.print_debug is True: self.get_clc_stats() self.get_sc_stats() """ Print the results of volumes created and total volumes of cloud """ self.current_ebs_reporting() """ Print all the volumes' statuses for the entire cloud """ self.overall_ebs_reporting() """ Display information in eucalyptus_storage,eucalyptus_cloud tables related to EBS - * eucalyptus_storage relations: iscsivolumeinfo, iscsimetadata, volumes, storage_stats_info * eucalyptus_cloud relations: metadata_volumes """ self.iscivolumeinfo_db_dump() self.iscsimetadata_db_dump() self.volumes_db_dump() self.cloudmetadata_db_dump() self.storagestats_db_dump() """ Now destroy volumes created and reached available state from test """ for vol in self.volumes: if vol.status == 'available': self.tester.delete_volume(vol) self.volumes = None self.statuses = None self.tester = None def current_ebs_reporting(self): """ Print the results of volumes created and total volumes of cloud """ found_creating = self.statuses.count("creating") found_available = self.statuses.count("available") found_deleting = self.statuses.count("deleting") found_deleted = self.statuses.count("deleted") found_failed = self.statuses.count("failed") self.tester.debug("##########################################\n") self.tester.debug("\t**** Results of Finished Test ****\n") self.tester.debug("\t" + str(found_creating) + " Volumes in CREATING state.\n") self.tester.debug("\t" + str(found_available) + " Volumes in AVAILABLE state.\n") self.tester.debug("\t" + str(found_deleting) + " Volumes in DELETING state.\n") self.tester.debug("\t" + str(found_deleted) + " Volumes in DELETED state.\n") self.tester.debug("\t" + str(found_failed) + " Volumes in FAILED state.\n") self.tester.debug("##########################################\n") found_creating = None found_available = None found_deleting = None found_deleted = None found_failed = None def overall_ebs_reporting(self): """ Print all the volumes' statuses for the entire cloud """ volumes = self.tester.get_volumes() statuses = [] for master_vol in volumes: statuses.append(master_vol.status) overall_creating = statuses.count("creating") overall_available = statuses.count("available") overall_deleting = statuses.count("deleting") overall_deleted = statuses.count("deleted") overall_failed = statuses.count("failed") """ Grab cloud property for volume location to get stats of files. 
""" volumes_dir = "" for machine in self.tester.get_component_machines("clc"): if volumes_dir == "": volumes_dir = (machine.sys( "source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep volumesdir | awk '{print $3}'" )) overall_ebs_size = len(volumes) ebs_filesystem_size = "" for machine in self.tester.get_component_machines("sc"): ebs_filesystem_size = (machine.sys("du -sh " + volumes_dir[0])) self.tester.debug("##########################################\n") self.tester.debug("\t**** Results of Current Volumes on Cloud ****\n") self.tester.debug("\t" + str(overall_creating) + " Volumes in CREATING state.\n") self.tester.debug("\t" + str(overall_available) + " Volumes in AVAILABLE state.\n") self.tester.debug("\t" + str(overall_deleting) + " Volumes in DELETING state.\n") self.tester.debug("\t" + str(overall_deleted) + " Volumes in DELETED state.\n") self.tester.debug("\t" + str(overall_failed) + " Volumes in FAILED state.\n") self.tester.debug("==========================================\n") self.tester.debug("Sum of All EBS Volume Sizes (in GBs): " + str(overall_ebs_size) + "\n") self.tester.debug( "Disk Space Used under Cloud defined Storage Directory [ " + volumes_dir[0] + " ]: " + ebs_filesystem_size[0] + "\n") self.tester.debug("##########################################\n") """ Make sure and clean up volumes that got to "available" state; this is mostly for EbsBotoStress cleanup """ for vol in volumes: if vol.status == 'available': self.tester.delete_volume(vol) """ Clean up everything else """ statuses = None volumes = None ebs_filesystem_size = None overall_ebs_size = None volumes_dir = None overall_creating = None overall_available = None overall_deleting = None overall_deleted = None overall_failed = None def iscivolumeinfo_db_dump(self): """ Print contents of iscsivolumeinfo relation in eucalyptus_storage table """ now = datetime.datetime.now() iscsivolinfo_file = "~/iscsivolinfo_file-" + str( now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys( "psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from iscsivolumeinfo' -o " + iscsivolinfo_file) db_dump = (machine.sys("cat " + iscsivolinfo_file)) machine.sys("rm -rf " + iscsivolinfo_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of iscsivolumeinfo relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None iscsivolinfo_file = None db_dump = None def iscsimetadata_db_dump(self): """ Print contents of iscsimetadata relation in eucalyptus_storage table """ now = datetime.datetime.now() iscsimetadata_file = "~/iscsimetadata_file-" + str( now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys( "psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from iscsimetadata' -o " + iscsimetadata_file) db_dump = (machine.sys("cat " + iscsimetadata_file)) machine.sys("rm -rf " + iscsimetadata_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of iscsimetadata relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None iscsimetadata_file = None db_dump = None def volumes_db_dump(self): """ Print contents of volumes relation in 
eucalyptus_storage table """ now = datetime.datetime.now() volumes_file = "~/volumes_file-" + str(now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys( "psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from volumes' -o " + volumes_file) db_dump = (machine.sys("cat " + volumes_file)) machine.sys("rm -rf " + volumes_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of volume relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None volumes_file = None db_dump = None def cloudmetadata_db_dump(self): """ Print contents of metadata_volumes relation in eucalyptus_cloud table """ now = datetime.datetime.now() cloudmetadata_file = "~/cloudmetadata_file-" + str( now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys( "psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_cloud -c 'select * from metadata_volumes' -o " + cloudmetadata_file) db_dump = (machine.sys("cat " + cloudmetadata_file)) machine.sys("rm -rf " + cloudmetadata_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of metadata_volumes relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None cloudmetadata_file = None db_dump = None def storagestats_db_dump(self): """ Print contents of storage_stats_info relation in eucalyptus_storage table """ now = datetime.datetime.now() storagestats_file = "~/storagestats_file-" + str( now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys( "psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from storage_stats_info' -o " + storagestats_file) db_dump = (machine.sys("cat " + storagestats_file)) machine.sys("rm -rf " + storagestats_file) self.tester.debug("##########################################\n") self.tester.debug( "\t**** Content of storage_stats_info relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None storagestats_file = None db_dump = None def run_command_list(self, machine, list): for command in list: machine.sys(command) def get_clc_stats(self): basic_commands = ['df -B M', 'ps aux', 'free', 'uptime'] clc_commands = ['euca-describe-properties | grep volume'] clc_status = clc_commands + basic_commands for machine in self.tester.get_component_machines("clc"): for command in clc_status: machine.sys("source " + self.tester.credpath + "/eucarc && " + command) def get_sc_stats(self): basic_commands = ['df -B M', 'ps aux', 'free', 'uptime'] """ Grab cloud property for volume location to get stats of files. 
""" volumes_dir = "" for machine in self.tester.get_component_machines("clc"): if volumes_dir == "": volumes_dir = (machine.sys( "source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep volumesdir | awk '{print $3}'" )) sc_commands = [ 'tgtadm --lld iscsi --op show --mode account', 'tgtadm --lld iscsi --op show --mode target', 'du -sh ' + volumes_dir[0], 'lvdisplay | grep "/dev/vg-"', 'vgdisplay', 'pvdisplay', 'losetup -a | grep ' + volumes_dir[0] + ' | wc -l', 'ls -l ' + volumes_dir[0] ] sc_status = basic_commands + sc_commands for machine in self.tester.get_component_machines("sc"): self.run_command_list(machine, sc_status) def GenerateVolumesLoad(self): """ Grab EBS Timeout property of Cloud """ ebs_timeout = "" for machine in self.tester.get_component_machines("clc"): if ebs_timeout == "": ebs_timeout = (machine.sys( "source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep ebs_volume_creation_timeout | awk '{print $3}'" )) """ Create volumes in series """ for i in xrange(options.number_of_vol): volume = self.tester.create_volume(self.zone) if volume is not None: self.volumes.append(volume) self.statuses.append(volume.status) """ Sleep the EBS Timeout property; only have to call it once """ self.tester.debug("###\n") self.tester.debug( "###\tWaiting till EBS Timeout is reached; sleep for " + ebs_timeout[0] + " seconds.\n") self.tester.debug("###\n") self.tester.sleep(float(ebs_timeout[0])) def GenerateVolumesBoto(self): """ Grab EBS Timeout property of Cloud """ ebs_timeout = "" for machine in self.tester.get_component_machines("clc"): if ebs_timeout == "": ebs_timeout = (machine.sys( "source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep ebs_volume_creation_timeout | awk '{print $3}'" )) """ Create 1 Gig volumes in series """ vol_size = 1 for i in xrange(options.number_of_vol): volume = self.tester.ec2.create_volume(vol_size, self.zone) if volume is not None: self.tester.debug("Volume (" + volume.id + ") is in (" + volume.status + ") state.\n") self.volumes.append(volume) self.statuses.append(volume.status) """ Sleep the EBS Timeout property; only have to call it once """ self.tester.debug("###\n") self.tester.debug( "###\tWaiting till EBS Timeout is reached; sleep for " + ebs_timeout[0] + " seconds.\n") self.tester.debug("###\n") self.tester.sleep(float(ebs_timeout[0])) def GenerateCloudStatistics(self): """ Grab status of all volumes on cloud, along with database information """ self.overall_ebs_reporting() """ Display information in eucalyptus_storage,eucalyptus_cloud tables related to EBS - * eucalyptus_storage relations: iscsivolumeinfo, iscsimetadata, volumes, storage_stats_info * eucalyptus_cloud relations: metadata_volumes """ self.iscivolumeinfo_db_dump() self.iscsimetadata_db_dump() self.volumes_db_dump() self.cloudmetadata_db_dump() self.storagestats_db_dump() def EbsStress(self, testcase="GenerateVolumesLoad"): """ Generate volume load; For each thread created - options.number_of_threads - options.number_of_vol will be created """ from multiprocessing import Process from multiprocessing import Queue ### Increase time to by step seconds on each iteration ### This also gives enough time for creds to be pulled from CLC step = 10 """ If extra debugging is set, print additional CLC and SC information """ if options.print_debug is True: self.get_clc_stats() self.get_sc_stats() thread_pool = [] queue_pool = [] ## Start asynchronous activity ## Run GenerateVolumesLoad testcase seconds apart for i in 
xrange(options.number_of_threads): q = Queue() queue_pool.append(q) p = Process(target=self.run_testcase_thread, args=(q, step * i, testcase)) thread_pool.append(p) self.tester.debug("Starting Thread " + str(i) + " in " + str(step * i)) p.start() fail_count = 0 ### Block until the script returns a result for queue in queue_pool: test_result = queue.get(True) self.tester.debug("Got Result: " + str(test_result)) fail_count += test_result for thread in thread_pool: thread.join() if fail_count > 0: self.tester.critical("Failure detected in one of the " + str(fail_count) + " " + testcase + " tests") self.tester.debug("Successfully completed EbsStress test") def EbsBotoStress(self, testcase="GenerateVolumesBoto"): """ Generate volume load; For each thread created - options.number_of_threads - options.number_of_vol will be created """ from multiprocessing import Process from multiprocessing import Queue ### Increase time to by step seconds on each iteration ### This also gives enough time for creds to be pulled from CLC step = 10 """ If extra debugging is set, print additional CLC and SC information """ if options.print_debug is True: self.get_clc_stats() self.get_sc_stats() thread_pool = [] queue_pool = [] ## Start asynchronous activity ## Run GenerateVolumesLoad testcase seconds apart for i in xrange(options.number_of_threads): q = Queue() queue_pool.append(q) p = Process(target=self.run_testcase_thread, args=(q, step * i, testcase)) thread_pool.append(p) self.tester.debug("Starting Thread " + str(i) + " in " + str(step * i)) p.start() fail_count = 0 ### Block until the script returns a result for queue in queue_pool: test_result = queue.get(True) self.tester.debug("Got Result: " + str(test_result)) fail_count += test_result for thread in thread_pool: thread.join() if fail_count > 0: self.tester.critical("Failure detected in one of the " + str(fail_count) + " " + testcase + " tests") self.tester.debug("Successfully completed EbsBotoStress test") def run_testcase_thread(self, queue, delay=20, testname=None): ### Thread that runs a testcase (function) and returns its pass or fail result self.tester.sleep(delay) try: result = unittest.TextTestRunner(verbosity=2).run( LoadGenerator(testname)) except Exception, e: queue.put(1) raise e if result.wasSuccessful(): self.tester.debug("Passed test: " + testname) queue.put(0) return False else: self.tester.debug("Failed test: " + testname) queue.put(1) return True
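# EbsStress and EbsBotoStress share one fan-out/collect shape: start N
# worker processes, hand each a Queue, read one pass/fail integer back
# from every queue, then join. A distilled sketch of that shape; the
# target callable below is a stand-in for run_testcase_thread.
def fan_out(target, workers, step=10):
    from multiprocessing import Process, Queue
    queues = []
    procs = []
    for i in range(workers):
        q = Queue()
        p = Process(target=target, args=(q, step * i))
        queues.append(q)
        procs.append(p)
        p.start()
    # Each worker puts 0 on success and 1 on failure, so the sum is the
    # number of failed workers.
    failures = sum(q.get(True) for q in queues)
    for p in procs:
        p.join()
    return failures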
class CloudWatchBasics(EutesterTestCase): def __init__(self, extra_args=None): self.setuptestcase() self.setup_parser() self.parser.add_argument('--clean_on_exit', action='store_true', default=True, help='Boolean, used to flag whether to run the clean up method after running the test list') if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.get_args() ### Setup basic eutester object if self.args.region: self.tester = CWops(credpath=self.args.credpath, region=self.args.region) else: self.tester = Eucaops(config_file=self.args.config, password=self.args.password, credpath=self.args.credpath) self.start_time = str(int(time.time())) self.zone = self.tester.get_zones() self.namespace = 'Namespace-' + self.start_time self.keypair = self.tester.add_keypair() self.group = self.tester.add_group() ### Setup AutoScaling self.setUpAutoscaling() ### Create Dimensions used in tests self.instanceDimension = newDimension('InstanceId', self.instanceid) self.volumeDimension = newDimension('VolumeId', self.volume.id) self.autoScalingDimension = newDimension('AutoScalingGroupName', self.auto_scaling_group_name) ### Setup Alarms self.setUpAlarms() ### Wait for metrics to populate, timeout 30 minutes self.tester.wait_for_result(self.IsMetricsListPopulated, result=True, timeout=1800) def clean_method(self): self.cleanUpAutoscaling() self.tester.cleanup_artifacts() self.tester.delete_keypair(self.keypair) pass def get_time_window(self, end=None, **kwargs): if not end: end = datetime.datetime.utcnow() start = end - datetime.timedelta(**kwargs) return (start, end) def print_timeseries_for_graphite(self, timeseries): for datapoint in timeseries: print 'graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' + \ str((datapoint['Timestamp'] - datetime.datetime(1970, 1, 1)).total_seconds()) def PutDataGetStats(self): assert self.testAwsReservedNamespaces() seconds_to_put_data = 120 metric_data = 1 time_string = str(int(time.time())) metric_name = "Metric-" + time_string incrementing = True while datetime.datetime.now().second != 0: self.tester.debug("Waiting for minute edge") self.tester.sleep(1) start = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_to_put_data) for i in xrange(seconds_to_put_data): timestamp = start + datetime.timedelta(seconds=i) self.tester.debug( "Adding metric: {metric} to namespace: {namespace} with value {value} at {timestamp}".format( metric=metric_name, namespace=self.namespace, value=metric_data, timestamp=timestamp)) self.tester.cw.put_metric_data(self.namespace, [metric_name], [metric_data], timestamp=timestamp) if metric_data == 600 or metric_data == 0: incrementing = not incrementing if incrementing: metric_data += 1 else: metric_data -= 1 end = start + datetime.timedelta(seconds=seconds_to_put_data) def isMetricsAvailable(): metrics = self.tester.cw.list_metrics(namespace=self.namespace) if not metrics: return False else: return True self.tester.wait_for_result(isMetricsAvailable, True, timeout=900, poll_wait=300) metric = self.tester.cw.list_metrics(namespace=self.namespace)[0] assert isinstance(metric, Metric) stats_array = metric.query(start_time=start, end_time=end, statistics=['Average', 'Sum', 'Maximum', 'Minimum', 'SampleCount']) assert len(stats_array) == 2 if stats_array[0]['Minimum'] == 1: first_sample = stats_array[0] second_sample = stats_array[1] else: second_sample = stats_array[0] first_sample = stats_array[1] print stats_array ## Check sample 1 assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0 assert 
first_sample['Average'] < 34 and first_sample['Average'] > 26 assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500 assert first_sample['SampleCount'] > 50 ##Check sample 2 assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50 assert second_sample['Average'] < 95 and second_sample['Average'] > 80 assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600 assert second_sample['SampleCount'] > 50 assert first_sample['Average'] < second_sample['Average'] assert first_sample['Sum'] < second_sample['Sum'] assert first_sample['Maximum'] < second_sample['Maximum'] assert first_sample['Minimum'] < second_sample['Minimum'] def ListMetrics(self, metricNames, dimension): self.debug('Get Metric list') metricList = self.tester.list_metrics(dimensions=dimension) self.debug('Checking to see if list is populated at all.') assert len(metricList) > 0 self.debug('Make sure dimensions are listed.') found = False for metric in metricList: self.debug(metric.dimensions) if str(metric.dimensions).count(dimension[dimension.keys().pop()]): self.debug('Dimension ' + dimension[dimension.keys().pop()]) found = True break assert found self.debug('Checking to see if we get all the expected instance metrics.') for metric in metricNames: assert str(metricList).count(metric['name']) > 0 self.debug('Metric ' + metric['name']) pass def checkMetricFilters(self): self.debug('Check list_metrics filtering parameters') metricList = self.tester.list_metrics(namespace='AWS/EC2') assert len(metricList) > 0 metricList = self.tester.list_metrics(namespace='AWS/EBS') assert len(metricList) > 0 metricList = self.tester.list_metrics(namespace='NonExistent-NameSpace') assert len(metricList) == 0 metricList = self.tester.list_metrics(metric_name='CPUUtilization') assert len(metricList) > 0 metricList = self.tester.list_metrics(metric_name='NonExistent-Metric-Name') assert len(metricList) == 0 metricList = self.tester.list_metrics(dimensions=self.instanceDimension) assert len(metricList) > 0 metricList = self.tester.list_metrics(dimensions=newDimension('InstanceId', 'NonExistent-InstanceId')) assert len(metricList) == 0 metricList = self.tester.list_metrics(dimensions=self.volumeDimension) assert len(metricList) > 0 metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId')) assert len(metricList) == 0 metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', self.image.id)) assert len(metricList) > 0 metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', 'NonExistent-imageId')) assert len(metricList) == 0 metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', self.instance_type)) assert len(metricList) > 0 metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', 'NonExistent-InstanceType')) assert len(metricList) == 0 metricList = self.tester.list_metrics(dimensions=self.autoScalingDimension) assert len(metricList) > 0 metricList = self.tester.list_metrics( dimensions=newDimension('AutoScalingGroupName', 'NonExistent-AutoScalingGroupName')) assert len(metricList) == 0 metricList = self.tester.list_metrics(dimensions=self.volumeDimension) assert len(metricList) > 0 metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId')) assert len(metricList) == 0 pass def IsMetricsListPopulated(self): end = datetime.datetime.utcnow() start = end - datetime.timedelta(minutes=20) metrics1 = self.tester.cw.get_metric_statistics(60, start, end, 'CPUUtilization', 
'AWS/EC2', 'Average', dimensions=self.instanceDimension, unit='Percent') metrics2 = self.tester.cw.get_metric_statistics(60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average', dimensions=self.volumeDimension, unit='Bytes') return len(metrics1) > 0 and len(metrics2) > 0 def GetMetricStatistics(self, metricNames, namespace, dimension): period = 60 end = datetime.datetime.utcnow() start = end - datetime.timedelta(minutes=20) stats = self.tester.get_stats_array() ### Check to make sure we are getting all namespace metrics and statistics for i in range(len(metricNames)): values = [] for j in range(len(stats)): metricName = metricNames[i]['name'] statisticName = stats[j] unitType = metricNames[i]['unit'] metrics = self.tester.get_metric_statistics(period, start, end, metricName, namespace, statisticName, dimensions=dimension, unit=unitType) ### This assures we are getting all statistics for all dimension metrics. assert len(metrics) > 0 statisticValue = str(metrics[0][statisticName]) self.debug(metricName + ' : ' + statisticName + '=' + statisticValue + ' ' + unitType) values.append(statisticValue) self.tester.validateStats(values) def setUpAutoscaling(self): ### Setup autoscaling variables self.debug('Setting up AutoScaling, starting 1 instance') self.instance_type = 'm1.small' self.image = self.tester.get_emi(root_device_type='instance-store') self.launch_config_name = 'ASConfig' self.auto_scaling_group_name = 'ASGroup' self.exact = 'ExactCapacity' self.change = 'ChangeInCapacity' self.percent = 'PercentChangeInCapacity' self.cleanUpAutoscaling() diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &' diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &' ### create launch configuration self.tester.create_launch_config(name=self.launch_config_name, image_id=self.image.id, instance_type=self.instance_type, key_name=self.keypair.name, security_groups=[self.group.name], instance_monitoring=True, user_data=diskWrite + ' ' + diskRead) ### create auto scale group self.tester.create_as_group(group_name=self.auto_scaling_group_name, availability_zones=self.zone, launch_config=self.launch_config_name, min_size=0, max_size=5, desired_capacity=1) ### create auto scale policies self.tester.create_as_policy(name=self.exact, adjustment_type=self.exact, scaling_adjustment=0, as_name=self.auto_scaling_group_name, cooldown=0) self.tester.create_as_policy(name=self.change, adjustment_type=self.change, scaling_adjustment=1, as_name=self.auto_scaling_group_name, cooldown=0) self.tester.create_as_policy(name=self.percent, adjustment_type=self.percent, scaling_adjustment=-50, as_name=self.auto_scaling_group_name, cooldown=0) ## Wait for the instance to go to running state. self.tester.wait_for_result(self.tester.wait_for_instances, True, timeout=600, group_name=self.auto_scaling_group_name) self.instanceid = self.tester.get_last_instance_id() instance_list = self.tester.get_instances(idstring=self.instanceid) self.instance = instance_list.pop() self.debug('ASG is now set up.') ### Create and attach a volume self.volume = self.tester.create_volume(self.zone.pop()) self.tester.attach_volume(self.instance, self.volume, '/dev/sdf') ### Get the newly created policies. 
self.policy_exact = self.tester.autoscale.get_all_policies(policy_names=[self.exact])
        self.policy_change = self.tester.autoscale.get_all_policies(policy_names=[self.change])
        self.policy_percent = self.tester.autoscale.get_all_policies(policy_names=[self.percent])
        self.debug('AutoScaling setup complete')

    def cleanUpAutoscaling(self):
        self.tester.delete_all_alarms()
        self.tester.delete_all_policies()
        self.tester.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.delete_launch_config(self.launch_config_name)

    def isInService(self):
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.metric_alarm('exact', metric, comparison, threshold, period, evaluation_periods, statistic, description='TEST', namespace='AWS/EC2', dimensions=self.instanceDimension, alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.metric_alarm('change', metric, comparison, threshold, period, evaluation_periods, statistic, description='TEST', namespace='AWS/EC2', dimensions=self.instanceDimension, alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.metric_alarm('percent', metric, comparison, threshold, period, evaluation_periods, statistic, description='TEST', namespace='AWS/EC2', dimensions=self.instanceDimension, alarm_actions=self.policy_percent.pop().policy_arn)
        ### Put all the alarms
        self.tester.put_metric_alarm(alarm_change)
        self.tester.put_metric_alarm(alarm_percent)
        self.tester.put_metric_alarm(alarm_exact)

    def testDesribeAlarms(self):
        self.debug(self.tester.describe_alarms())
        assert len(self.tester.describe_alarms()) >= 3
        ### Test describe_alarms_for_metric for created alarms
        assert len(self.tester.describe_alarms_for_metric('CPUUtilization', 'AWS/EC2', dimensions=self.instanceDimension)) == 3
        ### There should not be any alarms created for 'DiskReadOps'
        assert len(self.tester.describe_alarms_for_metric('DiskReadOps', 'AWS/EC2', dimensions=self.instanceDimension)) == 0
        ### Test describe_alarm_history
        self.debug(self.tester.describe_alarm_history())
        assert len(self.tester.describe_alarm_history()) >= 3

    def testAlarms(self):
        ### The number of running instances should equal the desired_capacity for the auto_scaling_group = (1)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.disable_alarm_actions('exact')
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name + ' instances = 1')
        ### The number of running instances should equal the desired_capacity + scaling_adjustment = (2)
        self.tester.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(str(len(group.instances)))
        assert len(group.instances) == 2
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances changed to 2')
        ### The number of running instances should equal the total from the previous scaling_adjustment (2) - 50% = (1)
        self.tester.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances is None
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances is exactly 0')

    def testAwsReservedNamspaces(self):
        try:
            self.tester.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception, e:
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamspaces generated expected InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamspaces did not throw expected InvalidParameterValue error.')
        return False
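For reference, a minimal sketch of what the alarm wiring above reduces to in plain boto 2. The MetricAlarm kwargs and the put_metric_alarm call are standard boto; the function name, the pre-built cw connection, and the 'scale-out-change' alarm name are illustrative assumptions, not part of the suite:

# Sketch: a CloudWatch alarm tied to a scaling-policy ARN, mirroring the
# always-firing CPUUtilization > 0 alarms created by setUpAlarms above.
from boto.ec2.cloudwatch import MetricAlarm

def put_scale_out_alarm(cw, policy_arn, instance_id):
    # cw: an existing boto.ec2.cloudwatch.CloudWatchConnection (assumed)
    alarm = MetricAlarm(name='scale-out-change',
                        metric='CPUUtilization',
                        namespace='AWS/EC2',
                        statistic='Average',
                        comparison='>',
                        threshold=0,
                        period=60,
                        evaluation_periods=1,
                        dimensions={'InstanceId': instance_id},
                        alarm_actions=[policy_arn])
    # put_metric_alarm creates the alarm, or updates it in place if it exists
    cw.put_metric_alarm(alarm)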
class InstanceBasics(unittest.TestCase): def setUp(self, credpath=None): # Setup basic eutester object if credpath is None: credpath = arg_credpath self.tester = Eucaops( credpath=credpath) self.tester.poll_count = 120 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) ### Generate a keypair for the instance self.keypair = self.tester.add_keypair( "keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) ### Use a random instance-store backed EMI if no cli option set if arg_emi is False: self.image = self.tester.get_emi(root_device_type="instance-store") else: self.image = self.tester.get_emi(arg_emi) self.reservation = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name def tearDown(self): if self.reservation is not None: self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") self.tester.delete_group(self.group) self.tester.delete_keypair(self.keypair) os.remove(self.keypath) self.reservation = None self.group = None self.keypair = None self.tester = None self.ephemeral = None def BasicInstanceChecks(self, zone = None): """Instance checks including reachability and ephemeral storage""" if zone is None: zone = self.zone if self.reservation is None: self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Instance did not go to running') self.assertNotEqual( instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same') self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance') self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2", "No such file or directory"), 'Did not find ephemeral storage at ' + instance.rootfs_device + "2") return self.reservation def ElasticIps(self, zone = None): """ Basic test for elastic IPs Allocate an IP, associate it with an instance, ping the instance Disassociate the IP, ping the instance Release the address""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,zone=zone) for instance in self.reservation.instances: address = self.tester.allocate_address() self.assertTrue(address,'Unable to allocate address') self.tester.associate_address(instance, address) instance.update() self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP") self.tester.disassociate_address_from_instance(instance) self.tester.release_address(address) instance.update() self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping after dissassociate") return self.reservation def MaxSmallInstances(self, available_small=None,zone = None): """Run the maximum m1.smalls available""" if available_small is None: available_small = self.tester.get_available_vms() if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,min=available_small, max=available_small, zone=zone) self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances went to 
running') return self.reservation def LargestInstance(self, zone = None): """Run 1 of the largest instance c1.xlarge""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,type="c1.xlarge",zone=zone) self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances went to running') return self.reservation def MetaData(self, zone=None): """Check metadata for consistency""" # Missing nodes # ['block-device-mapping/', 'ami-manifest-path'] if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: ## Need to verify the public key (could just be checking for a string of a certain length) self.assertTrue(re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata') self.assertTrue(re.match(instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata') # Need to validate block device mapping #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) self.assertTrue(re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata') self.assertTrue(re.match(instance.get_metadata("local-ipv4")[0] , instance.private_ip_address), 'Incorrect private ip in metadata') self.assertTrue(re.match(instance.get_metadata("public-ipv4")[0] , instance.ip_address), 'Incorrect public ip in metadata') self.assertTrue(re.match(instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata') self.assertTrue(re.match(instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata') self.assertTrue(re.match(instance.get_metadata("reservation-id")[0], self.reservation.id), 'Incorrect reservation in metadata') self.assertTrue(re.match(instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata') self.assertTrue(re.match(instance.get_metadata("kernel-id")[0], instance.kernel), 'Incorrect kernel id in metadata') self.assertTrue(re.match(instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata') self.assertTrue(re.match(instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata') self.assertTrue(re.match(instance.get_metadata("hostname")[0], instance.dns_name), 'Incorrect host name in metadata') self.assertTrue(re.match(instance.get_metadata("ramdisk-id")[0], instance.ramdisk ), 'Incorrect ramdisk in metadata') #instance-type self.assertTrue(re.match(instance.get_metadata("instance-type")[0], instance.instance_type ), 'Incorrect instance type in metadata') BAD_META_DATA_KEYS = ['foobar'] for key in BAD_META_DATA_KEYS: self.assertTrue(re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node') return self.reservation def DNSResolveCheck(self, zone=None): """Check DNS resolution information for public/private DNS names and IP addresses. 
The DNS resolution behavior follows AWS EC2.""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: # Test to see if Dynamic DNS has been configured # if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]): # Per AWS standard, resolution should have private hostname or private IP as a valid response # Perform DNS resolution against private IP and private DNS name # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("hostname")[0])[3]), "DNS lookup failed for hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("hostname")[0])[5]), "Incorrect DNS resolution for hostname.") # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("local-hostname")[0])[3]), "DNS lookup failed for private hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("local-hostname")[0])[5]), "Incorrect DNS resolution for private hostname.") # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0])[3]), "DNS lookup failed for private IP address.") # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname self.assertTrue(re.search(instance.get_metadata("local-hostname")[0], instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0])[4]), "Incorrect DNS resolution for private IP address") # Perform DNS resolution against public IP and public DNS name # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("public-hostname")[0])[3]), "DNS lookup failed for public-hostname.") # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("public-hostname")[0])[5]), "Incorrect DNS resolution for public-hostname.") # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0])[3]), "DNS lookup failed for public IP address.") # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname self.assertTrue(re.search(instance.get_metadata("public-hostname")[0], instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0])[4]), "Incorrect DNS resolution for public IP address") return self.reservation def DNSCheck(self, zone=None): """Check to make sure Dynamic DNS reports correct information for public/private IP address and DNS names""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: # Test to see if Dynamic DNS has been configured # if re.match("internal", 
instance.private_dns_name.split('eucalyptus.')[-1]): # Make sure that private_ip_address is not the same as local-hostname self.assertFalse(re.match(instance.private_ip_address, instance.private_dns_name), 'local-ipv4 and local-hostname are the same with DNS on') # Make sure that ip_address is not the same as public-hostname self.assertFalse(re.match(instance.ip_address, instance.public_dns_name), 'public-ipv4 and public-hostname are the same with DNS on') return self.reservation def Reboot(self, zone=None): """Reboot instance ensure IP connectivity and volumes stay attached""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: ### Create 1GB volume in first AZ self.volume = self.tester.create_volume(instance.placement, 1) euvolume = EuVolume.make_euvol_from_vol(self.volume) self.volume_device = instance.attach_euvolume(euvolume) ### Reboot instance instance.reboot_instance_and_verify(waitconnect=20) instance.detach_euvolume(euvolume) return self.reservation def Churn(self, testcase="BasicInstanceChecks"): """Start instances and stop them before they are running, increase time to terminate on each iteration""" from multiprocessing import Process from multiprocessing import Queue ### Increase time to terminate by step seconds on each iteration step = 10 ## Run through count iterations of test count = self.tester.get_available_vms("m1.small") / 2 thread_pool = [] queue_pool = [] ## Start asynchronous activity ## Run 5 basic instance check instances 10s apart for i in xrange(count): q = Queue() queue_pool.append(q) p = Process(target=self.run_testcase_thread, args=(q, step * i,testcase)) thread_pool.append(p) self.tester.debug("Starting Thread " + str(i) +" in " + str(step * i)) p.start() ### While the other tests are running, run and terminate count instances with a 10s sleep in between for i in xrange(count): self.reservation = self.image.run() self.tester.debug("Sleeping for " + str(step) + " seconds before terminating instances") self.tester.sleep(step ) for instance in self.reservation.instances: instance.terminate() self.assertTrue(self.tester.wait_for_instance(instance, "terminated"), "Instance did not go to terminated") ### Once the previous test is complete rerun the BasicInstanceChecks test case ### Wait for an instance to become available count = self.tester.get_available_vms("m1.small") poll_count = 30 while poll_count > 0: self.tester.sleep(5) count = self.tester.get_available_vms("m1.small") if count > 0: self.tester.debug("There is an available VM to use for final test") break poll_count -= 1 fail_count = 0 ### Block until the script returns a result for queue in queue_pool: test_result = queue.get(True) self.tester.debug("Got Result: " + str(test_result) ) fail_count += test_result for thread in thread_pool: thread.join() if fail_count > 0: raise Exception("Failure detected in one of the " + str(count) + " Basic Instance tests") self.tester.debug("Successfully completed churn test") def PrivateIPAddressing(self, zone = None): """Basic test to run an instance with Private only IP and later allocate/associate/diassociate/release an Elastic IP. 
After disassociating, check that the instance is left with only its private IP and that no new public IP gets associated to it."""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, private_addressing=True, zone=zone)
        for instance in self.reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address, 'Unable to allocate address')
            self.assertTrue(self.tester.associate_address(instance, address))
            self.tester.sleep(30)
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP")
            address.disassociate()
            self.tester.sleep(30)
            instance.update()
            self.assertFalse(self.tester.ping(instance.public_dns_name), "Was able to ping instance that should have only had a private IP")
            address.release()
            if instance.public_dns_name != instance.private_dns_name:
                self.fail("Instance received a new public IP: " + instance.public_dns_name)
        return self.reservation

    def ReuseAddresses(self, zone=None):
        """Run instances in series and ensure they get the same address"""
        prev_address = None
        if zone is None:
            zone = self.zone
        ### Run the test 5 times in a row
        for i in xrange(5):
            self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, zone=zone)
            for instance in self.reservation.instances:
                if prev_address is not None:
                    self.assertTrue(re.search(str(prev_address), str(instance.public_dns_name)),
                                    "Address " + str(prev_address) + " was not reused; instance got " + str(instance.public_dns_name))
                prev_address = instance.public_dns_name
            self.tester.terminate_instances(self.reservation)

    def run_testcase_thread(self, queue, delay=20, name="MetaData"):
        ### Thread that runs a testcase (function) and returns its pass or fail result
        self.tester.sleep(delay)
        try:
            result = unittest.TextTestRunner(verbosity=2).run(InstanceBasics(name))
        except Exception, e:
            queue.put(1)
            raise e
        if result.wasSuccessful():
            self.tester.debug("Passed test: " + name)
            queue.put(0)
            return False
        else:
            self.tester.debug("Failed test: " + name)
            queue.put(1)
            return True
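The Churn/run_testcase_thread machinery above distills to the sketch below: run one named InstanceBasics test per child process and collect pass/fail results through a Queue. Function names here are illustrative; the pattern assumes this module is importable by the worker processes:

# Sketch: parallel execution of named InstanceBasics tests, tallying failures
# via a Queue, mirroring Churn/run_testcase_thread above.
import unittest
from multiprocessing import Process, Queue

def _run_named_test(queue, name):
    result = unittest.TextTestRunner(verbosity=2).run(InstanceBasics(name))
    queue.put(0 if result.wasSuccessful() else 1)

def run_tests_in_parallel(names):
    pairs = []
    for name in names:
        q = Queue()
        p = Process(target=_run_named_test, args=(q, name))
        p.start()
        pairs.append((p, q))
    failures = 0
    for p, q in pairs:
        failures += q.get(True)  # block until the child reports a result
        p.join()
    return failures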
class HAtests(InstanceBasics, BucketTestSuite): def __init__(self): self.setuptestcase() self.setup_parser() self.get_args() if not boto.config.has_section('Boto'): boto.config.add_section('Boto') boto.config.set('Boto', 'num_retries', '1') boto.config.set('Boto', 'http_socket_timeout', '20') self.tester = Eucaops(config_file=self.args.config_file, password=self.args.password) self.tester.ec2.connection.timeout = 30 self.servman = self.tester.service_manager self.instance_timeout = 120 ### Add and authorize a group for the instance self.start_time = str(int(time.time())) try: self.group = self.tester.add_group(group_name="group-" + self.start_time) self.tester.authorize_group_by_name(group_name=self.group.name) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp") ### Generate a keypair for the instance self.keypair = self.tester.add_keypair("keypair-" + self.start_time) self.keypath = os.curdir + "/" + self.keypair.name + ".pem" if self.args.emi: self.image = self.tester.get_emi(self.args.emi) else: self.image = self.tester.get_emi( root_device_type="instance-store") self.reservation = None self.private_addressing = False self.bucket_prefix = "buckettestsuite-" + self.start_time + "-" self.test_user_id = self.tester.s3.get_canonical_user_id() zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.tester.clc = self.tester.service_manager.get_enabled_clc( ).machine self.version = self.tester.clc.sys( "cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0] ### Create standing resources that will be checked after all failures ### Instance, volume, buckets ### self.standing_reservation = self.tester.run_instance( image=self.image, keypair=self.keypair.name, group=self.group.name, zone=self.zone) self.volume = self.tester.create_volume(self.zone) self.device = self.standing_reservation.instances[0].attach_volume( self.volume) for instance in self.standing_reservation.instances: instance.sys("echo " + instance.id + " > " + self.device) self.standing_bucket_name = "failover-bucket-" + self.start_time self.standing_bucket = self.tester.create_bucket( self.standing_bucket_name) self.standing_key_name = "failover-key-" + self.start_time self.standing_key = self.tester.upload_object( self.standing_bucket_name, self.standing_key_name) self.standing_key = self.tester.get_objects_by_prefix( self.standing_bucket_name, self.standing_key_name) self.run_instance_params = { 'image': self.image, 'keypair': self.keypair.name, 'group': self.group.name, 'zone': self.zone, 'timeout': self.instance_timeout } except Exception, e: self.clean_method() raise Exception("Init for testcase failed. Reason: " + str(e))
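A hypothetical post-failover check, not part of the suite, showing how the standing resources created above could be verified after a failover; it reuses only tester calls already seen in __init__:

# Sketch: verify the standing resources survived a failover. The instance id
# was written onto the attached volume during __init__, so reading it back
# exercises both the instance and the attachment; the prefix listing checks
# that the standing bucket and key are still intact.
def verify_standing_resources(self):
    instance = self.standing_reservation.instances[0]
    output = instance.sys("cat " + self.device)
    assert instance.id in "".join(output), "Volume data lost after failover"
    keys = self.tester.get_objects_by_prefix(self.standing_bucket_name,
                                             self.standing_key_name)
    assert len(keys) > 0, "Standing key missing after failover"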
class PopulateUpgrade(EutesterTestCase): def __init__(self, extra_args=None): self.setuptestcase() self.setup_parser() self.parser.add_argument("--region", default=None) if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.get_args() # Setup basic eutester object if self.args.region: self.tester = EC2ops(credpath=self.args.credpath, region=self.args.region) else: self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config, password=self.args.password) self.tester.poll_count = 120 self.security_groups = [] ### Generate a keypair for the instance self.keypair = self.tester.add_keypair("keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.args.emi if not self.image: self.image = self.tester.get_emi(root_device_type="instance-store") self.address = None self.volume = None self.snapshot = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.reservation = None def clean_method(self): pass def Instances(self, type="instance-store"): """ This case was developed to run through a series of basic instance tests. The tests are as follows: - execute run_instances command - make sure that public DNS name and private IP aren't the same (This is for Managed/Managed-NOVLAN networking modes) - test to see if instance is ping-able - test to make sure that instance is accessible via ssh (ssh into instance and run basic ls command) If any of these tests fail, the test case will error out, logging the results. """ test_image = self.tester.get_emi(root_device_type=type) ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp") self.security_groups.append(self.group) # Test: INSTANCESTORE VOLATTACH:no ADDR:user instance_1 = self.tester.run_instance( test_image, keypair=self.keypair.name, group=self.group.name).instances[0] instance_1_address = self.tester.allocate_address() self.tester.associate_address(instance=instance_1, address=instance_1_address) # Test: INSTANCESTORE VOLATTACH:no ADDR:system instance_2 = self.tester.run_instance( test_image, keypair=self.keypair.name, group=self.group.name).instances[0] # Test: INSTANCESTORE VOLATTACH:no ADDR:system instance_3 = self.tester.run_instance(test_image, group=self.group.name, private_addressing=True, is_reachable=False).instances[0] # Test: INSTANCESTORE VOLATTACH:yes ADDR:user instance_4 = self.tester.run_instance( test_image, keypair=self.keypair.name, group=self.group.name).instances[0] instance_4_address = self.tester.allocate_address() self.tester.associate_address(instance=instance_4, address=instance_4_address) volume = self.tester.create_volume(zone=self.zone) instance_4.attach_volume(volume=volume) # Test: INSTANCESTORE VOLATTACH:yes ADDR:system instance_5 = self.tester.run_instance( test_image, keypair=self.keypair.name, group=self.group.name).instances[0] volume = self.tester.create_volume(zone=self.zone) instance_5.attach_volume(volume=volume) self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp") self.security_groups.append(self.group) # Test: INSTANCESTORE VOLATTACH:yes ADDR:system instance_6 = 
self.tester.run_instance(test_image, group=self.group.name, private_addressing=True, is_reachable=False).instances[0] def PopulateAll(self): self.Instances("instance-store") self.Instances("ebs")
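A hypothetical entry point for this populator, assuming the EutesterTestCase runner helpers used elsewhere in eutester (create_testcase_from_method and run_test_case_list) are available:

# Sketch: run PopulateAll through the EutesterTestCase harness. The resources
# are deliberately left behind (clean_method is a no-op) so an upgraded cloud
# can be verified against them afterwards.
if __name__ == "__main__":
    testcase = PopulateUpgrade()
    test_list = [testcase.create_testcase_from_method(testcase.PopulateAll)]
    testcase.run_test_case_list(test_list)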
class ReportingBasics(EutesterTestCase): def __init__(self, config_file=None, password=None): self.setuptestcase() # Setup basic eutester object self.tester = Eucaops(config_file=config_file, password=password) self.tester.poll_count = 120 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp") ### Generate a keypair for the instance self.keypair = self.tester.add_keypair("keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.tester.get_emi(root_device_type="instance-store") self.reservation = None self.volume = None self.bucket = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.clean_method = self.cleanup self.cur_time = str(int(time.time())) date_fields = time.localtime() self.date = str(date_fields.tm_year) + "-" + str( date_fields.tm_mon) + "-31" clcs = self.tester.get_component_machines("clc") if len(clcs) is 0: raise Exception("No CLC found") else: self.clc = clcs[0] poll_interval = 1 write_interval = 1 size_time_size_unit = "MB" size_time_time_unit = "MINS" size_unit = "MB" time_unit = "MINS" self.modify_property(property="reporting.default_poll_interval_mins", value=poll_interval) self.modify_property(property="reporting.default_write_interval_mins", value=write_interval) self.modify_property(property="reporting.default_size_time_size_unit", value=size_time_size_unit) self.modify_property(property="reporting.default_size_time_time_unit", value=size_time_time_unit) self.modify_property(property="reporting.default_size_unit", value=size_unit) self.modify_property(property="reporting.default_time_unit", value=time_unit) def cleanup(self): if self.reservation: self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") if self.volume: self.tester.delete_volume(self.volume) if self.bucket: self.tester.clear_bucket(self.bucket) self.tester.delete_group(self.group) self.tester.delete_keypair(self.keypair) os.remove(self.keypath) def instance(self): self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=self.zone) file_size_in_mb = 500 for instance in self.reservation.instances: assert isinstance(instance, EuInstance) self.volume = self.tester.create_volume(azone=self.zone, size=4) device_path = instance.attach_volume(self.volume) instance.sys("mkfs.ext3 -F " + device_path) instance.sys("mount " + device_path + " /mnt") ### Write to root fs instance.sys("dd if=/dev/zero of=/tmp/test.img count=" + str(file_size_in_mb) + " bs=1M") ### Write to volume instance.sys("dd if=/dev/zero of=/mnt/test.img count=" + str(file_size_in_mb) + " bs=1M") self.tester.sleep(180) for instance in self.reservation.instances: report_output = self.generate_report("instance", "csv", self.date) instance_lines = self.tester.grep(instance.id, report_output) for line in instance_lines: instance_data = self.parse_instance_line(line) #if not re.search( instance.id +",m1.small,1,9,0.2,0,0,0,0,93,200,0.2,0.0,0,1", line): if not re.match(instance_data.type, "m1.small"): raise Exception("Failed to find proper output for " + str(instance) + " type. 
Received: " + instance_data.type) if not int(instance_data.number) == 1: raise Exception("Failed to find proper output for " + str(instance) + " number. Received: " + instance_data.number) if not int(instance_data.unit_time) > 2: raise Exception("Failed to find proper output for " + str(instance) + " unit_time. Received: " + instance_data.unit_time) if not int(instance_data.disk_write) > 1000: raise Exception("Failed to find proper output for " + str(instance) + " disk_write. Received: " + instance_data.disk_write) if not int(instance_data.disk_time_write) > 200: raise Exception("Failed to find proper output for " + str(instance) + " disk_time_write. Received: " + instance_data.disk_time_write) def parse_instance_line(self, line): InstanceData = namedtuple( 'InstanceData', 'id type number unit_time cpu net_total_in net_total_out ' 'net_extern_in net_extern_out disk_read disk_write disk_iops_read ' 'disk_iops_write disk_time_read disk_time_write') values = line.split(",") return InstanceData(values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7], values[8], values[9], values[10], values[11], values[12], values[13], values[14]) def s3(self): self.bucket = self.tester.create_bucket( bucket_name="reporting-bucket-" + self.cur_time) key_size = 10 self.tester.debug("Creating random " + str(key_size) + "MB of data") rand_string = self.tester.id_generator(size=1024 * 1024 * 10) self.tester.upload_object(self.bucket.name, "reporting-key", contents=rand_string) self.tester.sleep(120) report_output = self.generate_report("s3", "csv", self.date) bucket_lines = self.tester.grep(self.bucket.name, report_output) for line in bucket_lines: bucket_data = self.parse_bucket_line(line) if not int(bucket_data.size) == 10: raise Exception('Failed to find proper size for %s' % str(self.bucket)) if not int(bucket_data.keys) == 1: raise Exception('Failed to find proper number of keys for %s' % str(self.bucket)) if not int(bucket_data.unit_time) > 16: raise Exception( 'Failed to find proper amount of usage for %s' % str(self.bucket)) def parse_bucket_line(self, line): BucketData = namedtuple('BucketData', 'name keys size unit_time') values = line.split(",") return BucketData(values[0], values[1], values[2], values[3]) def generate_report(self, type, format, end_date): return self.clc.sys("source " + self.tester.credpath + "/eucarc && eureport-generate-report -t " + str(type) + " -f " + str(format) + " -e " + str(end_date)) def modify_property(self, property, value): """ Modify a eucalyptus property through the command line euca-modify-property tool property Property to modify value Value to set it too """ command = "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-modify-property -p " + str( property) + "=" + str(value) if self.clc.found(command, property): self.debug("Properly modified property " + property) else: raise Exception("Setting property " + property + " failed")
class InstanceBasics(EutesterTestCase): def __init__(self, extra_args=None): self.setuptestcase() self.setup_parser() if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.get_args() # Setup basic eutester object self.tester = Eucaops(credpath=self.args.credpath) self.tester.poll_count = 120 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp") ### Generate a keypair for the instance self.keypair = self.tester.add_keypair("keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.args.emi if not self.image: self.image = self.tester.get_emi(root_device_type="instance-store") self.address = None self.volume = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.reservation = None def clean_method(self): if self.reservation: self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") if self.address: assert isinstance(self.address, Address) self.tester.release_address(self.address) if self.volume: self.tester.delete_volume(self.volume) self.tester.delete_group(self.group) self.tester.delete_keypair(self.keypair) os.remove(self.keypath) def BasicInstanceChecks(self, zone=None): """ This case was developed to run through a series of basic instance tests. The tests are as follows: - execute run_instances command - make sure that public DNS name and private IP aren't the same (This is for Managed/Managed-NOVLAN networking modes) - test to see if instance is ping-able - test to make sure that instance is accessible via ssh (ssh into instance and run basic ls command) If any of these tests fail, the test case will error out, logging the results. """ if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance( self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: self.assertTrue(self.tester.wait_for_reservation(self.reservation), 'Instance did not go to running') self.assertNotEqual(instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same') self.assertTrue(self.tester.ping(instance.public_dns_name), 'Could not ping instance') self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2", "No such file or directory"), 'Did not find ephemeral storage at ' + instance.rootfs_device + "2") return self.reservation def ElasticIps(self, zone=None): """ This case was developed to test elastic IPs in Eucalyptus. This test case does not test instances that are launched using private-addressing option. The test case executes the following tests: - allocates an IP, associates the IP to the instance, then pings the instance. - disassociates the allocated IP, then pings the instance. - releases the allocated IP address If any of the tests fail, the test case will error out, logging the results. 
""" if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance( keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: self.address = self.tester.allocate_address() self.assertTrue(self.address, 'Unable to allocate address') self.tester.associate_address(instance, self.address) instance.update() self.assertTrue(self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP") self.tester.disassociate_address_from_instance(instance) self.tester.release_address(self.address) self.address = None instance.update() self.assertTrue(self.tester.ping(instance.public_dns_name), "Could not ping after dissassociate") return self.reservation def MaxSmallInstances(self, available_small=None, zone=None): """ This case was developed to test the maximum number of m1.small vm types a configured cloud can run. The test runs the maximum number of m1.small vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. """ if self.reservation: self.tester.terminate_instances(self.reservation) if available_small is None: available_small = self.tester.get_available_vms() if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, min=available_small, max=available_small, zone=zone) self.assertTrue(self.tester.wait_for_reservation(self.reservation), 'Not all instances went to running') return self.reservation def LargestInstance(self, zone=None): """ This case was developed to test the maximum number of c1.xlarge vm types a configured cloud can run. The test runs the maximum number of c1.xlarge vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. """ if zone is None: zone = self.zone if self.reservation: self.tester.terminate_instances(self.reservation) self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, type="c1.xlarge", zone=zone) self.assertTrue(self.tester.wait_for_reservation(self.reservation), 'Not all instances went to running') return self.reservation def MetaData(self, zone=None): """ This case was developed to test the metadata service of an instance for consistency. The following meta-data attributes are tested: - public-keys/0/openssh-key - security-groups - instance-id - local-ipv4 - public-ipv4 - ami-id - ami-launch-index - reservation-id - placement/availability-zone - kernel-id - public-hostname - local-hostname - hostname - ramdisk-id - instance-type - any bad metadata that shouldn't be present. Missing nodes ['block-device-mapping/', 'ami-manifest-path'] If any of these tests fail, the test case will error out; logging the results. 
""" if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance( self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: ## Need to verify the public key (could just be checking for a string of a certain length) self.assertTrue( re.match( instance.get_metadata("public-keys/0/openssh-key") [0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata') self.assertTrue( re.match( instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata') # Need to validate block device mapping #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) self.assertTrue( re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata') self.assertTrue( re.match( instance.get_metadata("local-ipv4")[0], instance.private_ip_address), 'Incorrect private ip in metadata') self.assertTrue( re.match( instance.get_metadata("public-ipv4")[0], instance.ip_address), 'Incorrect public ip in metadata') self.assertTrue( re.match( instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata') self.assertTrue( re.match( instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata') self.assertTrue( re.match( instance.get_metadata("reservation-id")[0], self.reservation.id), 'Incorrect reservation in metadata') self.assertTrue( re.match( instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata') self.assertTrue( re.match( instance.get_metadata("kernel-id")[0], instance.kernel), 'Incorrect kernel id in metadata') self.assertTrue( re.match( instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata') self.assertTrue( re.match( instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata') self.assertTrue( re.match( instance.get_metadata("hostname")[0], instance.dns_name), 'Incorrect host name in metadata') self.assertTrue( re.match( instance.get_metadata("ramdisk-id")[0], instance.ramdisk), 'Incorrect ramdisk in metadata') #instance-type self.assertTrue( re.match( instance.get_metadata("instance-type")[0], instance.instance_type), 'Incorrect instance type in metadata') BAD_META_DATA_KEYS = ['foobar'] for key in BAD_META_DATA_KEYS: self.assertTrue( re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node') return self.reservation def DNSResolveCheck(self, zone=None): """ This case was developed to test DNS resolution information for public/private DNS names and IP addresses. The tested DNS resolution behavior is expected to follow AWS EC2. The following tests are ran using the associated meta-data attributes: - check to see if Eucalyptus Dynamic DNS is configured - nslookup on hostname; checks to see if it matches local-ipv4 - nslookup on local-hostname; check to see if it matches local-ipv4 - nslookup on local-ipv4; check to see if it matches local-hostname - nslookup on public-hostname; check to see if it matches local-ipv4 - nslookup on public-ipv4; check to see if it matches public-host If any of these tests fail, the test case will error out; logging the results. 
""" if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance( self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: # Test to see if Dynamic DNS has been configured # if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]): # Per AWS standard, resolution should have private hostname or private IP as a valid response # Perform DNS resolution against private IP and private DNS name # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys("nslookup " + instance.get_metadata("hostname")[0])[3]), "DNS lookup failed for hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue( re.search( instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("hostname")[0])[5]), "Incorrect DNS resolution for hostname.") # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys( "nslookup " + instance.get_metadata("local-hostname")[0])[3]), "DNS lookup failed for private hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue( re.search( instance.get_metadata("local-ipv4")[0], instance.sys( "nslookup " + instance.get_metadata("local-hostname")[0])[5]), "Incorrect DNS resolution for private hostname.") # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0]) [3]), "DNS lookup failed for private IP address.") # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname self.assertTrue( re.search( instance.get_metadata("local-hostname")[0], instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0]) [4]), "Incorrect DNS resolution for private IP address") # Perform DNS resolution against public IP and public DNS name # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys( "nslookup " + instance.get_metadata("public-hostname")[0])[3]), "DNS lookup failed for public-hostname.") # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address self.assertTrue( re.search( instance.get_metadata("local-ipv4")[0], instance.sys( "nslookup " + instance.get_metadata("public-hostname")[0])[5]), "Incorrect DNS resolution for public-hostname.") # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0]) [3]), "DNS lookup failed for public IP address.") # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname self.assertTrue( re.search( instance.get_metadata("public-hostname")[0], instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0]) [4]), "Incorrect DNS resolution for public IP address") return self.reservation def DNSCheck(self, zone=None): """ This case was developed to test to make sure Eucalyptus Dynamic DNS reports correct information for public/private IP address and DNS names passed to meta-data service. 
The following tests are run using the associated meta-data attributes:
        - check to see if Eucalyptus Dynamic DNS is configured
        - check to see if local-ipv4 and local-hostname are not the same
        - check to see if public-ipv4 and public-hostname are not the same
        If any of these tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            # Test to see if Dynamic DNS has been configured
            # if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]):
            # Make sure that private_ip_address is not the same as local-hostname
            self.assertFalse(re.match(instance.private_ip_address, instance.private_dns_name), 'local-ipv4 and local-hostname are the same with DNS on')
            # Make sure that ip_address is not the same as public-hostname
            self.assertFalse(re.match(instance.ip_address, instance.public_dns_name), 'public-ipv4 and public-hostname are the same with DNS on')
        return self.reservation

    def Reboot(self, zone=None):
        """
        This case was developed to test IP connectivity and volume attachment after instance reboot.
        The following tests are done for this test case:
        - create a 1 GB EBS volume, then attach it
        - reboot the instance
        - attempt to connect to the instance via ssh
        - check to see if the EBS volume is still attached
        - detach the volume
        - delete the volume
        If any of these tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            ### Create 1GB volume in first AZ
            self.volume = self.tester.create_volume(instance.placement, 1)
            self.volume_device = instance.attach_volume(self.volume)
            ### Reboot instance
            instance.reboot_instance_and_verify(waitconnect=20)
            instance.detach_euvolume(self.volume)
            self.tester.delete_volume(self.volume)
            self.volume = None
        return self.reservation

    def run_terminate(self):
        reservation = None
        try:
            reservation = self.tester.run_instance(image=self.image, zone=self.zone, keypair=self.keypair.name, group=self.group.name)
            self.tester.terminate_instances(reservation)
            return 0
        except Exception, e:
            if reservation:
                self.tester.terminate_instances(reservation)
            return 1
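Because run_terminate returns 0 on success and 1 on any failure, it drops straight into a serial stress loop; a hypothetical sketch (names illustrative):

# Sketch: serial run/terminate churn built on run_terminate above.
def churn_run_terminate(testcase, iterations=10):
    failures = 0
    for _ in xrange(iterations):
        failures += testcase.run_terminate()
    return failures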
class InstanceBasics(EutesterTestCase): def __init__(self, extra_args= None): self.setuptestcase() self.setup_parser() if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.get_args() # Setup basic eutester object if self.args.region: self.tester = EC2ops( credpath=self.args.credpath, region=self.args.region) else: self.tester = Eucaops(config_file=self.args.config, password=self.args.password, credpath=self.args.credpath) self.tester.poll_count = 120 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) ### Generate a keypair for the instance self.keypair = self.tester.add_keypair( "keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.args.emi if not self.image: self.image = self.tester.get_emi(root_device_type="instance-store") self.address = None self.volume = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.reservation = None def clean_method(self): if self.reservation: self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") if self.address: assert isinstance(self.address,Address) self.tester.release_address(self.address) if self.volume: self.tester.delete_volume(self.volume) self.tester.delete_group(self.group) self.tester.delete_keypair(self.keypair) os.remove(self.keypath) def BasicInstanceChecks(self, zone = None): """ This case was developed to run through a series of basic instance tests. The tests are as follows: - execute run_instances command - make sure that public DNS name and private IP aren't the same (This is for Managed/Managed-NOVLAN networking modes) - test to see if instance is ping-able - test to make sure that instance is accessible via ssh (ssh into instance and run basic ls command) If any of these tests fail, the test case will error out, logging the results. """ if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Instance did not go to running') self.assertNotEqual( instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same') self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance') self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2", "No such file or directory"), 'Did not find ephemeral storage at ' + instance.rootfs_device + "2") return self.reservation def ElasticIps(self, zone = None): """ This case was developed to test elastic IPs in Eucalyptus. This test case does not test instances that are launched using private-addressing option. The test case executes the following tests: - allocates an IP, associates the IP to the instance, then pings the instance. - disassociates the allocated IP, then pings the instance. - releases the allocated IP address If any of the tests fail, the test case will error out, logging the results. 
""" if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,zone=zone) for instance in self.reservation.instances: self.address = self.tester.allocate_address() self.assertTrue(self.address,'Unable to allocate address') self.tester.associate_address(instance, self.address) instance.update() self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP") self.tester.disassociate_address_from_instance(instance) self.tester.release_address(self.address) self.address = None instance.update() self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping after dissassociate") return self.reservation def MaxSmallInstances(self, available_small=None,zone = None): """ This case was developed to test the maximum number of m1.small vm types a configured cloud can run. The test runs the maximum number of m1.small vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. """ if self.reservation: self.tester.terminate_instances(self.reservation) if available_small is None: available_small = self.tester.get_available_vms() if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,min=available_small, max=available_small, zone=zone) self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances went to running') return self.reservation def LargestInstance(self, zone = None): """ This case was developed to test the maximum number of c1.xlarge vm types a configured cloud can run. The test runs the maximum number of c1.xlarge vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. """ if zone is None: zone = self.zone if self.reservation: self.tester.terminate_instances(self.reservation) self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,type="c1.xlarge",zone=zone) self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances went to running') return self.reservation def MetaData(self, zone=None): """ This case was developed to test the metadata service of an instance for consistency. The following meta-data attributes are tested: - public-keys/0/openssh-key - security-groups - instance-id - local-ipv4 - public-ipv4 - ami-id - ami-launch-index - reservation-id - placement/availability-zone - kernel-id - public-hostname - local-hostname - hostname - ramdisk-id - instance-type - any bad metadata that shouldn't be present. Missing nodes ['block-device-mapping/', 'ami-manifest-path'] If any of these tests fail, the test case will error out; logging the results. 
""" if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: ## Need to verify the public key (could just be checking for a string of a certain length) self.assertTrue(re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata') self.assertTrue(re.match(instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata') # Need to validate block device mapping #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) self.assertTrue(re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata') self.assertTrue(re.match(instance.get_metadata("local-ipv4")[0] , instance.private_ip_address), 'Incorrect private ip in metadata') self.assertTrue(re.match(instance.get_metadata("public-ipv4")[0] , instance.ip_address), 'Incorrect public ip in metadata') self.assertTrue(re.match(instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata') self.assertTrue(re.match(instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata') self.assertTrue(re.match(instance.get_metadata("reservation-id")[0], self.reservation.id), 'Incorrect reservation in metadata') self.assertTrue(re.match(instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata') self.assertTrue(re.match(instance.get_metadata("kernel-id")[0], instance.kernel), 'Incorrect kernel id in metadata') self.assertTrue(re.match(instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata') self.assertTrue(re.match(instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata') self.assertTrue(re.match(instance.get_metadata("hostname")[0], instance.dns_name), 'Incorrect host name in metadata') self.assertTrue(re.match(instance.get_metadata("ramdisk-id")[0], instance.ramdisk ), 'Incorrect ramdisk in metadata') #instance-type self.assertTrue(re.match(instance.get_metadata("instance-type")[0], instance.instance_type ), 'Incorrect instance type in metadata') BAD_META_DATA_KEYS = ['foobar'] for key in BAD_META_DATA_KEYS: self.assertTrue(re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node') return self.reservation def DNSResolveCheck(self, zone=None): """ This case was developed to test DNS resolution information for public/private DNS names and IP addresses. The tested DNS resolution behavior is expected to follow AWS EC2. The following tests are ran using the associated meta-data attributes: - check to see if Eucalyptus Dynamic DNS is configured - nslookup on hostname; checks to see if it matches local-ipv4 - nslookup on local-hostname; check to see if it matches local-ipv4 - nslookup on local-ipv4; check to see if it matches local-hostname - nslookup on public-hostname; check to see if it matches local-ipv4 - nslookup on public-ipv4; check to see if it matches public-host If any of these tests fail, the test case will error out; logging the results. 
""" if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: # Test to see if Dynamic DNS has been configured # if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]): # Per AWS standard, resolution should have private hostname or private IP as a valid response # Perform DNS resolution against private IP and private DNS name # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("hostname")[0])[3]), "DNS lookup failed for hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("hostname")[0])[5]), "Incorrect DNS resolution for hostname.") # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("local-hostname")[0])[3]), "DNS lookup failed for private hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("local-hostname")[0])[5]), "Incorrect DNS resolution for private hostname.") # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0])[3]), "DNS lookup failed for private IP address.") # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname self.assertTrue(re.search(instance.get_metadata("local-hostname")[0], instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0])[4]), "Incorrect DNS resolution for private IP address") # Perform DNS resolution against public IP and public DNS name # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("public-hostname")[0])[3]), "DNS lookup failed for public-hostname.") # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("public-hostname")[0])[5]), "Incorrect DNS resolution for public-hostname.") # Check to see if nslookup was able to resolve self.assertTrue(re.search('answer\:', instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0])[3]), "DNS lookup failed for public IP address.") # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname self.assertTrue(re.search(instance.get_metadata("public-hostname")[0], instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0])[4]), "Incorrect DNS resolution for public IP address") return self.reservation def DNSCheck(self, zone=None): """ This case was developed to test to make sure Eucalyptus Dynamic DNS reports correct information for public/private IP address and DNS names passed to meta-data service. 
The following tests are run using the associated meta-data attributes: - check to see if Eucalyptus Dynamic DNS is configured - check to see if local-ipv4 and local-hostname are not the same - check to see if public-ipv4 and public-hostname are not the same If any of these tests fail, the test case will error out, logging the results. """ if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: # Test to see if Dynamic DNS has been configured # if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]): # Make sure that private_ip_address is not the same as local-hostname self.assertFalse(re.match(instance.private_ip_address, instance.private_dns_name), 'local-ipv4 and local-hostname are the same with DNS on') # Make sure that ip_address is not the same as public-hostname self.assertFalse(re.match(instance.ip_address, instance.public_dns_name), 'public-ipv4 and public-hostname are the same with DNS on') return self.reservation def Reboot(self, zone=None): """ This case was developed to test IP connectivity and volume attachment after instance reboot. The following steps are performed for this test case: - create a 1 GB EBS volume, then attach the volume - reboot the instance - attempt to connect to the instance via ssh - check to see if the EBS volume is still attached - detach the volume - delete the volume If any of these tests fail, the test case will error out, logging the results. """ if zone is None: zone = self.zone if not self.reservation: self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: ### Create 1GB volume in first AZ self.volume = self.tester.create_volume(instance.placement, 1) self.volume_device = instance.attach_volume(self.volume) ### Reboot instance instance.reboot_instance_and_verify(waitconnect=20) instance.detach_euvolume(self.volume) self.tester.delete_volume(self.volume) self.volume = None return self.reservation def run_terminate(self): reservation = None try: reservation = self.tester.run_instance(image=self.image,zone=self.zone, keypair=self.keypair.name, group=self.group.name) self.tester.terminate_instances(reservation) return 0 except Exception, e: if reservation: self.tester.terminate_instances(reservation) return 1
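The ElasticIps flow exercised above (allocate, associate, ping, disassociate, release) is worth seeing without the eutester wrappers. Below is a minimal sketch in plain boto, assuming an EC2Connection named conn and a running boto Instance named inst (both names are illustrative, not part of this suite); the 30-second sleeps mirror the settle time the suites use elsewhere.

import time

def cycle_elastic_ip(conn, inst):
    # Allocate a fresh elastic IP from the cloud's public pool
    addr = conn.allocate_address()
    try:
        # Attach it to the instance, then give networking a moment to settle
        conn.associate_address(instance_id=inst.id, public_ip=addr.public_ip)
        time.sleep(30)
        inst.update()
        assert inst.ip_address == addr.public_ip, "instance did not pick up the new IP"
        # Detach; the instance should fall back to a system-assigned public IP
        conn.disassociate_address(public_ip=addr.public_ip)
        time.sleep(30)
        inst.update()
    finally:
        # Always return the address to the pool, even on assertion failure
        conn.release_address(public_ip=addr.public_ip)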
class Euca4229(unittest.TestCase): def setUp(self): self.conf = "cloud.conf" self.cond = 0 self.tester = Eucaops(config_file=self.conf, password="******") self.doAuth() self.sbin = self.tester.eucapath + "/usr/sbin/" self.source = "source " + self.tester.credpath + "/eucarc && " def tearDown(self): self.tester.sys( self.source + self.sbin + "euca-modify-property -p walrus.storagemaxtotalsnapshotsizeingb=50" ) self.tester.cleanup_artifacts() self.tester.delete_keypair(self.keypair) self.tester.local("rm " + self.keypair.name + ".pem") shutil.rmtree(self.tester.credpath) def runInstances(self, numMax): # Start instance self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group, min=1, max=numMax, is_reachable=False) # Make sure the instance is running for instance in self.reservation.instances: if instance.state == "running": self.ip = instance.public_dns_name self.instanceid = instance.id def doAuth(self): self.keypair = self.tester.add_keypair() self.group = self.tester.add_group() self.tester.authorize_group(self.group) def testEuca4229(self): # Get availability zone self.zone = self.tester.get_zones().pop() # Get number of already existing snapshots self.num_snaps_before = str( self.tester.sys(self.source + "euca-describe-snapshots")).count("SNAPSHOT") # Set storagemaxtotalsnapshotsizeingb self.tester.sys( self.source + self.sbin + "euca-modify-property -p walrus.storagemaxtotalsnapshotsizeingb=1") # Create a volume larger than storagemaxtotalsnapshotsizeingb (1 GB) self.volume = self.tester.create_volume(self.zone, 2, timeout=100) # Make sure the exception is thrown try: self.snap = self.tester.create_snapshot(self.volume.id, description="snap-4229") except Exception as detail: self.cond = str(detail).count('maximum allowed object size') # Get the current number of snapshots self.num_snaps_after = str( self.tester.sys(self.source + "euca-describe-snapshots")).count("SNAPSHOT") # Check to see if the error was thrown and make sure no new snapshot metadata was created if self.cond >= 1 and self.num_snaps_after == self.num_snaps_before: self.tester.debug("SUCCESS no new snapshot-metadata") else: self.fail("FAIL new snapshot-metadata")
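Euca4229 is an instance of a common negative-test shape: tighten a cloud property, attempt an operation that should now be rejected, then verify both the error text and that no stray metadata was left behind. A generic sketch of that shape, assuming hypothetical callables do_op (raises on rejection) and count_resources (returns the current resource count); neither name exists in the suite.

def expect_rejection(do_op, count_resources, error_fragment):
    # Record the resource count up front so leakage is provable
    before = count_resources()
    rejected = False
    try:
        do_op()
    except Exception as detail:
        # Only count it as a rejection if the expected error text surfaced
        rejected = error_fragment in str(detail)
    after = count_resources()
    if not rejected:
        raise AssertionError("operation was not rejected with: " + error_fragment)
    if after != before:
        raise AssertionError("rejected operation still created metadata")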
class LoadGenerator(unittest.TestCase): def setUp(self): # Setup basic eutester object if options.config_file: self.tester = Eucaops(config_file=options.config_file, password=options.clc_password) else: print "\tNeed to pass --config_file option. Try --help for more information\n" exit(1) ### Grab zone for volume tests zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.volumes = [] self.statuses = [] def tearDown(self): """ If extra debugging is set, print additional CLC and SC information """ if options.print_debug is True: self.get_clc_stats() self.get_sc_stats() """ Print the results of volumes created and total volumes of cloud """ self.current_ebs_reporting() """ Print all the volumes' statuses for the entire cloud """ self.overall_ebs_reporting() """ Display information in eucalyptus_storage,eucalyptus_cloud tables related to EBS - * eucalyptus_storage relations: iscsivolumeinfo, iscsimetadata, volumes, storage_stats_info * eucalyptus_cloud relations: metadata_volumes """ self.iscivolumeinfo_db_dump() self.iscsimetadata_db_dump() self.volumes_db_dump() self.cloudmetadata_db_dump() self.storagestats_db_dump() """ Now destroy volumes created and reached available state from test """ for vol in self.volumes: if vol.status == 'available': self.tester.delete_volume(vol) self.volumes = None self.statuses = None self.tester = None def current_ebs_reporting(self): """ Print the results of volumes created and total volumes of cloud """ found_creating = self.statuses.count("creating") found_available = self.statuses.count("available") found_deleting = self.statuses.count("deleting") found_deleted = self.statuses.count("deleted") found_failed = self.statuses.count("failed") self.tester.debug("##########################################\n") self.tester.debug("\t**** Results of Finished Test ****\n") self.tester.debug("\t" + str(found_creating) + " Volumes in CREATING state.\n") self.tester.debug("\t" + str(found_available) + " Volumes in AVAILABLE state.\n") self.tester.debug("\t" + str(found_deleting) + " Volumes in DELETING state.\n") self.tester.debug("\t" + str(found_deleted) + " Volumes in DELETED state.\n") self.tester.debug("\t" + str(found_failed) + " Volumes in FAILED state.\n") self.tester.debug("##########################################\n") found_creating = None found_available = None found_deleting = None found_deleted = None found_failed = None def overall_ebs_reporting(self): """ Print all the volumes' statuses for the entire cloud """ volumes = self.tester.get_volumes() statuses = [] for master_vol in volumes: statuses.append(master_vol.status) overall_creating = statuses.count("creating") overall_available = statuses.count("available") overall_deleting = statuses.count("deleting") overall_deleted = statuses.count("deleted") overall_failed = statuses.count("failed") """ Grab cloud property for volume location to get stats of files. 
""" volumes_dir = "" for machine in self.tester.get_component_machines("clc"): if volumes_dir == "": volumes_dir = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep volumesdir | awk '{print $3}'")) overall_ebs_size = len(volumes) ebs_filesystem_size = "" for machine in self.tester.get_component_machines("sc"): ebs_filesystem_size = (machine.sys("du -sh " + volumes_dir[0])) self.tester.debug("##########################################\n") self.tester.debug("\t**** Results of Current Volumes on Cloud ****\n") self.tester.debug("\t" + str(overall_creating) + " Volumes in CREATING state.\n") self.tester.debug("\t" + str(overall_available) + " Volumes in AVAILABLE state.\n") self.tester.debug("\t" + str(overall_deleting) + " Volumes in DELETING state.\n") self.tester.debug("\t" + str(overall_deleted) + " Volumes in DELETED state.\n") self.tester.debug("\t" + str(overall_failed) + " Volumes in FAILED state.\n") self.tester.debug("==========================================\n") self.tester.debug("Sum of All EBS Volume Sizes (in GBs): " + str(overall_ebs_size) + "\n") self.tester.debug("Disk Space Used under Cloud defined Storage Directory [ " + volumes_dir[0] + " ]: " + ebs_filesystem_size[0] + "\n") self.tester.debug("##########################################\n") """ Make sure and clean up volumes that got to "available" state; this is mostly for EbsBotoStress cleanup """ for vol in volumes: if vol.status == 'available': self.tester.delete_volume(vol) """ Clean up everything else """ statuses = None volumes = None ebs_filesystem_size = None overall_ebs_size = None volumes_dir = None overall_creating = None overall_available = None overall_deleting = None overall_deleted = None overall_failed = None def iscivolumeinfo_db_dump(self): """ Print contents of iscsivolumeinfo relation in eucalyptus_storage table """ now = datetime.datetime.now() iscsivolinfo_file = "~/iscsivolinfo_file-" + str(now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from iscsivolumeinfo' -o " + iscsivolinfo_file) db_dump = (machine.sys("cat " + iscsivolinfo_file)) machine.sys("rm -rf " + iscsivolinfo_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of iscsivolumeinfo relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None iscsivolinfo_file = None db_dump = None def iscsimetadata_db_dump(self): """ Print contents of iscsimetadata relation in eucalyptus_storage table """ now = datetime.datetime.now() iscsimetadata_file = "~/iscsimetadata_file-" + str(now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from iscsimetadata' -o " + iscsimetadata_file) db_dump = (machine.sys("cat " + iscsimetadata_file)) machine.sys("rm -rf " + iscsimetadata_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of iscsimetadata relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None iscsimetadata_file= None db_dump = None def volumes_db_dump(self): """ Print contents of volumes relation in 
eucalyptus_storage table """ now = datetime.datetime.now() volumes_file = "~/volumes_file-" + str(now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from volumes' -o " + volumes_file) db_dump = (machine.sys("cat " + volumes_file)) machine.sys("rm -rf " + volumes_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of volume relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None volumes_file= None db_dump = None def cloudmetadata_db_dump(self): """ Print contents of metadata_volumes relation in eucalyptus_cloud table """ now = datetime.datetime.now() cloudmetadata_file = "~/cloudmetadata_file-" + str(now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_cloud -c 'select * from metadata_volumes' -o " + cloudmetadata_file) db_dump = (machine.sys("cat " + cloudmetadata_file)) machine.sys("rm -rf " + cloudmetadata_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of metadata_volumes relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None cloudmetadata_file= None db_dump = None def storagestats_db_dump(self): """ Print contents of storage_stats_info relation in eucalyptus_storage table """ now = datetime.datetime.now() storagestats_file = "~/storagestats_file-" + str(now.microsecond) + ".txt" db_dump = "" for machine in self.tester.get_component_machines("clc"): machine.sys("psql -p 8777 -x -e -t -S -h ${EUCALYPTUS}/var/lib/eucalyptus/db/data eucalyptus_storage -c 'select * from storage_stats_info' -o " + storagestats_file) db_dump = (machine.sys("cat " + storagestats_file)) machine.sys("rm -rf " + storagestats_file) self.tester.debug("##########################################\n") self.tester.debug("\t**** Content of storage_stats_info relation ****\n") for content in db_dump: self.tester.debug(content + "\n") self.tester.debug("##########################################\n") now = None storagestats_file= None db_dump = None def run_command_list(self,machine, list): for command in list: machine.sys(command) def get_clc_stats(self): basic_commands = ['df -B M', 'ps aux', 'free', 'uptime'] clc_commands = ['euca-describe-properties | grep volume'] clc_status = clc_commands + basic_commands for machine in self.tester.get_component_machines("clc"): for command in clc_status: machine.sys("source " + self.tester.credpath + "/eucarc && " + command) def get_sc_stats(self): basic_commands = ['df -B M', 'ps aux', 'free', 'uptime'] """ Grab cloud property for volume location to get stats of files. 
""" volumes_dir = "" for machine in self.tester.get_component_machines("clc"): if volumes_dir == "": volumes_dir = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep volumesdir | awk '{print $3}'")) sc_commands = ['tgtadm --lld iscsi --op show --mode account', 'tgtadm --lld iscsi --op show --mode target', 'du -sh ' + volumes_dir[0], 'lvdisplay | grep "/dev/vg-"', 'vgdisplay', 'pvdisplay', 'losetup -a | grep ' + volumes_dir[0] + ' | wc -l', 'ls -l ' + volumes_dir[0]] sc_status = basic_commands + sc_commands for machine in self.tester.get_component_machines("sc"): self.run_command_list(machine, sc_status) def GenerateVolumesLoad(self): """ Grab EBS Timeout property of Cloud """ ebs_timeout = "" for machine in self.tester.get_component_machines("clc"): if ebs_timeout == "": ebs_timeout = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep ebs_volume_creation_timeout | awk '{print $3}'")) """ Create volumes in series """ for i in xrange(options.number_of_vol): volume = self.tester.create_volume(self.zone) if volume is not None: self.volumes.append(volume) self.statuses.append(volume.status) """ Sleep the EBS Timeout property; only have to call it once """ self.tester.debug("###\n") self.tester.debug("###\tWaiting till EBS Timeout is reached; sleep for " + ebs_timeout[0] + " seconds.\n") self.tester.debug("###\n") self.tester.sleep(float(ebs_timeout[0])) def GenerateVolumesBoto(self): """ Grab EBS Timeout property of Cloud """ ebs_timeout = "" for machine in self.tester.get_component_machines("clc"): if ebs_timeout == "": ebs_timeout = (machine.sys("source " + self.tester.credpath + "/eucarc && euca-describe-properties | grep ebs_volume_creation_timeout | awk '{print $3}'")) """ Create 1 Gig volumes in series """ vol_size = 1 for i in xrange(options.number_of_vol): volume = self.tester.ec2.create_volume(vol_size, self.zone) if volume is not None: self.tester.debug("Volume (" + volume.id + ") is in (" + volume.status + ") state.\n") self.volumes.append(volume) self.statuses.append(volume.status) """ Sleep the EBS Timeout property; only have to call it once """ self.tester.debug("###\n") self.tester.debug("###\tWaiting till EBS Timeout is reached; sleep for " + ebs_timeout[0] + " seconds.\n") self.tester.debug("###\n") self.tester.sleep(float(ebs_timeout[0])) def GenerateCloudStatistics(self): """ Grab status of all volumes on cloud, along with database information """ self.overall_ebs_reporting() """ Display information in eucalyptus_storage,eucalyptus_cloud tables related to EBS - * eucalyptus_storage relations: iscsivolumeinfo, iscsimetadata, volumes, storage_stats_info * eucalyptus_cloud relations: metadata_volumes """ self.iscivolumeinfo_db_dump() self.iscsimetadata_db_dump() self.volumes_db_dump() self.cloudmetadata_db_dump() self.storagestats_db_dump() def EbsStress(self, testcase="GenerateVolumesLoad"): """ Generate volume load; For each thread created - options.number_of_threads - options.number_of_vol will be created """ from multiprocessing import Process from multiprocessing import Queue ### Increase time to by step seconds on each iteration ### This also gives enough time for creds to be pulled from CLC step = 10 """ If extra debugging is set, print additional CLC and SC information """ if options.print_debug is True: self.get_clc_stats() self.get_sc_stats() thread_pool = [] queue_pool = [] ## Start asynchronous activity ## Run GenerateVolumesLoad testcase seconds apart for i in 
xrange(options.number_of_threads): q = Queue() queue_pool.append(q) p = Process(target=self.run_testcase_thread, args=(q, step * i,testcase)) thread_pool.append(p) self.tester.debug("Starting Thread " + str(i) +" in " + str(step * i)) p.start() fail_count = 0 ### Block until the script returns a result for queue in queue_pool: test_result = queue.get(True) self.tester.debug("Got Result: " + str(test_result) ) fail_count += test_result for thread in thread_pool: thread.join() if fail_count > 0: self.tester.critical("Failure detected in one of the " + str(fail_count) + " " + testcase + " tests") self.tester.debug("Successfully completed EbsStress test") def EbsBotoStress(self, testcase="GenerateVolumesBoto"): """ Generate volume load; For each thread created - options.number_of_threads - options.number_of_vol will be created """ from multiprocessing import Process from multiprocessing import Queue ### Increase time to by step seconds on each iteration ### This also gives enough time for creds to be pulled from CLC step = 10 """ If extra debugging is set, print additional CLC and SC information """ if options.print_debug is True: self.get_clc_stats() self.get_sc_stats() thread_pool = [] queue_pool = [] ## Start asynchronous activity ## Run GenerateVolumesLoad testcase seconds apart for i in xrange(options.number_of_threads): q = Queue() queue_pool.append(q) p = Process(target=self.run_testcase_thread, args=(q, step * i,testcase)) thread_pool.append(p) self.tester.debug("Starting Thread " + str(i) +" in " + str(step * i)) p.start() fail_count = 0 ### Block until the script returns a result for queue in queue_pool: test_result = queue.get(True) self.tester.debug("Got Result: " + str(test_result) ) fail_count += test_result for thread in thread_pool: thread.join() if fail_count > 0: self.tester.critical("Failure detected in one of the " + str(fail_count) + " " + testcase + " tests") self.tester.debug("Successfully completed EbsBotoStress test") def run_testcase_thread(self, queue,delay=20, testname=None): ### Thread that runs a testcase (function) and returns its pass or fail result self.tester.sleep(delay) try: result = unittest.TextTestRunner(verbosity=2).run(LoadGenerator(testname)) except Exception, e: queue.put(1) raise e if result.wasSuccessful(): self.tester.debug("Passed test: " + testname) queue.put(0) return False else: self.tester.debug("Failed test: " + testname) queue.put(1) return True
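EbsStress and EbsBotoStress share one coordination pattern: stagger worker processes by a fixed step, have each report a pass/fail integer through a Queue, and sum the failures before joining. Stripped of the eutester specifics, the pattern looks like the sketch below (run_one is a stand-in for whatever callable each worker executes; it is not a name from this module).

from multiprocessing import Process, Queue
import time

def worker(queue, delay, run_one):
    # Stagger start-up so the workers do not hit the cloud all at once
    time.sleep(delay)
    try:
        run_one()
        queue.put(0)  # success
    except Exception:
        queue.put(1)  # failure

def fan_out(run_one, count, step=10):
    queues, procs = [], []
    for i in xrange(count):
        q = Queue()
        queues.append(q)
        p = Process(target=worker, args=(q, step * i, run_one))
        procs.append(p)
        p.start()
    # Drain every queue before joining, mirroring the ordering used above
    failures = sum(q.get(True) for q in queues)
    for p in procs:
        p.join()
    return failures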
class ReportingBasics(EutesterTestCase): def __init__(self, config_file=None, password=None): self.setuptestcase() # Setup basic eutester object self.tester = Eucaops( config_file=config_file, password=password) self.tester.poll_count = 120 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) ### Generate a keypair for the instance self.keypair = self.tester.add_keypair( "keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.tester.get_emi(root_device_type="instance-store") self.reservation = None self.volume = None self.bucket = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.clean_method = self.cleanup self.cur_time = str(int(time.time())) date_fields = time.localtime() self.date = str(date_fields.tm_year) + "-" + str(date_fields.tm_mon) + "-31" clcs = self.tester.get_component_machines("clc") if len(clcs) == 0: raise Exception("No CLC found") else: self.clc = clcs[0] poll_interval = 1 write_interval = 1 size_time_size_unit = "MB" size_time_time_unit = "MINS" size_unit = "MB" time_unit = "MINS" self.modify_property(property="reporting.default_poll_interval_mins",value=poll_interval) self.modify_property(property="reporting.default_write_interval_mins",value=write_interval) self.modify_property(property="reporting.default_size_time_size_unit",value=size_time_size_unit) self.modify_property(property="reporting.default_size_time_time_unit",value=size_time_time_unit) self.modify_property(property="reporting.default_size_unit",value=size_unit) self.modify_property(property="reporting.default_time_unit",value=time_unit) def cleanup(self): if self.reservation: self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") if self.volume: self.tester.delete_volume(self.volume) if self.bucket: self.tester.clear_bucket(self.bucket) self.tester.delete_group(self.group) self.tester.delete_keypair(self.keypair) os.remove(self.keypath) def instance(self): self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=self.zone) file_size_in_mb = 500 for instance in self.reservation.instances: assert isinstance(instance, EuInstance) self.volume = self.tester.create_volume(zone=self.zone, size=4) device_path = instance.attach_volume(self.volume) instance.sys("mkfs.ext3 -F " + device_path) instance.sys("mount " + device_path + " /mnt") ### Write to root fs instance.sys("dd if=/dev/zero of=/tmp/test.img count=" + str(file_size_in_mb) + " bs=1M") ### Write to volume instance.sys("dd if=/dev/zero of=/mnt/test.img count=" + str(file_size_in_mb) + " bs=1M") self.tester.sleep(180) for instance in self.reservation.instances: report_output = self.generate_report("instance","csv", self.date) instance_lines = self.tester.grep(instance.id, report_output) for line in instance_lines: instance_data = self.parse_instance_line(line) #if not re.search( instance.id +",m1.small,1,9,0.2,0,0,0,0,93,200,0.2,0.0,0,1", line): if not re.match(instance_data.type, "m1.small"): raise Exception("Failed to find proper output for " + str(instance) + " type. 
Received: " + instance_data.type ) if not int(instance_data.number) == 1: raise Exception("Failed to find proper output for " + str(instance) + " number. Received: " + instance_data.number ) if not int(instance_data.unit_time) > 2 : raise Exception("Failed to find proper output for " + str(instance) + " unit_time. Received: " + instance_data.unit_time ) if not int(instance_data.disk_write) > 1000: raise Exception("Failed to find proper output for " + str(instance) + " disk_write. Received: " + instance_data.disk_write ) if not int(instance_data.disk_time_write) > 200: raise Exception("Failed to find proper output for " + str(instance) + " disk_time_write. Received: " + instance_data.disk_time_write ) def parse_instance_line(self, line): InstanceData = namedtuple('InstanceData', 'id type number unit_time cpu net_total_in net_total_out ' 'net_extern_in net_extern_out disk_read disk_write disk_iops_read ' 'disk_iops_write disk_time_read disk_time_write') values = line.split(",") return InstanceData(values[0],values[1],values[2],values[3],values[4],values[5],values[6],values[7], values[8],values[9],values[10],values[11],values[12],values[13],values[14]) def s3(self): self.bucket = self.tester.create_bucket(bucket_name="reporting-bucket-" + self.cur_time) key_size = 10 self.tester.debug("Creating random " + str(key_size) + "MB of data") rand_string = self.tester.id_generator(size=1024*1024*10) self.tester.upload_object(self.bucket.name, "reporting-key" ,contents=rand_string) self.tester.sleep(120) report_output = self.generate_report("s3", "csv",self.date) bucket_lines = self.tester.grep(self.bucket.name, report_output) for line in bucket_lines: bucket_data = self.parse_bucket_line(line) if not int(bucket_data.size) == 10: raise Exception('Failed to find proper size for %s' % str(self.bucket)) if not int(bucket_data.keys) == 1: raise Exception('Failed to find proper number of keys for %s' % str(self.bucket)) if not int(bucket_data.unit_time) > 16: raise Exception('Failed to find proper amount of usage for %s' % str(self.bucket)) def parse_bucket_line(self, line): BucketData = namedtuple('BucketData', 'name keys size unit_time') values = line.split(",") return BucketData(values[0],values[1],values[2],values[3] ) def generate_report(self, type, format, end_date): return self.clc.sys("source " + self.tester.credpath + "/eucarc && eureport-generate-report -t " + str(type) +" -f " + str(format) + " -e " + str(end_date) ) def modify_property(self, property, value): """ Modify a eucalyptus property through the command line euca-modify-property tool property Property to modify value Value to set it too """ command = "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-modify-property -p " + str(property) + "=" + str(value) if self.clc.found(command, property): self.debug("Properly modified property " + property) else: raise Exception("Setting property " + property + " failed")
class InstanceBasics(unittest.TestCase): def setUp(self, credpath=None): # Setup basic eutester object if credpath is None: credpath = arg_credpath self.tester = Eucaops(credpath=credpath) self.tester.poll_count = 120 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp") ### Generate a keypair for the instance self.keypair = self.tester.add_keypair("keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.tester.get_emi(root_device_type="instance-store") self.reservation = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name def tearDown(self): if self.reservation is not None: self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") self.tester.delete_group(self.group) self.tester.delete_keypair(self.keypair) os.remove(self.keypath) self.reservation = None self.group = None self.keypair = None self.tester = None self.ephemeral = None def BasicInstanceChecks(self, zone=None): """Instance checks including reachability and ephemeral storage""" if zone is None: zone = self.zone if self.reservation is None: self.reservation = self.tester.run_instance( self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: self.assertTrue(self.tester.wait_for_reservation(self.reservation), 'Instance did not go to running') self.assertNotEqual(instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same') self.assertTrue(self.tester.ping(instance.public_dns_name), 'Could not ping instance') self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2", "No such file or directory"), 'Did not find ephemeral storage at ' + instance.rootfs_device + "2") return self.reservation def ElasticIps(self, zone=None): """ Basic test for elastic IPs Allocate an IP, associate it with an instance, ping the instance Disassociate the IP, ping the instance Release the address""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: address = self.tester.allocate_address() self.assertTrue(address, 'Unable to allocate address') self.tester.associate_address(instance, address) instance.update() self.assertTrue(self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP") self.tester.disassociate_address_from_instance(instance) self.tester.release_address(address) instance.update() self.assertTrue(self.tester.ping(instance.public_dns_name), "Could not ping after dissassociate") return self.reservation def MaxSmallInstances(self, available_small=None, zone=None): """Run the maximum m1.smalls available""" if available_small is None: available_small = self.tester.get_available_vms() if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, min=available_small, max=available_small, zone=zone) self.assertTrue(self.tester.wait_for_reservation(self.reservation), 'Not all instances went to running') return self.reservation def LargestInstance(self, zone=None): """Run 1 of the largest instance c1.xlarge""" if zone is None: zone = 
self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, type="c1.xlarge", zone=zone) self.assertTrue(self.tester.wait_for_reservation(self.reservation), 'Not all instances went to running') return self.reservation def MetaData(self, zone=None): """Check metadata for consistency""" # Missing nodes # ['block-device-mapping/', 'ami-manifest-path'] if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: ## Need to verify the public key (could just be checking for a string of a certain length) self.assertTrue( re.match( instance.get_metadata("public-keys/0/openssh-key") [0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata') self.assertTrue( re.match( instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata') # Need to validate block device mapping #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) self.assertTrue( re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata') self.assertTrue( re.match( instance.get_metadata("local-ipv4")[0], instance.private_ip_address), 'Incorrect private ip in metadata') self.assertTrue( re.match( instance.get_metadata("public-ipv4")[0], instance.ip_address), 'Incorrect public ip in metadata') self.assertTrue( re.match( instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata') self.assertTrue( re.match( instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata') self.assertTrue( re.match( instance.get_metadata("reservation-id")[0], self.reservation.id), 'Incorrect reservation in metadata') self.assertTrue( re.match( instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata') self.assertTrue( re.match( instance.get_metadata("kernel-id")[0], instance.kernel), 'Incorrect kernel id in metadata') self.assertTrue( re.match( instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata') self.assertTrue( re.match( instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata') self.assertTrue( re.match( instance.get_metadata("hostname")[0], instance.dns_name), 'Incorrect host name in metadata') self.assertTrue( re.match( instance.get_metadata("ramdisk-id")[0], instance.ramdisk), 'Incorrect ramdisk in metadata') #instance-type self.assertTrue( re.match( instance.get_metadata("instance-type")[0], instance.instance_type), 'Incorrect instance type in metadata') BAD_META_DATA_KEYS = ['foobar'] for key in BAD_META_DATA_KEYS: self.assertTrue( re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node') return self.reservation def DNSResolveCheck(self, zone=None): """Check DNS resolution information for public/private DNS names and IP addresses. 
The DNS resolution behavior follows AWS EC2.""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: # Test to see if Dynamic DNS has been configured # if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]): # Per AWS standard, resolution should have private hostname or private IP as a valid response # Perform DNS resolution against private IP and private DNS name # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys("nslookup " + instance.get_metadata("hostname")[0])[3]), "DNS lookup failed for hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue( re.search( instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("hostname")[0])[5]), "Incorrect DNS resolution for hostname.") # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys( "nslookup " + instance.get_metadata("local-hostname")[0])[3]), "DNS lookup failed for private hostname.") # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address self.assertTrue( re.search( instance.get_metadata("local-ipv4")[0], instance.sys( "nslookup " + instance.get_metadata("local-hostname")[0])[5]), "Incorrect DNS resolution for private hostname.") # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0]) [3]), "DNS lookup failed for private IP address.") # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname self.assertTrue( re.search( instance.get_metadata("local-hostname")[0], instance.sys("nslookup " + instance.get_metadata("local-ipv4")[0]) [4]), "Incorrect DNS resolution for private IP address") # Perform DNS resolution against public IP and public DNS name # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys( "nslookup " + instance.get_metadata("public-hostname")[0])[3]), "DNS lookup failed for public-hostname.") # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address self.assertTrue( re.search( instance.get_metadata("local-ipv4")[0], instance.sys( "nslookup " + instance.get_metadata("public-hostname")[0])[5]), "Incorrect DNS resolution for public-hostname.") # Check to see if nslookup was able to resolve self.assertTrue( re.search( 'answer\:', instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0]) [3]), "DNS lookup failed for public IP address.") # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname self.assertTrue( re.search( instance.get_metadata("public-hostname")[0], instance.sys("nslookup " + instance.get_metadata("public-ipv4")[0]) [4]), "Incorrect DNS resolution for public IP address") return self.reservation def DNSCheck(self, zone=None): """Check to make sure Dynamic DNS reports correct information for public/private IP address and DNS names""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: # Test to see if Dynamic DNS has been configured # if 
re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]): # Make sure that private_ip_address is not the same as local-hostname self.assertFalse( re.match(instance.private_ip_address, instance.private_dns_name), 'local-ipv4 and local-hostname are the same with DNS on') # Make sure that ip_address is not the same as public-hostname self.assertFalse( re.match(instance.ip_address, instance.public_dns_name), 'public-ipv4 and public-hostname are the same with DNS on') return self.reservation def Reboot(self, zone=None): """Reboot instance ensure IP connectivity and volumes stay attached""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: ### Create 1GB volume in first AZ self.volume = self.tester.create_volume(instance.placement, 1) euvolume = EuVolume.make_euvol_from_vol(self.volume) self.volume_device = instance.attach_euvolume(euvolume) ### Reboot instance instance.reboot_instance_and_verify(waitconnect=20) instance.detach_euvolume(euvolume) return self.reservation def Churn(self, testcase="BasicInstanceChecks"): """Start instances and stop them before they are running, increase time to terminate on each iteration""" from multiprocessing import Process from multiprocessing import Queue ### Increase time to terminate by step seconds on each iteration step = 10 ## Run through count iterations of test count = self.tester.get_available_vms("m1.small") / 2 thread_pool = [] queue_pool = [] ## Start asynchronous activity ## Run 5 basic instance check instances 10s apart for i in xrange(count): q = Queue() queue_pool.append(q) p = Process(target=self.run_testcase_thread, args=(q, step * i, testcase)) thread_pool.append(p) self.tester.debug("Starting Thread " + str(i) + " in " + str(step * i)) p.start() ### While the other tests are running, run and terminate count instances with a 10s sleep in between for i in xrange(count): self.reservation = self.image.run() self.tester.debug("Sleeping for " + str(step) + " seconds before terminating instances") self.tester.sleep(step) for instance in self.reservation.instances: instance.terminate() self.assertTrue( self.tester.wait_for_instance(instance, "terminated"), "Instance did not go to terminated") ### Once the previous test is complete rerun the BasicInstanceChecks test case ### Wait for an instance to become available count = self.tester.get_available_vms("m1.small") poll_count = 30 while poll_count > 0: self.tester.sleep(5) count = self.tester.get_available_vms("m1.small") if count > 0: self.tester.debug( "There is an available VM to use for final test") break poll_count -= 1 fail_count = 0 ### Block until the script returns a result for queue in queue_pool: test_result = queue.get(True) self.tester.debug("Got Result: " + str(test_result)) fail_count += test_result for thread in thread_pool: thread.join() if fail_count > 0: raise Exception("Failure detected in one of the " + str(count) + " Basic Instance tests") self.tester.debug("Successfully completed churn test") def PrivateIPAddressing(self, zone=None): """Basic test to run an instance with Private only IP and later allocate/associate/diassociate/release an Elastic IP. 
In the process, check that after the disassociation the instance retains only its private IP, and that no new public IP gets associated with it""" if zone is None: zone = self.zone self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, private_addressing=True, zone=zone) for instance in self.reservation.instances: address = self.tester.allocate_address() self.assertTrue(address, 'Unable to allocate address') self.assertTrue(self.tester.associate_address(instance, address)) self.tester.sleep(30) instance.update() self.assertTrue(self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP") address.disassociate() self.tester.sleep(30) instance.update() self.assertFalse( self.tester.ping(instance.public_dns_name), "Was able to ping instance that should have only had a private IP" ) address.release() if instance.public_dns_name != instance.private_dns_name: self.fail("Instance received a new public IP: " + instance.public_dns_name) return self.reservation def ReuseAddresses(self, zone=None): """ Run instances in series and ensure they get the same address""" prev_address = None if zone is None: zone = self.zone ### Run the test 5 times in a row for i in xrange(5): self.reservation = self.tester.run_instance( keypair=self.keypair.name, group=self.group.name, zone=zone) for instance in self.reservation.instances: if prev_address is not None: self.assertTrue( re.search(str(prev_address), str(instance.public_dns_name)), "Address " + str(prev_address) + " did not get reused; instance received " + str(instance.public_dns_name)) prev_address = instance.public_dns_name self.tester.terminate_instances(self.reservation) def run_testcase_thread(self, queue, delay=20, name="MetaData"): ### Thread that runs a testcase (function) and returns its pass or fail result self.tester.sleep(delay) try: result = unittest.TextTestRunner(verbosity=2).run( InstanceBasics(name)) except Exception, e: queue.put(1) raise e if result.wasSuccessful(): self.tester.debug("Passed test: " + name) queue.put(0) return False else: self.tester.debug("Failed test: " + name) queue.put(1) return True
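One caveat on the MetaData assertions in this class (and the earlier copy of it): re.match treats its first argument as a regular expression, so a metadata value containing regex metacharacters, or one that is merely a prefix of the expected value, can still "match". An exact comparison is stricter; below is a sketch of a hypothetical drop-in helper (not part of the suite), using the same get_metadata call the suite already relies on.

def metadata_equals(instance, path, expected):
    # get_metadata returns a list of lines; take the first and compare exactly
    actual = instance.get_metadata(path)[0].strip()
    return actual == str(expected)

# Example: self.assertTrue(metadata_equals(instance, "instance-id", instance.id),
#                          'Incorrect instance id in metadata')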
class TaggingBasics(EutesterTestCase): def __init__(self, extra_args= None): self.setuptestcase() self.setup_parser() if extra_args: for arg in extra_args: self.parser.add_argument(arg) self.get_args() # Setup basic eutester object if self.args.region: self.tester = EC2ops( credpath=self.args.credpath, region=self.args.region) else: self.tester = Eucaops(config_file=self.args.config, password=self.args.password, credpath=self.args.credpath) self.tester.poll_count = 120 ### Add and authorize a group for the instance self.group = self.tester.add_group(group_name="group-" + str(time.time())) self.tester.authorize_group_by_name(group_name=self.group.name ) self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" ) ### Generate a keypair for the instance self.keypair = self.tester.add_keypair( "keypair-" + str(time.time())) self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name) self.image = self.args.emi if not self.image: self.image = self.tester.get_emi(root_device_type="instance-store") self.address = None self.volume = None self.snapshot = None self.private_addressing = False zones = self.tester.ec2.get_all_zones() self.zone = random.choice(zones).name self.reservation = None def clean_method(self): ### Terminate the reservation if it is still up if self.reservation: self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)") if self.volume: self.tester.delete_volume(self.volume,timeout=600) if self.snapshot: self.tester.delete_snapshot(self.snapshot) ### DELETE group self.tester.delete_group(self.group) ### Delete keypair in cloud and from filesystem self.tester.delete_keypair(self.keypair) os.remove(self.keypath) def InstanceTagging(self): """ This case was developed to exercise tagging of an instance resource """ if not self.reservation: self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name) test_instance = None tags = { u'name': 'instance-tag-test', u'location' : 'over there'} for instance in self.reservation.instances: instance.create_tags(tags) test_instance = instance ### Test Filtering , u'tag:location' : 'over there' tag_filter = { u'tag:name': u'instance-tag-test'} reservations = self.tester.ec2.get_all_instances(filters=tag_filter) if len(reservations) != 1: raise Exception('Filter for instances returned too many results') reservation = reservations[0] if self.reservation.id not in reservation.id: raise Exception('Wrong instance id returned after filtering, Expected: ' + self.reservation.id + ' Received: ' + reservation.id ) ### Test non-tag Filtering ### Filters can be found here, most will be tested manually, but a spot check should be added ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeInstances.html new_group = self.tester.add_group("filter-test") self.tester.authorize_group_by_name(group_name=new_group.name ) self.tester.authorize_group_by_name(group_name=new_group.name, port=-1, protocol="icmp" ) filter_test_reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=new_group.name) keypair_filter = {u'key-name': self.keypair.name} group_filter = {u'group-name': new_group.name} keypair_match = self.tester.ec2.get_all_instances(filters=keypair_filter) group_match = self.tester.ec2.get_all_instances(filters=group_filter) self.tester.terminate_instances(filter_test_reservation) self.tester.delete_group(new_group) self.tester.delete_keypair(self.keypair) if len(group_match) != 1: 
raise Exception("Non-tag Filtering of instances by group name: " + str(len(group_match)) + " expected: 1") if len(keypair_match) != 2: raise Exception("Non-tag Filtering of instances by keypair name: " + str(len(keypair_match)) + " expected: 2") ### Test Deletion test_instance.delete_tags(tags) instances = self.tester.ec2.get_all_instances(filters=tag_filter) if len(instances) != 0: raise Exception('Filter returned instances when there shouldnt be any') if all(item in test_instance.tags.items() for item in tags.items()): raise Exception('Tags still returned after deletion') #self.test_restrictions(test_instance) #self.test_in_series(test_instance) self.tester.terminate_instances(self.reservation) self.reservation = None def VolumeTagging(self): """ This case was developed to exercise tagging of an instance resource """ self.volume = self.tester.create_volume(zone=self.zone) tag_id = 'volume-tag-test-' + str(int(time.time())) tags = { u'name': tag_id, u'location' : 'datacenter'} self.volume.create_tags(tags) ### Test Filtering tag_filter = { u'tag:name': tag_id} volumes = self.tester.ec2.get_all_volumes(filters=tag_filter) if len(volumes) is 0: raise Exception('Filter for volumes returned no results:"{0}", filter:"{1}"' .format(volumes, tag_filter)) if len(volumes) is not 1: raise Exception('Filter for volumes returned too many results:"{0}", filter:"{1}"' .format(volumes, tag_filter)) if volumes[0].id != self.volume.id: raise Exception('Wrong volume ID returned after filtering:"{0}", filter:"{1}"' .format(volumes, tag_filter)) ### Test non-tag Filtering ### Filters can be found here, most will be tested manually, but a spot check should be added ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeImages.html vol_size = 3 filter_test_volume_1 = self.tester.create_volume(zone=self.zone, size=vol_size) filter_test_volume_2 = self.tester.create_volume(zone=self.zone, size=vol_size) size_filter = {u'size': vol_size } id_filter = {u'volume-id': self.volume.id} size_match = self.tester.ec2.get_all_volumes(filters=size_filter) id_match = self.tester.ec2.get_all_volumes(filters=id_filter) self.tester.delete_volume(filter_test_volume_1) self.tester.delete_volume(filter_test_volume_2) for sz_vol in size_match: if sz_vol.size != vol_size: try: self.debug('Size filter returned the following volumes:{0}'.format(size_match)) except: pass raise Exception('Filtering of volumes by size:"{0}" returned a volume of ' 'wrong size. 
vol:{1}, size:{2}' .format(vol_size, sz_vol, sz_vol.size )) if len(id_match) != 1: try: self.debug('Id filter returned the following volumes:{0}'.format(id_match)) except: pass raise Exception("Filtering of volumes by id:'{0}' returned {1} volumes, expected 1" .format(self.volume.id, len(id_match or []))) ### Test Deletion self.volume.delete_tags(tags) volumes = self.tester.ec2.get_all_volumes(filters=tag_filter) if len(volumes) != 0: raise Exception("Filter returned volumes when there shouldn't be any") if self.volume.tags != {}: raise Exception('Tags still returned after deleting them from volume') #self.test_restrictions(self.volume) #self.test_in_series(self.volume) def SnapshotTagging(self): """ This case was developed to exercise tagging of a snapshot resource """ if not self.volume: self.volume = self.tester.create_volume(zone=self.zone) self.snapshot = self.tester.create_snapshot_from_volume(self.volume) tags = { u'name': 'snapshot-tag-test', u'location' : 'over there'} self.snapshot.create_tags(tags) ### Test Filtering , u'tag:location' : 'over there' tag_filter = { u'tag:name': 'snapshot-tag-test'} snapshots = self.tester.ec2.get_all_snapshots(filters=tag_filter) if len(snapshots) != 1: raise Exception('Filter for snapshots returned too many results') if snapshots[0].id != self.snapshot.id: raise Exception('Wrong snapshot id returned after filtering') ### Test non-tag Filtering ### Filters can be found here, most will be tested manually, but a spot check should be added ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeSnapshots.html filter_description = "filtering" + str(int(time.time())) filter_test_snapshot = self.tester.create_snapshot_from_volume(self.volume, description=filter_description) description_filter = {u'description': filter_description } volume_filter = {u'volume-id': self.volume.id} description_match = self.tester.ec2.get_all_snapshots(filters=description_filter) volume_match = self.tester.ec2.get_all_snapshots(filters=volume_filter) self.tester.delete_snapshot(filter_test_snapshot) if len(description_match) != 1: raise Exception("Non-tag Filtering of snapshots by description: " + str(len(description_match)) + " expected: 1") if len(volume_match) != 2: raise Exception("Non-tag Filtering of snapshots by volume id returned: " + str(len(volume_match)) + " expected: 2") ### Test Deletion self.snapshot.delete_tags(tags) snapshots = self.tester.ec2.get_all_snapshots(filters=tag_filter) if len(snapshots) != 0: raise Exception("Filter returned snapshots when there shouldn't be any") if self.snapshot.tags != {}: raise Exception('Tags still returned after deleting them from snapshot') #self.test_restrictions(self.snapshot) #self.test_in_series(self.snapshot) self.tester.delete_snapshot(self.snapshot) self.snapshot = None def ImageTagging(self): """ This case was developed to exercise tagging of an image resource """ nametag = u'ImageTaggingName' locationtag = u'ImageTaggingLocation' tags = { nametag: 'image-tag-test', locationtag : 'over there'} orig_image_tags = self.image.tags self.tester.create_tags([self.image.id], tags) ### Test Tag Filtering , u'tag:location' : 'over there' tag_filter = { u'tag:'+nametag: 'image-tag-test'} images = self.tester.ec2.get_all_images(filters=tag_filter) if len(images) != 1: raise Exception('Filter for images returned too many results') if images[0].id != self.image.id: raise Exception('Wrong image id returned after filtering') ### Test non-tag Filtering ### Filters can be found here, 
most will be tested manually, but a spot check should be added ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeImages.html image_description = "image-filtering" filter_image_id = self.tester.register_image( image_location=self.image.location, description=image_description, virtualization_type="hvm") description_filter = {u'description': image_description } location_filter = {u'manifest-location': self.image.location} description_match = self.tester.ec2.get_all_images(filters=description_filter) location_match = self.tester.ec2.get_all_images(filters=location_filter) filter_image = self.tester.get_emi(emi=filter_image_id) self.tester.deregister_image(filter_image) if len(description_match) != 1: raise Exception("Non-tag Filtering of images by description: " + str(len(description_match)) + " expected: 1") if len(location_match) != 2: raise Exception("Non-tag Filtering of images by manifest location: " + str(len(location_match)) + " expected: 2") ### Test Deletion self.tester.delete_tags([self.image.id], tags) images = self.tester.ec2.get_all_images(filters=tag_filter) if len(images) != 0: raise Exception("Filter returned images when there shouldn't be any") for tag in tags: if tag in self.image.tags: raise Exception('Tags still returned after deleting them from image: ' + str(self.image.tags)) #self.test_restrictions(self.image) #self.test_in_series(self.image) def SecurityGroupTagging(self): """ This case was developed to exercise tagging of a security group resource """ tags = { u'name': 'security-tag-test', u'location' : 'over there'} self.debug("Security group ID: " + self.group.id) self.tester.create_tags([self.group.id], tags) ### Test Tag Filtering , u'tag:location' : 'over there' tag_filter = { u'tag:name': 'security-tag-test'} groups = self.tester.ec2.get_all_security_groups(filters=tag_filter) if len(groups) != 1: raise Exception('Filter for groups returned too many results') if groups[0].id != self.group.id: raise Exception('Wrong group id returned after filtering') ### Test non-tag Filtering ### Filters can be found here, most will be tested manually, but a spot check should be added ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeSecurityGroups.html group_name = "filter-test" group_description = "group-filtering" filter_group = self.tester.add_group(group_name=group_name, description=group_description) filter_group_2 = self.tester.add_group(group_name=group_name + "2", description=group_description) description_filter = {u'description': group_description } group_id_filter = {u'group-id': filter_group.id} description_match = self.tester.ec2.get_all_security_groups(filters=description_filter) self.debug("Groups matching description:" + str(description_match)) group_id_match = self.tester.ec2.get_all_security_groups(filters=group_id_filter) self.debug("Groups matching group-id (" + group_id_filter[u'group-id'] + "):" + str(group_id_match)) self.tester.delete_group(filter_group) self.tester.delete_group(filter_group_2) if len(description_match) != 2: raise Exception("Non-tag Filtering of security groups by description: " + str(len(description_match)) + " expected: 2") if len(group_id_match) != 1: raise Exception("Non-tag Filtering of security groups by id: " + str(len(group_id_match)) + " expected: 1") ### Test Deletion self.tester.delete_tags([self.group.id], tags) groups = self.tester.ec2.get_all_security_groups(filters=tag_filter) if len(groups) != 0: raise Exception("Filter returned security groups when there 
    def SecurityGroupTagging(self):
        """
        This case was developed to exercise tagging of a security group resource
        """
        tags = {u'name': 'security-tag-test', u'location': 'over there'}
        self.debug("Security group ID: " + self.group.id)
        self.tester.create_tags([self.group.id], tags)
        ### Test Tag Filtering
        tag_filter = {u'tag:name': 'security-tag-test'}  # , u'tag:location': 'over there'
        groups = self.tester.ec2.get_all_security_groups(filters=tag_filter)
        if len(groups) != 1:
            raise Exception('Filter for groups returned ' + str(len(groups)) + ' results, expected 1')
        if groups[0].id != self.group.id:
            raise Exception('Wrong group id returned after filtering')
        ### Test non-tag Filtering
        ### Filters can be found here; most will be tested manually, but a spot check should be added
        ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeSecurityGroups.html
        group_name = "filter-test"
        group_description = "group-filtering"
        filter_group = self.tester.add_group(group_name=group_name, description=group_description)
        filter_group_2 = self.tester.add_group(group_name=group_name + "2", description=group_description)
        description_filter = {u'description': group_description}
        group_id_filter = {u'group-id': filter_group.id}
        description_match = self.tester.ec2.get_all_security_groups(filters=description_filter)
        self.debug("Groups matching description:" + str(description_match))
        group_id_match = self.tester.ec2.get_all_security_groups(filters=group_id_filter)
        self.debug("Groups matching group-id (" + group_id_filter[u'group-id'] + "):" + str(group_id_match))
        self.tester.delete_group(filter_group)
        self.tester.delete_group(filter_group_2)
        if len(description_match) != 2:
            raise Exception("Non-tag filtering of security groups by description returned: " +
                            str(len(description_match)) + " expected: 2")
        if len(group_id_match) != 1:
            raise Exception("Non-tag filtering of security groups by id returned: " +
                            str(len(group_id_match)) + " expected: 1")
        ### Test Deletion
        self.tester.delete_tags([self.group.id], tags)
        groups = self.tester.ec2.get_all_security_groups(filters=tag_filter)
        if len(groups) != 0:
            raise Exception("Filter returned security groups when there shouldn't be any")
        if self.group.tags != {}:
            raise Exception('Tags still returned after deleting them from '
                            'security group:' + str(self.group.tags))
        #self.test_restrictions(self.group)
        #self.test_in_series(self.group)

    def test_restrictions(self, resource):
        max_tags_number = 10
        max_tags = {}
        for i in xrange(max_tags_number):
            max_tags[u'key' + str(i)] = 'value' + str(i)
        self.test_tag_creation(max_tags, resource=resource,
                               fail_message="Failure when trying to add max allowable tags (" +
                                            str(max_tags_number) + ")",
                               expected_outcome=True)
        self.test_tag_deletion(max_tags, resource=resource,
                               fail_message="Failure when trying to delete max allowable tags (" +
                                            str(max_tags_number) + ")",
                               expected_outcome=True)
        too_many_tags = {}
        for i in xrange(max_tags_number + 1):
            too_many_tags[u'key' + str(i)] = 'value' + str(i)
        self.test_tag_creation(too_many_tags, resource=resource,
                               fail_message="Allowed too many tags to be created",
                               expected_outcome=False)
        max_key_length = 127
        max_key = u'0' * max_key_length
        maximum_key_length = {max_key: 'my value'}
        self.test_tag_creation(maximum_key_length, resource=resource,
                               fail_message="Unable to use a key with " + str(max_key_length) + " characters",
                               expected_outcome=True)
        self.test_tag_deletion(maximum_key_length, resource=resource,
                               fail_message="Unable to delete a key with " + str(max_key_length) + " characters",
                               expected_outcome=True)
        key_too_large = {max_key + u'0': 'my value'}
        self.test_tag_creation(key_too_large, resource=resource,
                               fail_message="Allowed key with more than " + str(max_key_length) + " chars",
                               expected_outcome=False)
        max_value_length = 255
        maximum_value = '0' * max_value_length
        maximum_value_length = {u'my_key': maximum_value}
        self.test_tag_creation(maximum_value_length, resource=resource,
                               fail_message="Unable to use a value with " + str(max_value_length) + " characters",
                               expected_outcome=True)
        self.test_tag_deletion(maximum_value_length, resource=resource,
                               fail_message="Unable to delete a value with " + str(max_value_length) + " characters",
                               expected_outcome=True)
        value_too_large = {u'my_key': maximum_value + '0'}
        self.test_tag_creation(value_too_large, resource=resource,
                               fail_message="Allowed value with more than " + str(max_value_length) + " chars",
                               expected_outcome=False)
        aws_key_prefix = {u'aws:something': 'asdfadsf'}
        self.test_tag_creation(aws_key_prefix, resource=resource,
                               fail_message="Allowed key with 'aws:' prefix",
                               expected_outcome=False)
        aws_value_prefix = {u'my_key': 'aws:somethingelse'}
        self.test_tag_creation(aws_value_prefix, resource=resource,
                               fail_message="Did not allow creation of value with 'aws:' prefix",
                               expected_outcome=True)
        self.test_tag_deletion(aws_value_prefix, resource=resource,
                               fail_message="Did not allow deletion of value with 'aws:' prefix",
                               expected_outcome=True)
        lower_case = {u'case': 'value'}
        upper_case = {u'CASE': 'value'}
        self.test_tag_creation(lower_case, resource=resource,
                               fail_message="Unable to add key with all lower case",
                               expected_outcome=True)
        self.test_tag_creation(upper_case, resource=resource,
                               fail_message="Case sensitivity not enforced, unable to create tag "
                                            "with different capitalization",
                               expected_outcome=True)
        self.test_tag_deletion(lower_case, resource=resource,
                               fail_message="Unable to delete a tag, when testing case sensitivity",
                               expected_outcome=True)
        self.test_tag_deletion(upper_case, resource=resource,
                               fail_message="Unable to delete a tag, when testing case sensitivity",
                               expected_outcome=True)
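    # test_restrictions leans on an expected-failure pattern: ask the API to do
    # something it must reject and treat the exception as the passing case. A
    # minimal sketch of that pattern on its own; the helper name is an
    # illustrative addition.
    def example_expect_rejection(self, resource):
        try:
            # Keys beginning with 'aws:' are reserved and should be refused
            resource.create_tags({u'aws:reserved': 'value'})
        except Exception, e:
            self.debug('Tag creation rejected as expected: ' + str(e))
            return True
        raise Exception("Tag with reserved 'aws:' prefix was accepted")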
    def test_tag_creation(self, tags, resource, fail_message, expected_outcome=True, timeout=600):
        actual_outcome = None
        exception = None
        try:
            resource.create_tags(tags, timeout=timeout)
            actual_outcome = True
        except Exception, e:
            exception = e
            actual_outcome = False
        finally:
            if actual_outcome != expected_outcome:
                self.debug('Expected outcome ' + str(expected_outcome) + ' but got ' +
                           str(actual_outcome) + ', last exception: ' + str(exception))
                self.fail(fail_message)
                 image.id)
            raise e
    try:
        keypath = os.getcwd() + "/" + keypair.name + ".pem"
        pmsg('Getting contents from /dev...')
        before_attach = instance.get_dev_dir()
    except Exception, ie:
        raise Exception("Failed to retrieve contents of /dev dir from instance, Error:" + str(ie))
    pmsg("Got snapshot of /dev, now creating a volume of " + str(rfsize) + " to attach to our instance...")
    volume = tester.create_volume(zone, rfsize)
    dev = "/dev/sdf"
    pmsg("Attaching Volume (" + volume.id + ") to instance(" + instance.id + ") trying dev(" + dev + ")")
    try:
        volume.attach(instance.id, dev)
    except Exception, ve:
        raise Exception("Error attaching volume:" + str(volume.id) + ", Error:" + str(ve))
    pmsg("Sleeping and waiting for volume to attach fully to instance")
    tester.sleep(20)
    for x in range(0, 10):
        #after_attach = instance.sys('ls -1 /dev/| grep "sd\|vd"')
        after_attach = instance.get_dev_dir()
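# A small sketch of the before/after comparison the loop above is building
# toward: diff the /dev listings to find the node the volume actually appeared
# as, since the guest may remap the requested /dev/sdf to /dev/vdf or similar.
# The function name is an illustrative addition, not part of the original script.
def find_new_block_devices(before_attach, after_attach):
    # Any entry present after the attach but not before is a candidate device
    return [dev for dev in after_attach if dev not in before_attach]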
class InstanceBasics(EutesterTestCase):
    def __init__(self, name="InstanceBasics", credpath=None, region=None, config_file=None,
                 password=None, emi=None, zone=None, user_data=None, instance_user=None, **kwargs):
        """
        EC2 API tests focused on instance store instances

        :param credpath: Path to directory containing eucarc file
        :param region: EC2 Region to run testcase in
        :param config_file: Configuration file path
        :param password: SSH password for bare metal machines if config is passed and keys aren't synced
        :param emi: Image id to use for test
        :param zone: Availability Zone to run test in
        :param user_data: User Data to pass to instance
        :param instance_user: User to login to instance as
        :param kwargs: Additional arguments
        """
        super(InstanceBasics, self).__init__(name=name)
        if region:
            self.tester = EC2ops(credpath=credpath, region=region)
        else:
            self.tester = Eucaops(config_file=config_file, password=password, credpath=credpath)
        self.instance_timeout = 480

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        if emi:
            self.image = emi
        else:
            self.image = self.tester.get_emi(root_device_type="instance-store", not_location="loadbalancer")
        self.address = None
        self.volume = None
        self.private_addressing = False
        if not zone:
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name
        else:
            self.zone = zone
        self.reservation = None
        self.reservation_lock = threading.Lock()
        self.run_instance_params = {'image': self.image,
                                    'user_data': user_data,
                                    'username': instance_user,
                                    'keypair': self.keypair.name,
                                    'group': self.group.name,
                                    'zone': self.zone,
                                    'timeout': self.instance_timeout}
        self.managed_network = True

        ### If I have access to the underlying infrastructure I can look
        ### at the network mode and only run certain tests where it makes sense
        if hasattr(self.tester, "service_manager"):
            cc = self.tester.get_component_machines("cc")[0]
            network_mode = cc.sys("cat " + self.tester.eucapath +
                                  "/etc/eucalyptus/eucalyptus.conf | grep MODE")[0]
            if re.search("(SYSTEM|STATIC)", network_mode):
                self.managed_network = False

    def set_reservation(self, reservation):
        self.reservation_lock.acquire()
        self.reservation = reservation
        self.reservation_lock.release()

    def clean_method(self):
        self.tester.cleanup_artifacts()
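    # Writes to self.reservation go through set_reservation because tests such as
    # Churn submit work to a thread pool. A minimal sketch of the matching locked
    # read path; the helper name is an illustrative addition, not original code.
    def example_get_reservation(self):
        self.reservation_lock.acquire()
        try:
            return self.reservation
        finally:
            self.reservation_lock.release()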
""" reservation = self.tester.run_instance(**self.run_instance_params) for instance in reservation.instances: self.assertTrue( self.tester.wait_for_reservation(reservation) ,'Instance did not go to running') self.assertTrue( self.tester.ping(instance.ip_address), 'Could not ping instance') self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2", "No such file or directory"), 'Did not find ephemeral storage at ' + instance.rootfs_device + "2") self.set_reservation(reservation) return reservation def ElasticIps(self): """ This case was developed to test elastic IPs in Eucalyptus. This test case does not test instances that are launched using private-addressing option. The test case executes the following tests: - allocates an IP, associates the IP to the instance, then pings the instance. - disassociates the allocated IP, then pings the instance. - releases the allocated IP address If any of the tests fail, the test case will error out, logging the results. """ if not self.reservation: reservation = self.tester.run_instance(**self.run_instance_params) else: reservation = self.reservation for instance in reservation.instances: if instance.ip_address == instance.private_ip_address: self.tester.debug("WARNING: System or Static mode detected, skipping ElasticIps") return reservation self.address = self.tester.allocate_address() self.assertTrue(self.address,'Unable to allocate address') self.tester.associate_address(instance, self.address) instance.update() self.assertTrue( self.tester.ping(instance.ip_address), "Could not ping instance with new IP") self.tester.disassociate_address_from_instance(instance) self.tester.release_address(self.address) self.address = None assert isinstance(instance, EuInstance) self.tester.sleep(5) instance.update() self.assertTrue( self.tester.ping(instance.ip_address), "Could not ping after dissassociate") self.set_reservation(reservation) return reservation def MultipleInstances(self): """ This case was developed to test the maximum number of m1.small vm types a configured cloud can run. The test runs the maximum number of m1.small vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. """ if self.reservation: self.tester.terminate_instances(self.reservation) self.set_reservation(None) reservation = self.tester.run_instance(min=2, max=2, **self.run_instance_params) self.assertTrue(self.tester.wait_for_reservation(reservation) ,'Not all instances went to running') self.set_reservation(reservation) return reservation def LargestInstance(self): """ This case was developed to test the maximum number of c1.xlarge vm types a configured cloud can run. The test runs the maximum number of c1.xlarge vm types allowed, then tests to see if all the instances reached a running state. If there is a failure, the test case errors out; logging the results. """ if self.reservation: self.tester.terminate_instances(self.reservation) self.set_reservation(None) reservation = self.tester.run_instance(type="c1.xlarge", **self.run_instance_params) self.assertTrue( self.tester.wait_for_reservation(reservation) ,'Not all instances went to running') self.set_reservation(reservation) return reservation def MetaData(self): """ This case was developed to test the metadata service of an instance for consistency. 
    def MetaData(self):
        """
        This case was developed to test the metadata service of an instance for consistency.
        The following meta-data attributes are tested:
            - public-keys/0/openssh-key
            - security-groups
            - instance-id
            - local-ipv4
            - public-ipv4
            - ami-id
            - ami-launch-index
            - reservation-id
            - placement/availability-zone
            - kernel-id
            - public-hostname
            - local-hostname
            - hostname
            - ramdisk-id
            - instance-type
            - any bad metadata that shouldn't be present
        Missing nodes: ['block-device-mapping/', 'ami-manifest-path']
        If any of these tests fail, the test case will error out; logging the results.
        """
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            ## Need to verify the public key (could just be checking for a string of a certain length)
            self.assertTrue(re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1],
                                     self.keypair.name), 'Incorrect public key in metadata')
            self.assertTrue(re.match(instance.get_metadata("security-groups")[0], self.group.name),
                            'Incorrect security group in metadata')
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], ""))
            self.assertTrue(re.match(instance.get_metadata("instance-id")[0], instance.id),
                            'Incorrect instance id in metadata')
            self.assertTrue(re.match(instance.get_metadata("local-ipv4")[0], instance.private_ip_address),
                            'Incorrect private ip in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-ipv4")[0], instance.ip_address),
                            'Incorrect public ip in metadata')
            self.assertTrue(re.match(instance.get_metadata("ami-id")[0], instance.image_id),
                            'Incorrect ami id in metadata')
            self.assertTrue(re.match(instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index),
                            'Incorrect launch index in metadata')
            self.assertTrue(re.match(instance.get_metadata("reservation-id")[0], reservation.id),
                            'Incorrect reservation in metadata')
            self.assertTrue(re.match(instance.get_metadata("placement/availability-zone")[0], instance.placement),
                            'Incorrect availability-zone in metadata')
            self.assertTrue(re.match(instance.get_metadata("kernel-id")[0], instance.kernel),
                            'Incorrect kernel id in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-hostname")[0], instance.public_dns_name),
                            'Incorrect public host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("local-hostname")[0], instance.private_dns_name),
                            'Incorrect private host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("hostname")[0], instance.private_dns_name),
                            'Incorrect host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("ramdisk-id")[0], instance.ramdisk),
                            'Incorrect ramdisk in metadata')
            self.assertTrue(re.match(instance.get_metadata("instance-type")[0], instance.instance_type),
                            'Incorrect instance type in metadata')
            BAD_META_DATA_KEYS = ['foobar']
            for key in BAD_META_DATA_KEYS:
                self.assertTrue(re.search("Not Found", "".join(instance.get_metadata(key))),
                                'No fail message on invalid meta-data node')
        self.set_reservation(reservation)
        return reservation
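    # The get_metadata calls above wrap HTTP GETs against the instance metadata
    # service from inside the guest. A minimal sketch of a single-key check; the
    # helper name and the plain equality comparison are illustrative additions
    # (the suite itself matches with re.match).
    def example_check_metadata_key(self, instance, key, expected):
        value = instance.get_metadata(key)[0]
        if value != str(expected):
            raise Exception('Metadata ' + key + ' returned ' + value +
                            ', expected ' + str(expected))
        return value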
    def DNSResolveCheck(self, zone=None):
        """
        This case was developed to test DNS resolution information for public/private
        DNS names and IP addresses. The tested DNS resolution behavior is expected to
        follow AWS EC2. The following tests are run using the associated meta-data attributes:
            - check to see if Eucalyptus Dynamic DNS is configured
            - nslookup on hostname; checks to see if it matches local-ipv4
            - nslookup on local-hostname; check to see if it matches local-ipv4
            - nslookup on local-ipv4; check to see if it matches local-hostname
            - nslookup on public-hostname; check to see if it matches local-ipv4
            - nslookup on public-ipv4; check to see if it matches public-hostname
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            # Test to see if Dynamic DNS has been configured
            if not re.search("internal", instance.private_dns_name):
                self.tester.debug("Did not find instance DNS enabled, skipping test")
                self.set_reservation(reservation)
                return reservation

            # Per AWS standard, resolution should have private hostname or private IP
            # as a valid response. Perform DNS resolution against public IP and public
            # DNS name, then against private IP and private DNS name, and check
            # whether nslookup was able to resolve each.
            assert isinstance(instance, EuInstance)
            # Check nslookup to resolve public DNS Name to local-ipv4 address
            self.assertTrue(instance.found("nslookup " + instance.public_dns_name + " " + self.tester.ec2.host,
                                           instance.private_ip_address),
                            "Incorrect DNS resolution for hostname.")
            # Check nslookup to resolve public-ipv4 address to public DNS name
            if self.managed_network:
                self.assertTrue(instance.found("nslookup " + instance.ip_address + " " + self.tester.ec2.host,
                                               instance.public_dns_name),
                                "Incorrect DNS resolution for public IP address")
            # Check nslookup to resolve private DNS Name to local-ipv4 address
            if self.managed_network:
                self.assertTrue(instance.found("nslookup " + instance.private_dns_name + " " + self.tester.ec2.host,
                                               instance.private_ip_address),
                                "Incorrect DNS resolution for private hostname.")
            # Check nslookup to resolve local-ipv4 address to private DNS name
            self.assertTrue(instance.found("nslookup " + instance.private_ip_address + " " + self.tester.ec2.host,
                                           instance.private_dns_name),
                            "Incorrect DNS resolution for private IP address")
            self.assertTrue(self.tester.ping(instance.public_dns_name))
        self.set_reservation(reservation)
        return reservation
""" if zone is None: zone = self.zone if not self.reservation: reservation = self.tester.run_instance(**self.run_instance_params) else: reservation = self.reservation for instance in reservation.instances: ### Create 1GB volume in first AZ volume = self.tester.create_volume(instance.placement, 1) volume_device = instance.attach_volume(volume) ### Reboot instance instance.reboot_instance_and_verify(waitconnect=20) instance.detach_euvolume(volume) self.tester.delete_volume(volume) self.set_reservation(reservation) return reservation def Churn(self): """ This case was developed to test robustness of Eucalyptus by starting instances, stopping them before they are running, and increase the time to terminate on each iteration. This test case leverages the BasicInstanceChecks test case. The following steps are ran: - runs BasicInstanceChecks test case 5 times, 10 second apart. - While each test is running, run and terminate instances with a 10sec sleep in between. - When a test finishes, rerun BasicInstanceChecks test case. If any of these tests fail, the test case will error out; logging the results. """ if self.reservation: self.tester.terminate_instances(self.reservation) self.set_reservation(None) available_instances_before = self.tester.get_available_vms(zone=self.zone) ## Run through count iterations of test count = 4 future_instances =[] with ThreadPoolExecutor(max_workers=count) as executor: ## Start asynchronous activity ## Run 5 basic instance check instances 10s apart for i in xrange(count): future_instances.append(executor.submit(self.BasicInstanceChecks)) self.tester.sleep(10) with ThreadPoolExecutor(max_workers=count) as executor: ## Start asynchronous activity ## Terminate all instances for future in future_instances: executor.submit(self.tester.terminate_instances,future.result()) def available_after_greater(): return self.tester.get_available_vms(zone=self.zone) >= available_instances_before self.tester.wait_for_result(available_after_greater, result=True, timeout=360) def PrivateIPAddressing(self): """ This case was developed to test instances that are launched with private-addressing set to True. The tests executed are as follows: - run an instance with private-addressing set to True - allocate/associate/disassociate/release an Elastic IP to that instance - check to see if the instance went back to private addressing If any of these tests fail, the test case will error out; logging the results. 
""" if self.reservation: for instance in self.reservation.instances: if instance.ip_address == instance.private_ip_address: self.tester.debug("WARNING: System or Static mode detected, skipping PrivateIPAddressing") return self.reservation self.tester.terminate_instances(self.reservation) self.set_reservation(None) reservation = self.tester.run_instance(private_addressing=True, **self.run_instance_params) for instance in reservation.instances: address = self.tester.allocate_address() self.assertTrue(address,'Unable to allocate address') self.tester.associate_address(instance, address) self.tester.sleep(30) instance.update() self.assertTrue( self.tester.ping(instance.ip_address), "Could not ping instance with new IP") address.disassociate() self.tester.sleep(30) instance.update() self.assertFalse(self.tester.ping(instance.ip_address), "Was able to ping instance that should have only had a private IP") address.release() if instance.ip_address != "0.0.0.0" and instance.ip_address != instance.private_ip_address: self.fail("Instance received a new public IP: " + instance.ip_address) self.tester.terminate_instances(self.reservation) self.set_reservation(None) return reservation def ReuseAddresses(self): """ This case was developed to test when you run instances in a series, and make sure they get the same address. The test launches an instance, checks the IP information, then terminates the instance. This test is launched 5 times in a row. If there is an error, the test case will error out; logging the results. """ prev_address = None if self.reservation: self.tester.terminate_instances(self.reservation) self.set_reservation(None) for i in xrange(5): reservation = self.tester.run_instance(**self.run_instance_params) for instance in reservation.instances: if prev_address is not None: self.assertTrue(re.search(str(prev_address) ,str(instance.ip_address)), str(prev_address) +" Address did not get reused but rather " + str(instance.public_dns_name)) prev_address = instance.ip_address self.tester.terminate_instances(reservation)