class InstanceBasics(unittest.TestCase):
    def setUp(self, credpath=None):
        # Set up the basic eutester object
        if credpath is None:
            credpath = arg_credpath
        self.tester = Eucaops(credpath=credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"

        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def create_attach_volume(self, instance, size):
        self.volume = self.tester.create_volume(instance.placement, size)
        device_path = "/dev/" + instance.block_device_prefix + "j"
        before_attach = instance.get_dev_dir()
        try:
            self.assertTrue(self.tester.attach_volume(instance, self.volume, device_path),
                            "Failure attaching volume")
        except AssertionError, e:
            self.assertTrue(self.tester.delete_volume(self.volume))
            return False
        after_attach = instance.get_dev_dir()
        new_devices = self.tester.diff(after_attach, before_attach)
        if len(new_devices) == 0:
            return False
        self.volume_device = "/dev/" + new_devices[0].strip()
        instance.assertFilePresent(self.volume_device)
        return True

class InstanceBasics(unittest.TestCase):
    def setUp(self):
        # Set up the basic eutester object, discovering a local "eucarc-*" credentials directory if present
        eucarc_regex = re.compile("eucarc-")
        eucarc_dirs = [path for path in os.listdir(".") if eucarc_regex.search(path)]
        eucarc_path = None
        if len(eucarc_dirs) > 0:
            eucarc_path = eucarc_dirs[0]
        self.tester = Eucaops(config_file="../input/2b_tested.lst",
                              password="******",
                              credpath=eucarc_path)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"

        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def create_attach_volume(self, instance, size):
        self.volume = self.tester.create_volume(instance.placement, size)
        device_path = "/dev/" + instance.block_device_prefix + "j"
        before_attach = instance.get_dev_dir()
        try:
            self.assertTrue(self.tester.attach_volume(instance, self.volume, device_path),
                            "Failure attaching volume")
        except AssertionError, e:
            self.assertTrue(self.tester.delete_volume(self.volume))
            return False
        after_attach = instance.get_dev_dir()
        new_devices = self.tester.diff(after_attach, before_attach)
        if len(new_devices) == 0:
            return False
        self.volume_device = "/dev/" + new_devices[0].strip()
        instance.assertFilePresent(self.volume_device)
        return True

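# --- Hypothetical runner (not part of the original test class) ---
# A minimal sketch showing how InstanceBasics could be driven with the standard
# library unittest runner; it assumes arg_credpath (used by the first setUp above)
# is defined at module scope, e.g. parsed from the command line.
if __name__ == "__main__":
    unittest.main()
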
class ConsoleCleanUp(EutesterTestCase):
    def __init__(self, extra_args=None, **kwargs):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        for kwarg in kwargs:
            self.args[kwarg] = kwargs[kwarg]
        # Set up the basic eutester object
        if self.args.region:
            self.tester = EC2ops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

    def populate_resources_for_console_test(self):
        '''
        This method creates resources in the cloud for the console test to display.
        '''
        zone = self.tester.ec2.get_all_zones()[0].name
        volume = self.tester.ec2.create_volume(1, zone)
        self.tester.wait_for_volume(volume)
        snapshot = self.tester.create_snapshot_from_volume(volume)
        self.tester.create_volume(zone=zone, snapshot=snapshot)
        keypair = self.tester.ec2.create_key_pair("test-key").name
        s_group = self.tester.ec2.create_security_group("mygroup", "Security group for console test.").name
        image = self.tester.get_images()[0]
        image_id = self.tester.get_images()[0].id
        instance = self.tester.run_image(image=image, keypair="test-key", group="mygroup",
                                         auto_connect=False, zone=zone)
        instance_id = self.tester.get_instances('running')[0].id
        ip = self.tester.allocate_address().public_ip
        self.tester.allocate_address()
        self.tester.ec2.associate_address(instance_id, ip)
        self.tester.create_launch_config("LC1", image_id, keypair, [s_group], instance_type="m1.small")
        self.tester.create_as_group("ASG1", "LC1", self.tester.get_zones(),
                                    min_size=1, max_size=8, desired_capacity=2)
        instance = self.tester.get_instances('running')[0]
        self.tester.attach_volume(instance, volume, "vdb")

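# --- Hypothetical usage sketch (not part of the original file) ---
# One way the resource population above could be invoked, assuming this eutester
# version provides the common EutesterTestCase helpers create_testunit_by_name()
# and run_test_case_list().
if __name__ == "__main__":
    testcase = ConsoleCleanUp()
    unit_list = [testcase.create_testunit_by_name("populate_resources_for_console_test")]
    # clean_on_exit=False keeps the created resources around for console inspection.
    result = testcase.run_test_case_list(unit_list, clean_on_exit=False)
    exit(result)
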
class Euca5033(unittest.TestCase):
    def setUp(self):
        self.conf = "cloud.conf"
        self.device = "/dev/sda1"
        self.tester = Eucaops(config_file=self.conf, password="******")
        self.doAuth()

    def tearDown(self):
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        self.tester.local("rm " + self.keypair.name + ".pem")
        shutil.rmtree(self.tester.credpath)

    def doAuth(self):
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        self.tester.authorize_group(self.group)

    def testEBS(self):
        # Get the existing EBS emi
        self.emi = self.tester.get_emi(root_device_type="ebs")
        # Start instance
        self.reservation = self.tester.run_instance(self.emi,
                                                    keypair=self.keypair.name,
                                                    group=self.group,
                                                    is_reachable=False)
        # Make sure the instance is running, then set instance variables
        for instance in self.reservation.instances:
            if instance.state == "running":
                self.instance = instance
                self.zone = instance.placement
        # Run test
        self.rootVolume = self.tester.get_volume(attached_dev=self.device)
        self.tester.stop_instances(self.reservation)
        # EBS instance is now in the stopped state; try to detach and re-attach the root volume.
        self.tester.detach_volume(self.rootVolume)
        self.tester.attach_volume(self.instance, self.rootVolume, self.device)
        # Make sure the instance will start.
        self.tester.start_instances(self.reservation)
        pass

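# --- Hypothetical runner (not part of the original test class) ---
# A minimal sketch for running the EBS stop/detach/attach/start check above with
# the standard library unittest runner; it assumes cloud.conf is present in the
# working directory, as setUp expects.
if __name__ == "__main__":
    unittest.main()
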
class CloudWatchBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument('--clean_on_exit',
                                 action='store_true',
                                 default=True,
                                 help='Boolean, used to flag whether to run the clean up method after running the test list')
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        ### Setup basic eutester object
        if self.args.region:
            self.tester = CWops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config,
                                  password=self.args.password,
                                  credpath=self.args.credpath)
        self.start_time = str(int(time.time()))
        self.zone = self.tester.get_zones()
        self.namespace = 'Namespace-' + self.start_time
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        ### Setup AutoScaling
        self.setUpAutoscaling()
        ### Create Dimensions used in tests
        self.instanceDimension = newDimension('InstanceId', self.instanceid)
        self.volumeDimension = newDimension('VolumeId', self.volume.id)
        self.autoScalingDimension = newDimension('AutoScalingGroupName', self.auto_scaling_group_name)
        ### Setup Alarms
        self.setUpAlarms()
        ### Wait for metrics to populate, timeout 30 minutes
        self.tester.wait_for_result(self.IsMetricsListPopulated, result=True, timeout=1800)

    def clean_method(self):
        self.tester.cleanup_artifacts()
        self.cleanUpAutoscaling()
        self.tester.delete_keypair(self.keypair)
        pass

    def get_time_window(self, end=None, **kwargs):
        if not end:
            end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(**kwargs)
        return (start, end)

    def print_timeseries_for_graphite(self, timeseries):
        for datapoint in timeseries:
            print 'graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' + \
                  str((datapoint['Timestamp'] - datetime.datetime(1970, 1, 1)).total_seconds())

    def PutDataGetStats(self):
        assert self.testAwsReservedNamspaces()
        seconds_to_put_data = 120
        metric_data = 1
        time_string = str(int(time.time()))
        metric_name = "Metric-" + time_string
        incrementing = True
        while datetime.datetime.now().second != 0:
            self.tester.debug("Waiting for minute edge")
            self.tester.sleep(1)
        start = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_to_put_data)
        for i in xrange(seconds_to_put_data):
            timestamp = start + datetime.timedelta(seconds=i)
            self.tester.debug("Adding metric: {metric} to namespace: {namespace} with value {value} at {timestamp}"
                              .format(metric=metric_name, namespace=self.namespace,
                                      value=metric_data, timestamp=timestamp))
            self.tester.cw.put_metric_data(self.namespace, [metric_name], [metric_data], timestamp=timestamp)
            if metric_data == 600 or metric_data == 0:
                incrementing = not incrementing
            if incrementing:
                metric_data += 1
            else:
                metric_data -= 1
        end = start + datetime.timedelta(seconds=seconds_to_put_data)
        self.tester.sleep(60)
        metric = self.tester.cw.list_metrics(namespace=self.namespace)[0]
        assert isinstance(metric, Metric)
        stats_array = metric.query(start_time=start, end_time=end,
                                   statistics=['Average', 'Sum', 'Maximum', 'Minimum', 'SampleCount'])
        assert len(stats_array) == 2
        if stats_array[0]['Minimum'] == 1:
            first_sample = stats_array[0]
            second_sample = stats_array[1]
        else:
            second_sample = stats_array[0]
            first_sample = stats_array[1]
        print stats_array
        ## Check sample 1
        assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0
        assert first_sample['Average'] < 34 and first_sample['Average'] > 26
        assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500
        assert first_sample['SampleCount'] > 50
        ## Check sample 2
        assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50
        assert second_sample['Average'] < 95 and second_sample['Average'] > 80
        assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600
        assert second_sample['SampleCount'] > 50
        assert first_sample['Average'] < second_sample['Average']
        assert first_sample['Sum'] < second_sample['Sum']
        assert first_sample['Maximum'] < second_sample['Maximum']
        assert first_sample['Minimum'] < second_sample['Minimum']

    def ListMetrics(self, metricNames, dimension):
        self.debug('Get Metric list')
        metricList = self.tester.list_metrics(dimensions=dimension)
        self.debug('Checking to see if list is populated at all.')
        assert len(metricList) > 0
        self.debug('Make sure dimensions are listed.')
        found = False
        for metric in metricList:
            self.debug(metric.dimensions)
            if str(metric.dimensions).count(dimension[dimension.keys().pop()]):
                self.debug('Dimension ' + dimension[dimension.keys().pop()])
                found = True
                break
        assert found
        self.debug('Checking to see if we get all the expected instance metrics.')
        for metric in metricNames:
            assert str(metricList).count(metric['name']) > 0
            self.debug('Metric ' + metric['name'])
        pass

    def checkMetricFilters(self):
        self.debug('Check list_metrics filtering parameters')
        metricList = self.tester.list_metrics(namespace='AWS/EC2')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='AWS/EBS')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='NonExistent-NameSpace')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(metric_name='CPUUtilization')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(metric_name='NonExistent-Metric-Name')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.instanceDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceId', 'NonExistent-InstanceId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', self.image.id))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', 'NonExistent-imageId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', self.instance_type))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', 'NonExistent-InstanceType'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.autoScalingDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('AutoScalingGroupName', 'NonExistent-AutoScalingGroupName'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        pass

    def IsMetricsListPopulated(self):
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        metrics1 = self.tester.cw.get_metric_statistics(60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average',
                                                        dimensions=self.instanceDimension, unit='Percent')
        metrics2 = self.tester.cw.get_metric_statistics(60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average',
                                                        dimensions=self.volumeDimension, unit='Bytes')
        if len(metrics1) > 0 and len(metrics2) > 0:
            return True
        else:
            return False

    def GetMetricStatistics(self, metricNames, namespace, dimension):
        period = 60
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        stats = self.tester.get_stats_array()
        ### Check to make sure we are getting all namespace metrics and statistics
        for i in range(len(metricNames)):
            values = []
            for j in range(len(stats)):
                metricName = metricNames[i]['name']
                statisticName = stats[j]
                unitType = metricNames[i]['unit']
                metrics = self.tester.get_metric_statistics(period, start, end, metricName, namespace,
                                                            statisticName, dimensions=dimension, unit=unitType)
                ### This assures we are getting all statistics for all dimension metrics.
                assert int(len(metrics)) > 0
                statisticValue = str(metrics[0][statisticName])
                self.debug(metricName + ' : ' + statisticName + '=' + statisticValue + ' ' + unitType)
                values.append(statisticValue)
            self.tester.validateStats(values)

    def setUpAutoscaling(self):
        ### Set up autoscaling variables
        self.debug('Setting up AutoScaling, starting 1 instance')
        self.instance_type = 'm1.small'
        self.image = self.tester.get_emi(root_device_type='instance-store')
        self.launch_config_name = 'ASConfig'
        self.auto_scaling_group_name = 'ASGroup'
        self.exact = 'ExactCapacity'
        self.change = 'ChangeInCapacity'
        self.percent = 'PercentChangeInCapacity'
        self.cleanUpAutoscaling()
        diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &'
        diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &'
        ### create launch configuration
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         instance_type=self.instance_type,
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name],
                                         instance_monitoring=True,
                                         user_data=diskWrite + ' ' + diskRead)
        ### create auto scale group
        self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                    availability_zones=self.zone,
                                    launch_config=self.launch_config_name,
                                    min_size=0,
                                    max_size=5,
                                    desired_capacity=1)
        ### create auto scale policies
        self.tester.create_as_policy(name=self.exact, adjustment_type=self.exact, scaling_adjustment=0,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.change, adjustment_type=self.change, scaling_adjustment=1,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.percent, adjustment_type=self.percent, scaling_adjustment=-50,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        ## Wait for the last instance to go to running state.
        state = None
        while not str(state).endswith('running'):
            self.debug('Waiting for AutoScaling instance to go to running state ...')
            self.tester.sleep(15)
            self.instanceid = self.tester.get_last_instance_id()
            instance_list = self.tester.get_instances(idstring=self.instanceid)
            self.instance = instance_list.pop()
            state = self.instance.state
        self.debug(self.instanceid + ' is now running.')
        ### Create and attach a volume
        self.volume = self.tester.create_volume(self.zone.pop())
        self.tester.attach_volume(self.instance, self.volume, '/dev/sdf')
        ### Get the newly created policies.
        self.policy_exact = self.tester.autoscale.get_all_policies(policy_names=[self.exact])
        self.policy_change = self.tester.autoscale.get_all_policies(policy_names=[self.change])
        self.policy_percent = self.tester.autoscale.get_all_policies(policy_names=[self.percent])
        self.debug('AutoScaling setup Complete')

    def cleanUpAutoscaling(self):
        self.tester.delete_all_alarms()
        self.tester.delete_all_policies()
        self.tester.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.delete_launch_config(self.launch_config_name)

    def isInService(self):
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.metric_alarm('exact', metric, comparison, threshold, period,
                                               evaluation_periods, statistic, description='TEST',
                                               namespace='AWS/EC2', dimensions=self.instanceDimension,
                                               alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.metric_alarm('change', metric, comparison, threshold, period,
                                                evaluation_periods, statistic, description='TEST',
                                                namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.metric_alarm('percent', metric, comparison, threshold, period,
                                                 evaluation_periods, statistic, description='TEST',
                                                 namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                 alarm_actions=self.policy_percent.pop().policy_arn)
        ### put all the alarms
        self.tester.put_metric_alarm(alarm_change)
        self.tester.put_metric_alarm(alarm_percent)
        self.tester.put_metric_alarm(alarm_exact)

    def testDesribeAlarms(self):
        self.debug(self.tester.describe_alarms())
        assert len(self.tester.describe_alarms()) >= 3
        ### test describe_alarms_for_metric for created alarms
        assert len(self.tester.describe_alarms_for_metric('CPUUtilization', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 3
        ### There should not be any alarms created for 'DiskReadOps'
        assert len(self.tester.describe_alarms_for_metric('DiskReadOps', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 0
        ### test describe_alarm_history
        self.debug(self.tester.describe_alarm_history())
        assert len(self.tester.describe_alarm_history()) >= 3
        pass

    def testAlarms(self):
        ### The number of running instances should equal the desired_capacity for the auto_scaling_group = (1)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.disable_alarm_actions('exact')
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name + ' instances = 1')
        ### The number of running instances should equal the desired_capacity + scaling_adjustment = (2)
        self.tester.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(len(group.instances))
        assert len(group.instances) == 2
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances changed to 2')
        ### The number of running instances should equal the total from the previous scaling_adjustment (2) - 50% = (1)
        self.tester.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances is None
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances is exactly 0')
        pass

    def testAwsReservedNamspaces(self):
        try:
            self.tester.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception, e:
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamspaces generated expected InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamspaces did not throw expected InvalidParameterValue error.')
        return False

class CloudWatchBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument('--clean_on_exit',
                                 action='store_true',
                                 default=True,
                                 help='Boolean, used to flag whether to run the clean up method after running the test list')
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        ### Setup basic eutester object
        if self.args.region:
            self.tester = CWops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config,
                                  password=self.args.password,
                                  credpath=self.args.credpath)
        self.start_time = str(int(time.time()))
        self.zone = self.tester.get_zones()
        self.namespace = 'Namespace-' + self.start_time
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        ### Setup AutoScaling
        self.setUpAutoscaling()
        ### Create Dimensions used in tests
        self.instanceDimension = newDimension('InstanceId', self.instanceid)
        self.volumeDimension = newDimension('VolumeId', self.volume.id)
        self.autoScalingDimension = newDimension('AutoScalingGroupName', self.auto_scaling_group_name)
        ### Setup Alarms
        self.setUpAlarms()
        ### Wait for metrics to populate, timeout 30 minutes
        self.tester.wait_for_result(self.IsMetricsListPopulated, result=True, timeout=1800)

    def clean_method(self):
        self.cleanUpAutoscaling()
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)
        pass

    def get_time_window(self, end=None, **kwargs):
        if not end:
            end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(**kwargs)
        return (start, end)

    def print_timeseries_for_graphite(self, timeseries):
        for datapoint in timeseries:
            print 'graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' + \
                  str((datapoint['Timestamp'] - datetime.datetime(1970, 1, 1)).total_seconds())

    def PutDataGetStats(self):
        assert self.testAwsReservedNamspaces()
        seconds_to_put_data = 120
        metric_data = 1
        time_string = str(int(time.time()))
        metric_name = "Metric-" + time_string
        incrementing = True
        while datetime.datetime.now().second != 0:
            self.tester.debug("Waiting for minute edge")
            self.tester.sleep(1)
        start = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_to_put_data)
        for i in xrange(seconds_to_put_data):
            timestamp = start + datetime.timedelta(seconds=i)
            self.tester.debug("Adding metric: {metric} to namespace: {namespace} with value {value} at {timestamp}"
                              .format(metric=metric_name, namespace=self.namespace,
                                      value=metric_data, timestamp=timestamp))
            self.tester.cw.put_metric_data(self.namespace, [metric_name], [metric_data], timestamp=timestamp)
            if metric_data == 600 or metric_data == 0:
                incrementing = not incrementing
            if incrementing:
                metric_data += 1
            else:
                metric_data -= 1
        end = start + datetime.timedelta(seconds=seconds_to_put_data)

        def isMetricsAvailable():
            metrics = self.tester.cw.list_metrics(namespace=self.namespace)
            if not metrics:
                return False
            else:
                return True

        self.tester.wait_for_result(isMetricsAvailable, True, timeout=900, poll_wait=300)
        metric = self.tester.cw.list_metrics(namespace=self.namespace)[0]
        assert isinstance(metric, Metric)
        stats_array = metric.query(start_time=start, end_time=end,
                                   statistics=['Average', 'Sum', 'Maximum', 'Minimum', 'SampleCount'])
        assert len(stats_array) == 2
        if stats_array[0]['Minimum'] == 1:
            first_sample = stats_array[0]
            second_sample = stats_array[1]
        else:
            second_sample = stats_array[0]
            first_sample = stats_array[1]
        print stats_array
        ## Check sample 1
        assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0
        assert first_sample['Average'] < 34 and first_sample['Average'] > 26
        assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500
        assert first_sample['SampleCount'] > 50
        ## Check sample 2
        assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50
        assert second_sample['Average'] < 95 and second_sample['Average'] > 80
        assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600
        assert second_sample['SampleCount'] > 50
        assert first_sample['Average'] < second_sample['Average']
        assert first_sample['Sum'] < second_sample['Sum']
        assert first_sample['Maximum'] < second_sample['Maximum']
        assert first_sample['Minimum'] < second_sample['Minimum']

    def ListMetrics(self, metricNames, dimension):
        self.debug('Get Metric list')
        metricList = self.tester.list_metrics(dimensions=dimension)
        self.debug('Checking to see if list is populated at all.')
        assert len(metricList) > 0
        self.debug('Make sure dimensions are listed.')
        found = False
        for metric in metricList:
            self.debug(metric.dimensions)
            if str(metric.dimensions).count(dimension[dimension.keys().pop()]):
                self.debug('Dimension ' + dimension[dimension.keys().pop()])
                found = True
                break
        assert found
        self.debug('Checking to see if we get all the expected instance metrics.')
        for metric in metricNames:
            assert str(metricList).count(metric['name']) > 0
            self.debug('Metric ' + metric['name'])
        pass

    def checkMetricFilters(self):
        self.debug('Check list_metrics filtering parameters')
        metricList = self.tester.list_metrics(namespace='AWS/EC2')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='AWS/EBS')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='NonExistent-NameSpace')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(metric_name='CPUUtilization')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(metric_name='NonExistent-Metric-Name')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.instanceDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceId', 'NonExistent-InstanceId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', self.image.id))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', 'NonExistent-imageId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', self.instance_type))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', 'NonExistent-InstanceType'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.autoScalingDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(
            dimensions=newDimension('AutoScalingGroupName', 'NonExistent-AutoScalingGroupName'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        pass

    def IsMetricsListPopulated(self):
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        metrics1 = self.tester.cw.get_metric_statistics(60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average',
                                                        dimensions=self.instanceDimension, unit='Percent')
        metrics2 = self.tester.cw.get_metric_statistics(60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average',
                                                        dimensions=self.volumeDimension, unit='Bytes')
        if len(metrics1) > 0 and len(metrics2) > 0:
            return True
        else:
            return False

    def GetMetricStatistics(self, metricNames, namespace, dimension):
        period = 60
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        stats = self.tester.get_stats_array()
        ### Check to make sure we are getting all namespace metrics and statistics
        for i in range(len(metricNames)):
            values = []
            for j in range(len(stats)):
                metricName = metricNames[i]['name']
                statisticName = stats[j]
                unitType = metricNames[i]['unit']
                metrics = self.tester.get_metric_statistics(period, start, end, metricName, namespace,
                                                            statisticName, dimensions=dimension, unit=unitType)
                ### This assures we are getting all statistics for all dimension metrics.
                assert int(len(metrics)) > 0
                statisticValue = str(metrics[0][statisticName])
                self.debug(metricName + ' : ' + statisticName + '=' + statisticValue + ' ' + unitType)
                values.append(statisticValue)
            self.tester.validateStats(values)

    def setUpAutoscaling(self):
        ### Set up autoscaling variables
        self.debug('Setting up AutoScaling, starting 1 instance')
        self.instance_type = 'm1.small'
        self.image = self.tester.get_emi(root_device_type='instance-store')
        self.launch_config_name = 'ASConfig'
        self.auto_scaling_group_name = 'ASGroup'
        self.exact = 'ExactCapacity'
        self.change = 'ChangeInCapacity'
        self.percent = 'PercentChangeInCapacity'
        self.cleanUpAutoscaling()
        diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &'
        diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &'
        ### create launch configuration
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         instance_type=self.instance_type,
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name],
                                         instance_monitoring=True,
                                         user_data=diskWrite + ' ' + diskRead)
        ### create auto scale group
        self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                    availability_zones=self.zone,
                                    launch_config=self.launch_config_name,
                                    min_size=0,
                                    max_size=5,
                                    desired_capacity=1)
        ### create auto scale policies
        self.tester.create_as_policy(name=self.exact, adjustment_type=self.exact, scaling_adjustment=0,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.change, adjustment_type=self.change, scaling_adjustment=1,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.percent, adjustment_type=self.percent, scaling_adjustment=-50,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        ## Wait for the instance to go to running state.
        self.tester.wait_for_result(self.tester.wait_for_instances, True, timeout=600,
                                    group_name=self.auto_scaling_group_name)
        self.instanceid = self.tester.get_last_instance_id()
        instance_list = self.tester.get_instances(idstring=self.instanceid)
        self.instance = instance_list.pop()
        self.debug('ASG is now setup.')
        ### Create and attach a volume
        self.volume = self.tester.create_volume(self.zone.pop())
        self.tester.attach_volume(self.instance, self.volume, '/dev/sdf')
        ### Get the newly created policies.
        self.policy_exact = self.tester.autoscale.get_all_policies(policy_names=[self.exact])
        self.policy_change = self.tester.autoscale.get_all_policies(policy_names=[self.change])
        self.policy_percent = self.tester.autoscale.get_all_policies(policy_names=[self.percent])
        self.debug('AutoScaling setup Complete')

    def cleanUpAutoscaling(self):
        self.tester.delete_all_alarms()
        self.tester.delete_all_policies()
        self.tester.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.delete_launch_config(self.launch_config_name)

    def isInService(self):
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.metric_alarm('exact', metric, comparison, threshold, period,
                                               evaluation_periods, statistic, description='TEST',
                                               namespace='AWS/EC2', dimensions=self.instanceDimension,
                                               alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.metric_alarm('change', metric, comparison, threshold, period,
                                                evaluation_periods, statistic, description='TEST',
                                                namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.metric_alarm('percent', metric, comparison, threshold, period,
                                                 evaluation_periods, statistic, description='TEST',
                                                 namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                 alarm_actions=self.policy_percent.pop().policy_arn)
        ### put all the alarms
        self.tester.put_metric_alarm(alarm_change)
        self.tester.put_metric_alarm(alarm_percent)
        self.tester.put_metric_alarm(alarm_exact)

    def testDesribeAlarms(self):
        self.debug(self.tester.describe_alarms())
        assert len(self.tester.describe_alarms()) >= 3
        ### test describe_alarms_for_metric for created alarms
        assert len(self.tester.describe_alarms_for_metric('CPUUtilization', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 3
        ### There should not be any alarms created for 'DiskReadOps'
        assert len(self.tester.describe_alarms_for_metric('DiskReadOps', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 0
        ### test describe_alarm_history
        self.debug(self.tester.describe_alarm_history())
        assert len(self.tester.describe_alarm_history()) >= 3
        pass

    def testAlarms(self):
        ### The number of running instances should equal the desired_capacity for the auto_scaling_group = (1)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.disable_alarm_actions('exact')
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name + ' instances = 1')
        ### The number of running instances should equal the desired_capacity + scaling_adjustment = (2)
        self.tester.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(len(group.instances))
        assert len(group.instances) == 2
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances changed to 2')
        ### The number of running instances should equal the total from the previous scaling_adjustment (2) - 50% = (1)
        self.tester.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances is None
        self.debug('Success: the number of running ' + self.auto_scaling_group_name + ' instances is exactly 0')
        pass

    def testAwsReservedNamspaces(self):
        try:
            self.tester.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception, e:
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamspaces generated expected InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamspaces did not throw expected InvalidParameterValue error.')
        return False

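# --- Hypothetical runner (not part of the original file) ---
# A sketch of the usual eutester-style driver for CloudWatchBasics. It assumes this
# eutester version provides the EutesterTestCase helpers create_testunit_by_name()
# and run_test_case_list(), and that the parser exposes a --tests option; the
# default test list below is illustrative only.
if __name__ == "__main__":
    testcase = CloudWatchBasics()
    test_names = testcase.args.tests or ["PutDataGetStats", "testDesribeAlarms", "testAlarms"]
    unit_list = [testcase.create_testunit_by_name(name) for name in test_names]
    result = testcase.run_test_case_list(unit_list, clean_on_exit=testcase.args.clean_on_exit)
    exit(result)
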
class Instances(unittest.TestCase):
    def setUp(self):
        # Set up the basic eutester object
        self.tester = Eucaops(config_file="../input/2b_tested.lst",
                              password="******",
                              credpath="../credentials")
        self.tester.poll_count = 240
        self.tester.start_euca_logs()

        ### Determine whether virtio drivers are being used
        self.device_prefix = "sd"
        if self.tester.hypervisor == "kvm":
            self.device_prefix = "vd"
        self.ephemeral = "/dev/" + self.device_prefix + "a2"

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"

        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name)
        self.tester.sleep(10)

    def tearDown(self):
        """Terminate instances, remove the keypair and group, and stop/save Euca logs"""
        self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.tester.stop_euca_logs()
        self.tester.save_euca_logs()
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def test1_Instance(self):
        """Instance checks including reachability and ephemeral storage"""
        for instance in self.reservation.instances:
            self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                            'Instance did not go to running')
            self.assertNotEqual(instance.public_dns_name, instance.private_ip_address,
                                'Public and private IP are the same')
            self.assertTrue(self.tester.ping(instance.public_dns_name), 'Could not ping instance')
            instance_ssh = Eucaops(hostname=instance.public_dns_name, keypath=self.keypath)
            self.assertTrue(instance_ssh.found("ls -1 " + self.ephemeral, self.ephemeral),
                            'Did not find ephemeral storage at ' + self.ephemeral)
        self.assertTrue(self.tester.terminate_instances(self.reservation),
                        'Failure when terminating instance')

    def test2_ElasticIps(self):
        """Basic test for elastic IPs"""
        for instance in self.reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address, 'Unable to allocate address')
            self.assertTrue(self.tester.associate_address(instance, address))
            self.tester.sleep(30)
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            "Could not ping instance with new IP")
            address.disassociate()
            self.tester.sleep(30)
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            "Could not ping instance with new IP")
            self.tester.release_address()

    def test3_MaxInstances(self):
        """Run the maximum number of available m1.small instances"""
        self.assertTrue(self.tester.terminate_instances(self.reservation),
                        "Was not able to terminate original instance")
        available_small = self.tester.get_available_vms()
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,
                                                    min=available_small, max=available_small)
        self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                        'Not all instances went to running')

    def test4_LargeInstance(self):
        """Run one of the largest instance type, c1.xlarge"""
        self.assertTrue(self.tester.terminate_instances(self.reservation),
                        "Was not able to terminate original instance")
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,
                                                    type="c1.xlarge")
        self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                        'Not all instances went to running')

    def test5_MetaData(self):
        """Check metadata for consistency"""
        # Missing nodes
        # ['block-device-mapping/', 'ami-manifest-path', 'hostname', 'placement/']
        for instance in self.reservation.instances:
            instance_ssh = Eucaops(hostname=instance.public_dns_name, keypath=self.keypath)
            ### Check metadata service
            self.assertTrue(re.search(instance_ssh.get_metadata("public-keys/0/")[0], self.keypair.name))
            self.assertTrue(re.search(instance_ssh.get_metadata("security-groups")[0], self.group))
            # self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], ""))
            self.assertTrue(re.search(instance_ssh.get_metadata("instance-id")[0], instance.id))
            self.assertTrue(re.search(instance_ssh.get_metadata("local-ipv4")[0], instance.private_ip_address))
            self.assertTrue(re.search(instance_ssh.get_metadata("public-ipv4")[0], instance.ip_address))
            self.assertTrue(re.search(instance_ssh.get_metadata("ami-id")[0], instance.image_id))
            self.assertTrue(re.search(instance_ssh.get_metadata("ami-launch-index")[0], instance.ami_launch_index))
            self.assertTrue(re.search(instance_ssh.get_metadata("reservation-id")[0], self.reservation.id))
            self.assertTrue(re.search(instance_ssh.get_metadata("kernel-id")[0], instance.kernel))
            self.assertTrue(re.search(instance_ssh.get_metadata("public-hostname")[0], instance.public_dns_name))
            self.assertTrue(re.search(instance_ssh.get_metadata("ramdisk-id")[0], instance.ramdisk))
            self.assertTrue(re.search(instance_ssh.get_metadata("instance-type")[0], instance.instance_type))

    def test6_Reboot(self):
        """Reboot instance, ensure IP connectivity and that volumes stay attached"""
        for instance in self.reservation.instances:
            ### Create 1GB volume in the first AZ
            volume = self.tester.create_volume(self.tester.ec2.get_all_zones()[0].name)
            ### Check the devices on the instance before the attachment
            device_path = "/dev/" + self.device_prefix + "j"
            instance_ssh = Eucaops(hostname=instance.public_dns_name, keypath=self.keypath)
            before_attach = instance_ssh.sys("ls -1 /dev/ | grep " + self.device_prefix)
            ### Attach the volume to the instance
            self.assertTrue(self.tester.attach_volume(instance, volume, device_path),
                            "Failure attaching volume")
            ### Check devices after attachment
            after_attach = instance_ssh.sys("ls -1 /dev/ | grep " + self.device_prefix)
            new_devices = self.tester.diff(after_attach, before_attach)
            ### Check for the device on the instance
            self.assertTrue(instance_ssh.check_device("/dev/" + new_devices[0]),
                            "Did not find device on instance before reboot")
            ### Reboot instance
            instance.reboot()
            self.tester.sleep(30)
            ### Check for the device on the instance again
            instance_ssh = Eucaops(hostname=instance.public_dns_name, keypath=self.keypath)
            self.assertTrue(instance_ssh.check_device("/dev/" + new_devices[0]),
                            "Did not find device on instance after reboot")
            self.assertTrue(self.tester.detach_volume(volume), "Unable to detach volume")
            self.assertTrue(self.tester.delete_volume(volume), "Unable to delete volume")


def suite():
    tests = ['test1_Instance', 'test2_ElasticIps', 'test3_MaxInstances', 'test4_LargeInstance',
             'test5_MetaData', 'test6_Reboot']
    return unittest.TestSuite(map(Instances, tests))

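# --- Hypothetical usage note (not part of the original file) ---
# The suite() factory above can be handed to the standard unittest text runner.
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite())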