Code example #1
0
File: autoscaling.py  Project: datatonic/nephoria
class AutoScalingBasics(EutesterTestCase):
    """Basic Auto Scaling test case.

    Exercises launch configurations, auto scaling groups, scaling policies
    (ChangeInCapacity / PercentChangeInCapacity / ExactCapacity) and account
    limits against a Eucalyptus/AWS-compatible endpoint via a Eucaops tester.
    """

    def __init__(self, extra_args=None):
        """Parse CLI arguments, build the Eucaops tester and create the
        security group, keypair and image shared by all tests.

        :param extra_args: optional iterable of extra argparse argument
            strings to register before parsing.
        """
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic nephoria object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config, password=self.args.password)

        ### Add and authorize a group for the instance
        self.group = self.tester.ec2.add_group(group_name="group-" + str(time.time()))
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
        # Authorize ICMP separately so the instances can be pinged.
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.ec2.create_keypair_and_localcert("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        # Use the image passed via --emi, otherwise pick any registered EMI.
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.ec2.get_emi()
        self.address = None
        self.asg = None  # set by tests that create an auto scaling group

    def clean_method(self):
        """Tear down any auto scaling group left behind by a test, then
        clean up all artifacts created by the tester."""
        if self.asg:
            # Wait until no scaling activity is in progress (deletion fails
            # with ScalingActivityInProgress otherwise), then force-delete.
            self.tester.wait_for_result(self.gracefully_delete, True)
            self.tester.autoscaling.delete_as_group(self.asg.name, force=True)
        self.tester.cleanup_artifacts()

    def AutoScalingBasics(self):
        """End-to-end happy path.

        Creates a launch configuration and an auto scaling group, then
        creates, verifies and executes one policy of each adjustment type,
        checking the group's desired capacity after each execution, and
        finally deletes the policies, the group and the launch config.

        :raises Exception: when a policy is missing after creation or a
            policy execution does not yield the expected desired capacity.
        """
        ### create launch configuration
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=self.launch_config_name,
                                                     image_id=self.image.id,
                                                     instance_type="m1.small",
                                                     key_name=self.keypair.name,
                                                     security_groups=[self.group.name])

        ### create auto scale group
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.autoscaling.create_as_group(group_name=self.auto_scaling_group_name,
                                                           availability_zones=self.tester.ec2.get_zones(),
                                                           launch_config=self.launch_config_name,
                                                           min_size=0,
                                                           max_size=5)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.autoscaling.create_as_policy(name=self.up_policy_name,
                                                 adjustment_type="ChangeInCapacity",
                                                 scaling_adjustment=4,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=120)
        if len(self.tester.autoscaling.connection.get_all_policies(policy_names=[self.up_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.up_policy_name + ' not created')

        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50  # percentage: halve the capacity
        self.tester.autoscaling.create_as_policy(name=self.down_policy_name,
                                                 adjustment_type="PercentChangeInCapacity",
                                                 scaling_adjustment=self.down_size,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=120)

        if len(self.tester.autoscaling.connection.get_all_policies(policy_names=[self.down_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.down_policy_name + ' not created')

        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0  # scale the group down to exactly 0 instances
        self.tester.autoscaling.create_as_policy(name=self.exact_policy_name,
                                                 adjustment_type="ExactCapacity",
                                                 scaling_adjustment=self.exact_size,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=120)

        if len(self.tester.autoscaling.connection.get_all_policies(policy_names=[self.exact_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.exact_policy_name + ' not created')

        self.debug("**** Created Auto Scaling Policies: " + self.up_policy_name + " " + self.down_policy_name + " " +
                   self.exact_policy_name)

        # NOTE(review): this uses tester.wait_for_result while the waits
        # below go through tester.autoscaling.wait_for_result -- confirm
        # both entry points exist and behave the same.
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)
        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.autoscaling.execute_as_policy(policy_name=self.up_policy_name,
                                                  as_group=self.auto_scaling_group_name,
                                                  honor_cooldown=False)
        if self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug("Executed  ChangeInCapacity policy, increased desired capacity to: " +
                   str(self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity))

        self.tester.autoscaling.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.autoscaling.execute_as_policy(policy_name=self.down_policy_name,
                                                  as_group=self.auto_scaling_group_name,
                                                  honor_cooldown=False)
        if self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed PercentChangeInCapacity policy, decreased desired capacity to: " +
                   str(self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity))

        self.tester.autoscaling.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.autoscaling.execute_as_policy(policy_name=self.exact_policy_name,
                                                  as_group=self.auto_scaling_group_name,
                                                  honor_cooldown=False)
        if self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.exact_size:
            # Fixed: message previously duplicated the percentage-policy text.
            raise Exception("Auto Scale exact capacity not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " +
                   str(self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity))

        self.tester.autoscaling.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Delete all Auto Scaling Policies
        self.tester.autoscaling.delete_all_policies()

        ### Test Delete Auto Scaling Group
        self.tester.autoscaling.wait_for_result(self.gracefully_delete, True)
        self.asg = None

        ### Test delete launch config
        self.tester.autoscaling.delete_launch_config(self.launch_config_name)

    def scaling_activities_complete(self):
        """Return True when every scaling activity of self.asg has reached
        100% progress, False otherwise (used as a wait_for_result poller)."""
        activities = self.asg.get_activities()
        for activity in activities:
            assert isinstance(activity, Activity)
            if activity.progress != 100:
                return False
        return True

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a 100 LC per account limit this nephoria_unit_tests what happens if we create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.autoscaling.create_launch_config(name=self.launch_config_name,
                                                         image_id=self.image.id)
            if len(self.tester.autoscaling.describe_launch_config()) > 100:
                # Fixed typo: "then" -> "than".
                raise Exception("More than 100 launch configs exist in 1 account")
        for lc in self.tester.autoscaling.describe_launch_config():
            self.tester.autoscaling.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        """
        AWS enforces a 25 policy per account limit this nephoria_unit_tests what happens if we create more
        """
        launch_config_name = 'LC-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=launch_config_name,
                                                     image_id=self.image.id,
                                                     instance_type="m1.small",
                                                     key_name=self.keypair.name,
                                                     security_groups=[self.group.name])
        asg_name = 'ASG-' + str(time.time())
        # Fixed: zones come from the EC2 ops object, as in every other test
        # in this class (autoscaling ops has no get_zones).
        self.asg = self.tester.autoscaling.create_as_group(group_name=asg_name,
                                                           launch_config=launch_config_name,
                                                           availability_zones=self.tester.ec2.get_zones(),
                                                           min_size=0,
                                                           max_size=5)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.autoscaling.create_as_policy(name=policy_name,
                                                     adjustment_type="ExactCapacity",
                                                     as_name=asg_name,
                                                     scaling_adjustment=0,
                                                     cooldown=120)
        # Fixed: query policies through .connection, consistent with the
        # get_all_policies calls elsewhere in this class.
        if len(self.tester.autoscaling.connection.get_all_policies()) > 25:
            raise Exception("More than 25 policies exist for 1 auto scaling group")
        self.tester.autoscaling.wait_for_result(self.gracefully_delete, True)
        self.asg = None

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        """
        remove ALL scaling policies, auto scaling groups and launch configs
        """
        self.tester.autoscaling.delete_all_policies()
        self.tester.autoscaling.delete_all_autoscaling_groups()
        self.tester.autoscaling.delete_all_launch_configs()

    def change_config(self):
        """Verify that updating an ASG to a new launch configuration makes
        newly-launched instances use the new instance type."""
        ### create initial launch configuration
        first_launch_config = 'First-Launch-Config-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=first_launch_config, image_id=self.image.id, instance_type="m1.small")

        # create a replacement LC with different instance type
        second_launch_config = 'Second-Launch-Config-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=second_launch_config, image_id=self.image.id, instance_type="m1.large")

        ### create auto scale group
        auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.autoscaling.create_as_group(group_name=auto_scaling_group_name,
                                                           launch_config=first_launch_config,
                                                           availability_zones=self.tester.ec2.get_zones(),
                                                           min_size=1,
                                                           max_size=4,
                                                           desired_capacity=1)

        assert isinstance(self.asg, AutoScalingGroup)
        self.tester.autoscaling.wait_for_result(self.tester.autoscaling.wait_for_instances, True,
                                                timeout=360, group_name=self.asg.name, tester=self.tester)

        # Switch the group over to the replacement launch config.
        self.tester.autoscaling.update_as_group(group_name=self.asg.name,
                                                launch_config=second_launch_config,
                                                availability_zones=self.tester.ec2.get_zones(),
                                                min_size=1,
                                                max_size=4)
        ### Set desired capacity
        new_desired = 2
        self.asg.set_capacity(new_desired)
        self.tester.autoscaling.wait_for_result(self.tester.autoscaling.wait_for_instances, True, timeout=360,
                                                group_name=self.asg.name, number=new_desired, tester=self.tester)
        # The instance launched after the update must use the new LC's type.
        last_instance = self.tester.ec2.get_instances(
            idstring=self.tester.autoscaling.get_last_instance_id(tester=self.tester))[0]
        assert last_instance.instance_type == "m1.large"

        ### Delete Auto Scaling Group
        self.tester.autoscaling.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### delete launch configs
        self.tester.autoscaling.delete_launch_config(first_launch_config)
        self.tester.autoscaling.delete_launch_config(second_launch_config)

    def gracefully_delete(self, asg=None):
        """Attempt to force-delete an auto scaling group.

        :param asg: group to delete; defaults to self.asg.
        :returns: True on success, False when the service rejects the delete
            because a scaling activity is still in progress (so this can be
            polled via wait_for_result).
        """
        if not asg:
            asg = self.asg
        assert isinstance(asg, AutoScalingGroup)
        try:
            self.tester.autoscaling.delete_as_group(name=asg.name, force=True)
        except BotoServerError as e:  # fixed py2-only "except X, e" syntax
            if e.status == 400 and e.reason == "ScalingActivityInProgress":
                return False
        return True
Code example #2
0
class CloudWatchBasics(EutesterTestCase):
    """Basic CloudWatch test case.

    Exercises custom metric data (put/get), metric listing and filtering,
    statistics queries, and alarms wired to Auto Scaling policies so that
    alarm state changes drive group capacity.
    """

    def __init__(self, extra_args=None):
        """Parse CLI arguments, build the tester, set up the auto scaling
        group and the alarms, then wait for metrics to start flowing.

        :param extra_args: optional iterable of extra argparse argument
            strings to register before parsing.
        """
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument('--clean_on_exit',
                                 action='store_true', default=True,
                                 help='Boolean, used to flag whether to run clean up method after running test list)')
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)

        self.get_args()
        ### Setup basic nephoria object
        # NOTE(review): the region path builds a CWops tester while the
        # non-region path builds a full Eucaops -- confirm this asymmetry
        # is intentional (the rest of the class uses ec2/autoscaling/
        # cloudwatch sub-ops, which CWops may not provide).
        if self.args.region:
            self.tester = CWops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password, credpath=self.args.credpath)
        self.start_time = str(int(time.time()))
        self.zone = self.tester.ec2.get_zones()
        self.namespace = 'Namespace-' + self.start_time
        self.keypair = self.tester.ec2.create_keypair_and_localcert()
        self.group = self.tester.ec2.add_group()
        ### Setup AutoScaling
        self.setUpAutoscaling()
        ### Create Dimensions used in nephoria_unit_tests
        self.instanceDimension = newDimension('InstanceId', self.instanceid)
        self.volumeDimension = newDimension('VolumeId', self.volume.id)
        self.autoScalingDimension = newDimension('AutoScalingGroupName', self.auto_scaling_group_name)
        ### Setup Alarms
        self.setUpAlarms()
        ### Wait for metrics to populate, timeout 30 minute
        self.tester.wait_for_result(self.IsMetricsListPopulated, result=True, timeout=1800)

    def clean_method(self):
        """Tear down auto scaling resources, tester artifacts and the keypair."""
        self.cleanUpAutoscaling()
        self.tester.cleanup_artifacts()
        self.tester.ec2.delete_keypair(self.keypair)
        pass

    def get_time_window(self, end=None, **kwargs):
        """Return a (start, end) UTC datetime tuple where start is `end`
        minus a timedelta built from **kwargs (minutes=..., hours=..., etc.).

        :param end: window end; defaults to utcnow.
        """
        if not end:
            end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(**kwargs)
        return (start, end)

    def print_timeseries_for_graphite(self, timeseries):
        """Dump a timeseries in Graphite plaintext format (metric value epoch)."""
        for datapoint in timeseries:
            print('graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' +
                  str((datapoint['Timestamp'] - datetime.datetime(1970, 1, 1)).total_seconds()))

    def PutDataGetStats(self):
        """Put two minutes of triangle-wave custom metric data, then query
        the per-minute statistics and sanity-check both samples."""
        assert self.testAwsReservedNamspaces()
        seconds_to_put_data = 120
        metric_data = 1
        time_string = str(int(time.time()))
        metric_name = "Metric-" + time_string
        incrementing = True
        # Align to a minute boundary so the data lands in exactly two
        # one-minute statistics buckets.
        while datetime.datetime.now().second != 0:
            self.tester.debug("Waiting for minute edge")
            self.tester.sleep(1)
        start = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_to_put_data)
        for i in xrange(seconds_to_put_data):
            timestamp = start + datetime.timedelta(seconds=i)
            self.tester.debug("Adding metric: {metric} to namespace: {namespace} with value {value} at {timestamp}"
                              .format(metric=metric_name, namespace=self.namespace,
                                      value=metric_data, timestamp=timestamp))
            self.tester.cloudwatch.put_metric_data(self.namespace, [metric_name], [metric_data], timestamp=timestamp)
            # Reverse direction at the wave's bounds.
            if metric_data == 600 or metric_data == 0:
                incrementing = not incrementing
            if incrementing:
                metric_data += 1
            else:
                metric_data -= 1
        end = start + datetime.timedelta(seconds=seconds_to_put_data)
        self.tester.sleep(60)
        metric = self.tester.cloudwatch.list_metrics(namespace=self.namespace)[0]
        assert isinstance(metric, Metric)
        stats_array = metric.query(start_time=start, end_time=end, statistics=['Average', 'Sum', 'Maximum', 'Minimum', 'SampleCount'])
        assert len(stats_array) == 2
        # Order the two one-minute samples: the first minute starts at value 1.
        if stats_array[0]['Minimum'] == 1:
            first_sample = stats_array[0]
            second_sample = stats_array[1]
        else:
            second_sample = stats_array[0]
            first_sample = stats_array[1]
        print(stats_array)

        ##Check sample 1
        assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0
        assert first_sample['Average'] < 34 and first_sample['Average'] > 26
        assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500
        assert first_sample['SampleCount'] > 50
        ##Check sample 2
        assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50
        assert second_sample['Average'] < 95 and second_sample['Average'] > 80
        assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600
        assert second_sample['SampleCount'] > 50

        # The wave is increasing across the window, so every statistic of
        # the first minute must be below the second minute's.
        assert first_sample['Average'] < second_sample['Average']
        assert first_sample['Sum'] < second_sample['Sum']
        assert first_sample['Maximum'] < second_sample['Maximum']
        assert first_sample['Minimum'] < second_sample['Minimum']

    def ListMetrics(self, metricNames, dimension):
        """Assert that list_metrics returns data for `dimension` and that
        every expected metric name appears in the listing.

        :param metricNames: iterable of dicts with a 'name' key.
        :param dimension: single-entry dimension dict to filter by.
        """
        self.debug('Get Metric list')
        metricList = self.tester.cloudwatch.list_metrics(dimensions=dimension)
        self.debug('Checking to see if list is populated at all.')
        assert len(metricList) > 0
        self.debug('Make sure dimensions are listed.')
        found = False
        for metric in metricList:
            self.debug(metric.dimensions)
            if str(metric.dimensions).count(dimension[dimension.keys().pop()]):
                self.debug('Dimension ' + dimension[dimension.keys().pop()])
                found = True
                break
        assert found
        self.debug('Checking to see if we get all the expected instance metrics.')
        for metric in metricNames:
            assert str(metricList).count(metric['name']) > 0
            self.debug('Metric ' + metric['name'])
        pass

    def checkMetricFilters(self):
        """Exercise every list_metrics filter (namespace, metric_name and
        each dimension type), asserting hits for real values and zero
        results for non-existent ones."""
        self.debug('Check list_metrics filtering parameters')
        metricList = self.tester.cloudwatch.list_metrics(namespace='AWS/EC2')
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(namespace='AWS/EBS')
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(namespace='NonExistent-NameSpace')
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(metric_name='CPUUtilization')
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(metric_name='NonExistent-Metric-Name')
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.instanceDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('InstanceId', 'NonExistent-InstanceId'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('ImageId', self.image.id))
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('ImageId', 'NonExistent-imageId'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('InstanceType', self.instance_type))
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('InstanceType', 'NonExistent-InstanceType'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.autoScalingDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('AutoScalingGroupName', 'NonExistent-AutoScalingGroupName'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        pass

    def IsMetricsListPopulated(self):
        """Poller: True once both instance (CPUUtilization) and volume
        (VolumeReadBytes) statistics are available for the last 20 minutes."""
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        metrics1 = self.tester.cloudwatch.get_metric_statistics(60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average', dimensions=self.instanceDimension, unit='Percent')
        metrics2 = self.tester.cloudwatch.get_metric_statistics(60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average', dimensions=self.volumeDimension, unit='Bytes')
        if len(metrics1) > 0 and len(metrics2) > 0:
            return True
        else:
            return False

    def GetMetricStatistics(self, metricNames, namespace, dimension):
        """Query every statistic for every metric name over the last 20
        minutes and assert each query returns data.

        :param metricNames: iterable of dicts with 'name' and 'unit' keys.
        :param namespace: CloudWatch namespace to query.
        :param dimension: dimension dict to filter by.
        """
        period = 60
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        stats = self.tester.cloudwatch.get_stats_array()
        ###Check to make sure we are getting all namespace metrics and statistics
        for i in range(len(metricNames)):
            values = []
            for j in range(len(stats)):
                metricName = metricNames[i]['name']
                statisticName = stats[j]
                unitType = metricNames[i]['unit']
                metrics = self.tester.cloudwatch.get_metric_statistics(period, start, end, metricName, namespace, statisticName, dimensions=dimension, unit=unitType)
                ### This assures we are getting all statistics for all dimension metrics.
                assert int(len(metrics)) > 0
                statisticValue = str(metrics[0][statisticName])
                self.debug(metricName + ' : ' + statisticName + '=' + statisticValue + ' ' + unitType)
                values.append(statisticValue)
        # NOTE(review): `values` is re-initialized for each metric but only
        # the last metric's values reach validateStats here -- confirm
        # whether this call belongs inside the outer loop.
        self.tester.cloudwatch.validateStats(values)

    def setUpAutoscaling(self):
        """Create the launch config, ASG (desired capacity 1) and the three
        scaling policies, wait for the first instance, and attach a volume
        so EBS metrics are generated."""
        ### setup autoscaling variables
        self.debug('Setting up AutoScaling, starting 1 instance')
        self.instance_type = 'm1.small'
        self.image = self.tester.ec2.get_emi(root_device_type='instance-store')
        self.launch_config_name = 'ASConfig'
        self.auto_scaling_group_name = 'ASGroup'
        self.exact = 'ExactCapacity'
        self.change = 'ChangeInCapacity'
        self.percent = 'PercentChangeInCapacity'
        # Remove any leftovers from a previous run before creating anew.
        self.cleanUpAutoscaling()
        # Busy-loop dd commands run via user-data so the instance produces
        # non-trivial CPU and disk metrics.
        diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &'
        diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &'
        ### create launch configuration
        self.tester.autoscaling.create_launch_config(name=self.launch_config_name,
                                                     image_id=self.image.id,
                                                     instance_type=self.instance_type,
                                                     key_name=self.keypair.name,
                                                     security_groups=[self.group.name],
                                                     instance_monitoring=True,
                                                     user_data=diskWrite + ' ' + diskRead)
        ### create auto scale group
        self.tester.autoscaling.create_as_group(group_name=self.auto_scaling_group_name,
                                                availability_zones=self.zone,
                                                launch_config=self.launch_config_name,
                                                min_size=0,
                                                max_size=5,
                                                desired_capacity=1)
        ### create auto scale policies
        self.tester.autoscaling.create_as_policy(name=self.exact,
                                                 adjustment_type=self.exact,
                                                 scaling_adjustment=0,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=0)

        self.tester.autoscaling.create_as_policy(name=self.change,
                                                 adjustment_type=self.change,
                                                 scaling_adjustment=1,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=0)

        self.tester.autoscaling.create_as_policy(name=self.percent,
                                                 adjustment_type=self.percent,
                                                 scaling_adjustment=-50,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=0)

        ## Wait for the instance to go to running state.
        self.tester.wait_for_result(self.tester.autoscaling.wait_for_instances, True, timeout=600,
                                    group_name=self.auto_scaling_group_name, tester=self.tester)
        self.instanceid = self.tester.autoscaling.get_last_instance_id(tester=self.tester)
        instance_list = self.tester.ec2.get_instances(idstring=self.instanceid)
        self.instance = instance_list.pop()
        self.debug('ASG is now setup.')
        ### Create and attach a volume
        self.volume = self.tester.ec2.create_volume(self.zone.pop())
        self.tester.ec2.attach_volume(self.instance, self.volume, '/dev/sdf')
        ### Get the newly created policies.
        self.policy_exact = self.tester.autoscaling.connection.get_all_policies(policy_names=[self.exact])
        self.policy_change = self.tester.autoscaling.connection.get_all_policies(policy_names=[self.change])
        self.policy_percent = self.tester.autoscaling.connection.get_all_policies(policy_names=[self.percent])
        self.debug('AutoScaling setup Complete')

    def cleanUpAutoscaling(self):
        """Delete all alarms and policies, then the ASG and launch config."""
        self.tester.cloudwatch.delete_all_alarms()
        self.tester.autoscaling.delete_all_policies()
        self.tester.autoscaling.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.autoscaling.delete_launch_config(self.launch_config_name)

    def isInService(self):
        """Poller: True when every instance in the ASG is 'InService'."""
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        """Create three CPUUtilization > 0 alarms, one per scaling policy,
        so setting an alarm's state fires the corresponding policy."""
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.cloudwatch.metric_alarm('exact', metric, comparison, threshold, period, evaluation_periods, statistic,
                                                          description='TEST',
                                                          namespace='AWS/EC2',
                                                          dimensions=self.instanceDimension,
                                                          alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.cloudwatch.metric_alarm('change', metric, comparison, threshold, period, evaluation_periods, statistic,
                                                           description='TEST',
                                                           namespace='AWS/EC2',
                                                           dimensions=self.instanceDimension,
                                                           alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.cloudwatch.metric_alarm('percent', metric, comparison, threshold, period, evaluation_periods, statistic,
                                                            description='TEST',
                                                            namespace='AWS/EC2',
                                                            dimensions=self.instanceDimension,
                                                            alarm_actions=self.policy_percent.pop().policy_arn)
        ### put all the alarms
        self.tester.cloudwatch.put_metric_alarm(alarm_change)
        self.tester.cloudwatch.put_metric_alarm(alarm_percent)
        self.tester.cloudwatch.put_metric_alarm(alarm_exact)

    def testDesribeAlarms(self):
        """Verify describe_alarms, describe_alarms_for_metric and
        describe_alarm_history see the three alarms created in setUpAlarms.
        (Method name keeps its historical spelling; callers depend on it.)"""
        self.debug(self.tester.cloudwatch.describe_alarms())
        assert len(self.tester.cloudwatch.describe_alarms()) >= 3
        ### test describe_alarms_for_metric for created alarms
        assert len(self.tester.cloudwatch.describe_alarms_for_metric('CPUUtilization', 'AWS/EC2', dimensions=self.instanceDimension)) == 3
        ### There should not be any alarms created for 'DiskReadOps'
        assert len(self.tester.cloudwatch.describe_alarms_for_metric('DiskReadOps', 'AWS/EC2', dimensions=self.instanceDimension)) == 0
        ### test describe_alarm_history
        self.debug(self.tester.cloudwatch.describe_alarm_history())
        assert len(self.tester.cloudwatch.describe_alarm_history()) >= 3
        pass

    def testAlarms(self):
        """Drive the ASG through capacity changes by toggling alarm state:
        disabled 'exact' is a no-op, 'change' adds 1, 'percent' halves,
        re-enabled 'exact' scales to 0."""
        ### The number of running instances should equal the desired_capacity for the auto_scaling_group = (1)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.cloudwatch.disable_alarm_actions('exact')
        self.tester.cloudwatch.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.cloudwatch.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name + ' instances = 1')
        ### The number of running instances should equal the desired_capacity + scaling_adjustment = (2)
        self.tester.cloudwatch.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(len(group.instances))
        assert len(group.instances) == 2
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances changed to 2')
        ### The number of running instances should equal the total from the previous scaling_adjustment (2) - 50% = (1)
        self.tester.cloudwatch.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.cloudwatch.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances == None
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances is exactly 0')
        pass

    def testAwsReservedNamspaces(self):
        """Verify that putting metric data into an 'AWS/' namespace is
        rejected; returns True on the expected error, False otherwise.
        (Method name keeps its historical spelling; callers depend on it.)"""
        try:
            self.tester.cloudwatch.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception as e:  # fixed py2-only "except X, e" syntax
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamspaces generated expected InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamspaces did not throw expected InvalidParameterValue error.')
        return False