class ConsoleCleanUp(EutesterTestCase):
    def __init__(self, extra_args=None, **kwargs):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        for kwarg in kwargs:
            # Copy keyword overrides into the parsed args (was "kwarg[kwarg]", a bug).
            self.args[kwarg] = kwargs[kwarg]
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

    def populate_resources_for_console_test(self):
        """
        This method creates resources in the cloud.
        """
        zone = self.tester.ec2.get_all_zones()[0].name
        volume = self.tester.ec2.create_volume(1, zone)
        self.tester.wait_for_volume(volume)
        snapshot = self.tester.create_snapshot_from_volume(volume)
        self.tester.create_volume(zone=zone, snapshot=snapshot)
        keypair = self.tester.ec2.create_key_pair("test-key").name
        s_group = self.tester.ec2.create_security_group("mygroup", "Security group for console test.").name
        image = self.tester.get_images()[0]
        image_id = self.tester.get_images()[0].id
        instance = self.tester.run_image(image=image, keypair="test-key", group="mygroup",
                                         auto_connect=False, zone=zone)
        instance_id = self.tester.get_instances('running')[0].id
        ip = self.tester.allocate_address().public_ip
        # Allocate a second address that stays unassociated.
        self.tester.allocate_address()
        self.tester.ec2.associate_address(instance_id, ip)
        self.tester.create_launch_config("LC1", image_id, keypair, [s_group], instance_type="m1.small")
        self.tester.create_as_group("ASG1", "LC1", self.tester.get_zones(),
                                    min_size=1, max_size=8, desired_capacity=2)
        instance = self.tester.get_instances('running')[0]
        self.tester.attach_volume(instance, volume, "vdb")
class AutoScalingBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.emi:
            self.tester = Eucaops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath)
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None

    def clean_method(self):
        ### Delete group
        self.tester.delete_group(self.group)
        ### Delete keypair in cloud and from filesystem
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def AutoScalingBasics(self):
        ### test create and describe launch config
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=self.launch_config_name, image_id=self.image.id,
                                         key_name=self.keypair.name, security_groups=[self.group.name])
        if len(self.tester.describe_launch_config([self.launch_config_name])) != 1:
            raise Exception('Launch Config not created')
        self.debug('**** Created Launch Config: ' +
                   self.tester.describe_launch_config([self.launch_config_name])[0].name)

        ### test create and describe auto scale group
        self.initial_size = len(self.tester.describe_as_group())
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                    launch_config=self.launch_config_name,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=0, max_size=5, connection=self.tester.autoscale)
        if len(self.tester.describe_as_group([self.auto_scaling_group_name])) != 1:
            raise Exception('Auto Scaling Group not created')
        self.debug("**** Created Auto Scaling Group: " +
                   self.tester.describe_as_group([self.auto_scaling_group_name])[0].name)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.create_as_policy(name=self.up_policy_name, adjustment_type="ChangeInCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=4, cooldown=120)
        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50
        self.tester.create_as_policy(name=self.down_policy_name, adjustment_type="PercentChangeInCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=self.down_size, cooldown=120)
        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0
        self.tester.create_as_policy(name=self.exact_policy_name, adjustment_type="ExactCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=self.exact_size, cooldown=120)
        ### Test all policies added to group
        if len(self.tester.autoscale.get_all_policies()) != 3:
            raise Exception('Auto Scaling policies not created')
        self.debug("**** Created Auto Scaling Policies: " + self.up_policy_name + " " +
                   self.down_policy_name + " " + self.exact_policy_name)

        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.up_policy_name,
                                      as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug("Executed ChangeInCapacity policy, increased desired capacity to: " +
                   str(self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity))

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.down_policy_name,
                                      as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed PercentChangeInCapacity policy, decreased desired capacity to: " +
                   str(self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity))

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.exact_policy_name,
                                      as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity != self.exact_size:
            raise Exception("ExactCapacity policy not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " +
                   str(self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity))

        ### Test Delete all Auto Scaling Policies
        for policy in self.tester.autoscale.get_all_policies():
            self.tester.delete_as_policy(policy_name=policy.name, autoscale_group=policy.as_name)
        if len(self.tester.autoscale.get_all_policies()) != 0:
            raise Exception('Auto Scaling policy not deleted')
        self.debug("**** Deleted Auto Scaling Policy: " + self.up_policy_name + " " +
                   self.down_policy_name + " " + self.exact_policy_name)

        ### Test Delete Auto Scaling Group
        self.tester.delete_as_group(names=self.auto_scaling_group_name)
        if len(self.tester.describe_as_group([self.auto_scaling_group_name])) != 0:
            raise Exception('Auto Scaling Group not deleted')
        self.debug('**** Deleted Auto Scaling Group: ' + self.auto_scaling_group_name)
        ### pause for Auto scaling group to be deleted
        # TODO write wait/poll op for auto scaling groups
        # time.sleep(5)

        ### Test delete launch config
        self.tester.delete_launch_config(self.launch_config_name)
        if len(self.tester.describe_launch_config([self.launch_config_name])) != 0:
            raise Exception('Launch Config not deleted')
        self.debug('**** Deleted Launch Config: ' + self.launch_config_name)

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and
        TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a 100 LC per account limit;
        this tests what happens if we create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.create_launch_config(name=self.launch_config_name, image_id=self.image.id)
            if len(self.tester.describe_launch_config()) > 100:
                raise Exception("More than 100 launch configs exist in 1 account")
        for lc in self.tester.describe_launch_config():
            self.tester.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        launch_config_name = 'LC-' + str(time.time())
        self.tester.create_launch_config(name=launch_config_name, image_id=self.image.id,
                                         key_name=self.keypair.name, security_groups=[self.group.name])
        asg = 'ASG-' + str(time.time())
        self.tester.create_as_group(group_name=asg, launch_config=launch_config_name,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=0, max_size=5, connection=self.tester.autoscale)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.create_as_policy(name=policy_name, adjustment_type="ExactCapacity",
                                         as_name=asg, scaling_adjustment=0, cooldown=120)
        if len(self.tester.autoscale.get_all_policies()) > 25:
            raise Exception("More than 25 policies exist for 1 auto scaling group")
        self.tester.delete_as_group(names=asg)

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        self.tester.delete_all_autoscaling_groups()
        self.tester.delete_all_launch_configs()
class AutoScalingBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store", not_location="loadbalancer")
        self.address = None
        self.asg = None

    def clean_method(self):
        if self.asg:
            self.tester.wait_for_result(self.gracefully_delete, True)
            self.tester.delete_as_group(self.asg.name, force=True)
        self.tester.cleanup_artifacts()

    def AutoScalingBasics(self):
        ### create launch configuration
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=self.launch_config_name, image_id=self.image.id,
                                         instance_type="m1.small", key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        ### create auto scale group
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                               availability_zones=self.tester.get_zones(),
                                               launch_config=self.launch_config_name,
                                               min_size=0, max_size=5)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.create_as_policy(name=self.up_policy_name, adjustment_type="ChangeInCapacity",
                                     scaling_adjustment=4, as_name=self.auto_scaling_group_name,
                                     cooldown=120)
        if len(self.tester.autoscale.get_all_policies(policy_names=[self.up_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.up_policy_name + ' not created')
        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50
        self.tester.create_as_policy(name=self.down_policy_name, adjustment_type="PercentChangeInCapacity",
                                     scaling_adjustment=self.down_size,
                                     as_name=self.auto_scaling_group_name, cooldown=120)
        if len(self.tester.autoscale.get_all_policies(policy_names=[self.down_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.down_policy_name + ' not created')
        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0
        self.tester.create_as_policy(name=self.exact_policy_name, adjustment_type="ExactCapacity",
                                     scaling_adjustment=self.exact_size,
                                     as_name=self.auto_scaling_group_name, cooldown=120)
        if len(self.tester.autoscale.get_all_policies(policy_names=[self.exact_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.exact_policy_name + ' not created')
        self.debug("**** Created Auto Scaling Policies: " + self.up_policy_name + " " +
                   self.down_policy_name + " " + self.exact_policy_name)
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.up_policy_name,
                                      as_group=self.auto_scaling_group_name, honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug("Executed ChangeInCapacity policy, increased desired capacity to: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.down_policy_name,
                                      as_group=self.auto_scaling_group_name, honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed PercentChangeInCapacity policy, decreased desired capacity to: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.exact_policy_name,
                                      as_group=self.auto_scaling_group_name, honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.exact_size:
            raise Exception("ExactCapacity policy not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Delete all Auto Scaling Policies
        self.tester.delete_all_policies()
        ### Test Delete Auto Scaling Group
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### Test delete launch config
        self.tester.delete_launch_config(self.launch_config_name)

    def scaling_activities_complete(self):
        activities = self.asg.get_activities()
        for activity in activities:
            assert isinstance(activity, Activity)
            if activity.progress != 100:
                return False
        return True

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and
        TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a 100 LC per account limit;
        this tests what happens if we create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.create_launch_config(name=self.launch_config_name, image_id=self.image.id)
            if len(self.tester.describe_launch_config()) > 100:
                raise Exception("More than 100 launch configs exist in 1 account")
        for lc in self.tester.describe_launch_config():
            self.tester.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        """
        AWS enforces a 25 policy per account limit;
        this tests what happens if we create more
        """
        launch_config_name = 'LC-' + str(time.time())
        self.tester.create_launch_config(name=launch_config_name, image_id=self.image.id,
                                         instance_type="m1.small", key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        asg_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=asg_name,
                                               launch_config=launch_config_name,
                                               availability_zones=self.tester.get_zones(),
                                               min_size=0, max_size=5)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.create_as_policy(name=policy_name, adjustment_type="ExactCapacity",
                                         as_name=asg_name, scaling_adjustment=0, cooldown=120)
        if len(self.tester.autoscale.get_all_policies()) > 25:
            raise Exception("More than 25 policies exist for 1 auto scaling group")
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        """
        remove ALL scaling policies, auto scaling groups and launch configs
        """
        self.tester.delete_all_policies()
        self.tester.delete_all_autoscaling_groups()
        self.tester.delete_all_launch_configs()

    def change_config(self):
        ### create initial launch configuration
        first_launch_config = 'First-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=first_launch_config, image_id=self.image.id,
                                         instance_type="m1.small")
        # create a replacement LC with different instance type
        second_launch_config = 'Second-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=second_launch_config, image_id=self.image.id,
                                         instance_type="m1.large")
        ### create auto scale group
        auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=auto_scaling_group_name,
                                               launch_config=first_launch_config,
                                               availability_zones=self.tester.get_zones(),
                                               min_size=1, max_size=4, desired_capacity=1)
        assert isinstance(self.asg, AutoScalingGroup)
        self.tester.update_as_group(group_name=self.asg.name, launch_config=second_launch_config,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=1, max_size=4)

        def wait_for_instances(number=1):
            self.asg = self.tester.describe_as_group(self.asg.name)
            instances = self.asg.instances
            if not instances:
                self.tester.debug("No instances in ASG")
                return False
            if len(self.asg.instances) != number:
                self.tester.debug("Instances not yet allocated")
                return False
            for instance in instances:
                assert isinstance(instance, Instance)
                instance = self.tester.get_instances(idstring=instance.instance_id)[0]
                if instance.state != "running":
                    self.tester.debug("Instance: " + str(instance) + " still in " +
                                      instance.state + " state")
                    return False
                else:
                    self.tester.debug("Instance: " + str(instance) + " now running")
            return True

        self.tester.wait_for_result(wait_for_instances, True, timeout=360)
        ### Set desired capacity
        new_desired = 2
        self.asg.set_capacity(new_desired)
        self.tester.wait_for_result(wait_for_instances, True, number=new_desired, timeout=360)
        # wait briefly before changing capacity
        # TODO get new instance ID and its type, verify correct type
        ### Delete Auto Scaling Group
        last_instance = self.tester.get_instances(idstring=self.tester.get_last_instance_id())[0]
        assert last_instance.instance_type == "m1.large"
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### delete launch configs
        self.tester.delete_launch_config(first_launch_config)
        self.tester.delete_launch_config(second_launch_config)

    def gracefully_delete(self, asg=None):
        if not asg:
            asg = self.asg
        assert isinstance(asg, AutoScalingGroup)
        try:
            self.tester.delete_as_group(name=asg.name, force=True)
        except BotoServerError, e:
            if e.status == 400 and e.reason == "ScalingActivityInProgress":
                return False
        return True
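# A typical eutester driver for test classes like the one above (a sketch, not part of the
# original excerpt). It assumes the standard EutesterTestCase helpers create_testunit_by_name()
# and run_test_case_list() plus the --tests argument; verify the names against your eutester
# version before relying on it.
if __name__ == "__main__":
    testcase = AutoScalingBasics()
    # Use the test list passed via config/command line, or fall back to the main case.
    test_list = testcase.args.tests or ["AutoScalingBasics"]
    # Convert test method names into EutesterUnitTest objects.
    unit_list = []
    for test in test_list:
        unit_list.append(testcase.create_testunit_by_name(test))
    # Run the units and clean up the resources the tests created.
    result = testcase.run_test_case_list(unit_list, clean_on_exit=True)
    exit(result)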
class CloudWatchBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument('--clean_on_exit', action='store_true', default=True,
                                 help='Boolean, used to flag whether to run clean up method after running test list')
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        ### Setup basic eutester object
        if self.args.region:
            self.tester = CWops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password,
                                  credpath=self.args.credpath)
        self.start_time = str(int(time.time()))
        self.zone = self.tester.get_zones()
        self.namespace = 'Namespace-' + self.start_time
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        ### Setup AutoScaling
        self.setUpAutoscaling()
        ### Create Dimensions used in tests
        self.instanceDimension = newDimension('InstanceId', self.instanceid)
        self.volumeDimension = newDimension('VolumeId', self.volume.id)
        self.autoScalingDimension = newDimension('AutoScalingGroupName', self.auto_scaling_group_name)
        ### Setup Alarms
        self.setUpAlarms()
        ### Wait for metrics to populate, timeout 30 minutes
        self.tester.wait_for_result(self.IsMetricsListPopulated, result=True, timeout=1800)

    def clean_method(self):
        self.tester.cleanup_artifacts()
        self.cleanUpAutoscaling()
        self.tester.delete_keypair(self.keypair)

    def get_time_window(self, end=None, **kwargs):
        if not end:
            end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(**kwargs)
        return (start, end)

    def print_timeseries_for_graphite(self, timeseries):
        for datapoint in timeseries:
            print 'graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' + \
                  str((datapoint['Timestamp'] - datetime.datetime(1970, 1, 1)).total_seconds())

    def PutDataGetStats(self):
        assert self.testAwsReservedNamspaces()
        seconds_to_put_data = 120
        metric_data = 1
        time_string = str(int(time.time()))
        metric_name = "Metric-" + time_string
        incrementing = True
        while datetime.datetime.now().second != 0:
            self.tester.debug("Waiting for minute edge")
            self.tester.sleep(1)
        start = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_to_put_data)
        for i in xrange(seconds_to_put_data):
            timestamp = start + datetime.timedelta(seconds=i)
            self.tester.debug("Adding metric: {metric} to namespace: {namespace} with value {value} at {timestamp}"
                              .format(metric=metric_name, namespace=self.namespace,
                                      value=metric_data, timestamp=timestamp))
            self.tester.cw.put_metric_data(self.namespace, [metric_name], [metric_data], timestamp=timestamp)
            if metric_data == 600 or metric_data == 0:
                incrementing = not incrementing
            if incrementing:
                metric_data += 1
            else:
                metric_data -= 1
        end = start + datetime.timedelta(seconds=seconds_to_put_data)
        self.tester.sleep(60)
        metric = self.tester.cw.list_metrics(namespace=self.namespace)[0]
        assert isinstance(metric, Metric)
        stats_array = metric.query(start_time=start, end_time=end,
                                   statistics=['Average', 'Sum', 'Maximum', 'Minimum', 'SampleCount'])
        assert len(stats_array) == 2
        if stats_array[0]['Minimum'] == 1:
            first_sample = stats_array[0]
            second_sample = stats_array[1]
        else:
            second_sample = stats_array[0]
            first_sample = stats_array[1]
        print stats_array
        ## Check sample 1
        assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0
        assert first_sample['Average'] < 34 and first_sample['Average'] > 26
        assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500
        assert first_sample['SampleCount'] > 50
        ## Check sample 2
        assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50
        assert second_sample['Average'] < 95 and second_sample['Average'] > 80
        assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600
        assert second_sample['SampleCount'] > 50
        assert first_sample['Average'] < second_sample['Average']
        assert first_sample['Sum'] < second_sample['Sum']
        assert first_sample['Maximum'] < second_sample['Maximum']
        assert first_sample['Minimum'] < second_sample['Minimum']

    def ListMetrics(self, metricNames, dimension):
        self.debug('Get Metric list')
        metricList = self.tester.list_metrics(dimensions=dimension)
        self.debug('Checking to see if list is populated at all.')
        assert len(metricList) > 0
        self.debug('Make sure dimensions are listed.')
        found = False
        for metric in metricList:
            self.debug(metric.dimensions)
            if str(metric.dimensions).count(dimension[dimension.keys().pop()]):
                self.debug('Dimension ' + dimension[dimension.keys().pop()])
                found = True
                break
        assert found
        self.debug('Checking to see if we get all the expected instance metrics.')
        for metric in metricNames:
            assert str(metricList).count(metric['name']) > 0
            self.debug('Metric ' + metric['name'])

    def checkMetricFilters(self):
        self.debug('Check list_metrics filtering parameters')
        metricList = self.tester.list_metrics(namespace='AWS/EC2')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='AWS/EBS')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='NonExistent-NameSpace')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(metric_name='CPUUtilization')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(metric_name='NonExistent-Metric-Name')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.instanceDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceId', 'NonExistent-InstanceId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', self.image.id))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', 'NonExistent-imageId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', self.instance_type))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', 'NonExistent-InstanceType'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.autoScalingDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('AutoScalingGroupName',
                                                                      'NonExistent-AutoScalingGroupName'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0

    def IsMetricsListPopulated(self):
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        metrics1 = self.tester.cw.get_metric_statistics(60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average',
                                                        dimensions=self.instanceDimension, unit='Percent')
        metrics2 = self.tester.cw.get_metric_statistics(60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average',
                                                        dimensions=self.volumeDimension, unit='Bytes')
        if len(metrics1) > 0 and len(metrics2) > 0:
            return True
        else:
            return False

    def GetMetricStatistics(self, metricNames, namespace, dimension):
        period = 60
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        stats = self.tester.get_stats_array()
        ### Check to make sure we are getting all namespace metrics and statistics
        for i in range(len(metricNames)):
            values = []
            for j in range(len(stats)):
                metricName = metricNames[i]['name']
                statisticName = stats[j]
                unitType = metricNames[i]['unit']
                metrics = self.tester.get_metric_statistics(period, start, end, metricName, namespace,
                                                            statisticName, dimensions=dimension, unit=unitType)
                ### This assures we are getting all statistics for all dimension metrics.
                assert int(len(metrics)) > 0
                statisticValue = str(metrics[0][statisticName])
                self.debug(metricName + ' : ' + statisticName + '=' + statisticValue + ' ' + unitType)
                values.append(statisticValue)
            self.tester.validateStats(values)

    def setUpAutoscaling(self):
        ### setup autoscaling variables
        self.debug('Setting up AutoScaling, starting 1 instance')
        self.instance_type = 'm1.small'
        self.image = self.tester.get_emi(root_device_type='instance-store')
        self.launch_config_name = 'ASConfig'
        self.auto_scaling_group_name = 'ASGroup'
        self.exact = 'ExactCapacity'
        self.change = 'ChangeInCapacity'
        self.percent = 'PercentChangeInCapacity'
        self.cleanUpAutoscaling()
        diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &'
        diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &'
        ### create launch configuration
        self.tester.create_launch_config(name=self.launch_config_name, image_id=self.image.id,
                                         instance_type=self.instance_type, key_name=self.keypair.name,
                                         security_groups=[self.group.name], instance_monitoring=True,
                                         user_data=diskWrite + ' ' + diskRead)
        ### create auto scale group
        self.tester.create_as_group(group_name=self.auto_scaling_group_name, availability_zones=self.zone,
                                    launch_config=self.launch_config_name,
                                    min_size=0, max_size=5, desired_capacity=1)
        ### create auto scale policies
        self.tester.create_as_policy(name=self.exact, adjustment_type=self.exact, scaling_adjustment=0,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.change, adjustment_type=self.change, scaling_adjustment=1,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.percent, adjustment_type=self.percent, scaling_adjustment=-50,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        ## Wait for the last instance to go to running state.
        state = None
        while not str(state).endswith('running'):
            self.debug('Waiting for AutoScaling instance to go to running state ...')
            self.tester.sleep(15)
            self.instanceid = self.tester.get_last_instance_id()
            instance_list = self.tester.get_instances(idstring=self.instanceid)
            self.instance = instance_list.pop()
            state = self.instance.state
        self.debug(self.instanceid + ' is now running.')
        ### Create and attach a volume
        self.volume = self.tester.create_volume(self.zone.pop())
        self.tester.attach_volume(self.instance, self.volume, '/dev/sdf')
        ### Get the newly created policies.
        self.policy_exact = self.tester.autoscale.get_all_policies(policy_names=[self.exact])
        self.policy_change = self.tester.autoscale.get_all_policies(policy_names=[self.change])
        self.policy_percent = self.tester.autoscale.get_all_policies(policy_names=[self.percent])
        self.debug('AutoScaling setup Complete')

    def cleanUpAutoscaling(self):
        self.tester.delete_all_alarms()
        self.tester.delete_all_policies()
        self.tester.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.delete_launch_config(self.launch_config_name)

    def isInService(self):
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.metric_alarm('exact', metric, comparison, threshold, period,
                                               evaluation_periods, statistic, description='TEST',
                                               namespace='AWS/EC2', dimensions=self.instanceDimension,
                                               alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.metric_alarm('change', metric, comparison, threshold, period,
                                                evaluation_periods, statistic, description='TEST',
                                                namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.metric_alarm('percent', metric, comparison, threshold, period,
                                                 evaluation_periods, statistic, description='TEST',
                                                 namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                 alarm_actions=self.policy_percent.pop().policy_arn)
        ### put all the alarms
        self.tester.put_metric_alarm(alarm_change)
        self.tester.put_metric_alarm(alarm_percent)
        self.tester.put_metric_alarm(alarm_exact)

    def testDesribeAlarms(self):
        self.debug(self.tester.describe_alarms())
        assert len(self.tester.describe_alarms()) >= 3
        ### test describe_alarms_for_metric for created alarms
        assert len(self.tester.describe_alarms_for_metric('CPUUtilization', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 3
        ### There should not be any alarms created for 'DiskReadOps'
        assert len(self.tester.describe_alarms_for_metric('DiskReadOps', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 0
        ### test describe_alarm_history
        self.debug(self.tester.describe_alarm_history())
        assert len(self.tester.describe_alarm_history()) >= 3

    def testAlarms(self):
        ### The number of running instances should equal the desired_capacity for the auto_scaling_group = (1)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.disable_alarm_actions('exact')
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name + ' instances = 1')
        ### The number of running instances should equal the desired_capacity + scaling_adjustment = (2)
        self.tester.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(len(group.instances))
        assert len(group.instances) == 2
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances changed to 2')
        ### The number of running instances should equal the total from the previous scaling_adjustment (2) - 50% = (1)
        self.tester.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances == None
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances is exactly 0')

    def testAwsReservedNamspaces(self):
        try:
            self.tester.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception, e:
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamspaces generated expected InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamspaces did not throw expected InvalidParameterValue error.')
        return False
class CloudWatchBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument('--clean_on_exit', action='store_true', default=True,
                                 help='Boolean, used to flag whether to run clean up method after running test list')
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        ### Setup basic eutester object
        if self.args.region:
            self.tester = CWops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password,
                                  credpath=self.args.credpath)
        self.start_time = str(int(time.time()))
        self.zone = self.tester.get_zones()
        self.namespace = 'Namespace-' + self.start_time
        self.keypair = self.tester.add_keypair()
        self.group = self.tester.add_group()
        ### Setup AutoScaling
        self.setUpAutoscaling()
        ### Create Dimensions used in tests
        self.instanceDimension = newDimension('InstanceId', self.instanceid)
        self.volumeDimension = newDimension('VolumeId', self.volume.id)
        self.autoScalingDimension = newDimension('AutoScalingGroupName', self.auto_scaling_group_name)
        ### Setup Alarms
        self.setUpAlarms()
        ### Wait for metrics to populate, timeout 30 minutes
        self.tester.wait_for_result(self.IsMetricsListPopulated, result=True, timeout=1800)

    def clean_method(self):
        self.cleanUpAutoscaling()
        self.tester.cleanup_artifacts()
        self.tester.delete_keypair(self.keypair)

    def get_time_window(self, end=None, **kwargs):
        if not end:
            end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(**kwargs)
        return (start, end)

    def print_timeseries_for_graphite(self, timeseries):
        for datapoint in timeseries:
            print 'graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' + \
                  str((datapoint['Timestamp'] - datetime.datetime(1970, 1, 1)).total_seconds())

    def PutDataGetStats(self):
        assert self.testAwsReservedNamspaces()
        seconds_to_put_data = 120
        metric_data = 1
        time_string = str(int(time.time()))
        metric_name = "Metric-" + time_string
        incrementing = True
        while datetime.datetime.now().second != 0:
            self.tester.debug("Waiting for minute edge")
            self.tester.sleep(1)
        start = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_to_put_data)
        for i in xrange(seconds_to_put_data):
            timestamp = start + datetime.timedelta(seconds=i)
            self.tester.debug("Adding metric: {metric} to namespace: {namespace} with value {value} at {timestamp}"
                              .format(metric=metric_name, namespace=self.namespace,
                                      value=metric_data, timestamp=timestamp))
            self.tester.cw.put_metric_data(self.namespace, [metric_name], [metric_data], timestamp=timestamp)
            if metric_data == 600 or metric_data == 0:
                incrementing = not incrementing
            if incrementing:
                metric_data += 1
            else:
                metric_data -= 1
        end = start + datetime.timedelta(seconds=seconds_to_put_data)

        def isMatricsAvailable():
            metrics = self.tester.cw.list_metrics(namespace=self.namespace)
            if not metrics:
                return False
            else:
                return True

        self.tester.wait_for_result(isMatricsAvailable, True, timeout=900, poll_wait=300)
        metric = self.tester.cw.list_metrics(namespace=self.namespace)[0]
        assert isinstance(metric, Metric)
        stats_array = metric.query(start_time=start, end_time=end,
                                   statistics=['Average', 'Sum', 'Maximum', 'Minimum', 'SampleCount'])
        assert len(stats_array) == 2
        if stats_array[0]['Minimum'] == 1:
            first_sample = stats_array[0]
            second_sample = stats_array[1]
        else:
            second_sample = stats_array[0]
            first_sample = stats_array[1]
        print stats_array
        ## Check sample 1
        assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0
        assert first_sample['Average'] < 34 and first_sample['Average'] > 26
        assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500
        assert first_sample['SampleCount'] > 50
        ## Check sample 2
        assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50
        assert second_sample['Average'] < 95 and second_sample['Average'] > 80
        assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600
        assert second_sample['SampleCount'] > 50
        assert first_sample['Average'] < second_sample['Average']
        assert first_sample['Sum'] < second_sample['Sum']
        assert first_sample['Maximum'] < second_sample['Maximum']
        assert first_sample['Minimum'] < second_sample['Minimum']

    def ListMetrics(self, metricNames, dimension):
        self.debug('Get Metric list')
        metricList = self.tester.list_metrics(dimensions=dimension)
        self.debug('Checking to see if list is populated at all.')
        assert len(metricList) > 0
        self.debug('Make sure dimensions are listed.')
        found = False
        for metric in metricList:
            self.debug(metric.dimensions)
            if str(metric.dimensions).count(dimension[dimension.keys().pop()]):
                self.debug('Dimension ' + dimension[dimension.keys().pop()])
                found = True
                break
        assert found
        self.debug('Checking to see if we get all the expected instance metrics.')
        for metric in metricNames:
            assert str(metricList).count(metric['name']) > 0
            self.debug('Metric ' + metric['name'])

    def checkMetricFilters(self):
        self.debug('Check list_metrics filtering parameters')
        metricList = self.tester.list_metrics(namespace='AWS/EC2')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='AWS/EBS')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(namespace='NonExistent-NameSpace')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(metric_name='CPUUtilization')
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(metric_name='NonExistent-Metric-Name')
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.instanceDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceId', 'NonExistent-InstanceId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', self.image.id))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('ImageId', 'NonExistent-imageId'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', self.instance_type))
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('InstanceType', 'NonExistent-InstanceType'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.autoScalingDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('AutoScalingGroupName',
                                                                      'NonExistent-AutoScalingGroupName'))
        assert len(metricList) == 0
        metricList = self.tester.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0

    def IsMetricsListPopulated(self):
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        metrics1 = self.tester.cw.get_metric_statistics(60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average',
                                                        dimensions=self.instanceDimension, unit='Percent')
        metrics2 = self.tester.cw.get_metric_statistics(60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average',
                                                        dimensions=self.volumeDimension, unit='Bytes')
        if len(metrics1) > 0 and len(metrics2) > 0:
            return True
        else:
            return False

    def GetMetricStatistics(self, metricNames, namespace, dimension):
        period = 60
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        stats = self.tester.get_stats_array()
        ### Check to make sure we are getting all namespace metrics and statistics
        for i in range(len(metricNames)):
            values = []
            for j in range(len(stats)):
                metricName = metricNames[i]['name']
                statisticName = stats[j]
                unitType = metricNames[i]['unit']
                metrics = self.tester.get_metric_statistics(period, start, end, metricName, namespace,
                                                            statisticName, dimensions=dimension, unit=unitType)
                ### This assures we are getting all statistics for all dimension metrics.
                assert int(len(metrics)) > 0
                statisticValue = str(metrics[0][statisticName])
                self.debug(metricName + ' : ' + statisticName + '=' + statisticValue + ' ' + unitType)
                values.append(statisticValue)
            self.tester.validateStats(values)

    def setUpAutoscaling(self):
        ### setup autoscaling variables
        self.debug('Setting up AutoScaling, starting 1 instance')
        self.instance_type = 'm1.small'
        self.image = self.tester.get_emi(root_device_type='instance-store')
        self.launch_config_name = 'ASConfig'
        self.auto_scaling_group_name = 'ASGroup'
        self.exact = 'ExactCapacity'
        self.change = 'ChangeInCapacity'
        self.percent = 'PercentChangeInCapacity'
        self.cleanUpAutoscaling()
        diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &'
        diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &'
        ### create launch configuration
        self.tester.create_launch_config(name=self.launch_config_name, image_id=self.image.id,
                                         instance_type=self.instance_type, key_name=self.keypair.name,
                                         security_groups=[self.group.name], instance_monitoring=True,
                                         user_data=diskWrite + ' ' + diskRead)
        ### create auto scale group
        self.tester.create_as_group(group_name=self.auto_scaling_group_name, availability_zones=self.zone,
                                    launch_config=self.launch_config_name,
                                    min_size=0, max_size=5, desired_capacity=1)
        ### create auto scale policies
        self.tester.create_as_policy(name=self.exact, adjustment_type=self.exact, scaling_adjustment=0,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.change, adjustment_type=self.change, scaling_adjustment=1,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        self.tester.create_as_policy(name=self.percent, adjustment_type=self.percent, scaling_adjustment=-50,
                                     as_name=self.auto_scaling_group_name, cooldown=0)
        ## Wait for the instance to go to running state.
        self.tester.wait_for_result(self.tester.wait_for_instances, True, timeout=600,
                                    group_name=self.auto_scaling_group_name)
        self.instanceid = self.tester.get_last_instance_id()
        instance_list = self.tester.get_instances(idstring=self.instanceid)
        self.instance = instance_list.pop()
        self.debug('ASG is now setup.')
        ### Create and attach a volume
        self.volume = self.tester.create_volume(self.zone.pop())
        self.tester.attach_volume(self.instance, self.volume, '/dev/sdf')
        ### Get the newly created policies.
        self.policy_exact = self.tester.autoscale.get_all_policies(policy_names=[self.exact])
        self.policy_change = self.tester.autoscale.get_all_policies(policy_names=[self.change])
        self.policy_percent = self.tester.autoscale.get_all_policies(policy_names=[self.percent])
        self.debug('AutoScaling setup Complete')

    def cleanUpAutoscaling(self):
        self.tester.delete_all_alarms()
        self.tester.delete_all_policies()
        self.tester.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.delete_launch_config(self.launch_config_name)

    def isInService(self):
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.metric_alarm('exact', metric, comparison, threshold, period,
                                               evaluation_periods, statistic, description='TEST',
                                               namespace='AWS/EC2', dimensions=self.instanceDimension,
                                               alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.metric_alarm('change', metric, comparison, threshold, period,
                                                evaluation_periods, statistic, description='TEST',
                                                namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.metric_alarm('percent', metric, comparison, threshold, period,
                                                 evaluation_periods, statistic, description='TEST',
                                                 namespace='AWS/EC2', dimensions=self.instanceDimension,
                                                 alarm_actions=self.policy_percent.pop().policy_arn)
        ### put all the alarms
        self.tester.put_metric_alarm(alarm_change)
        self.tester.put_metric_alarm(alarm_percent)
        self.tester.put_metric_alarm(alarm_exact)

    def testDesribeAlarms(self):
        self.debug(self.tester.describe_alarms())
        assert len(self.tester.describe_alarms()) >= 3
        ### test describe_alarms_for_metric for created alarms
        assert len(self.tester.describe_alarms_for_metric('CPUUtilization', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 3
        ### There should not be any alarms created for 'DiskReadOps'
        assert len(self.tester.describe_alarms_for_metric('DiskReadOps', 'AWS/EC2',
                                                          dimensions=self.instanceDimension)) == 0
        ### test describe_alarm_history
        self.debug(self.tester.describe_alarm_history())
        assert len(self.tester.describe_alarm_history()) >= 3

    def testAlarms(self):
        ### The number of running instances should equal the desired_capacity for the auto_scaling_group = (1)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.disable_alarm_actions('exact')
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name + ' instances = 1')
        ### The number of running instances should equal the desired_capacity + scaling_adjustment = (2)
        self.tester.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(len(group.instances))
        assert len(group.instances) == 2
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances changed to 2')
        ### The number of running instances should equal the total from the previous scaling_adjustment (2) - 50% = (1)
        self.tester.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances == None
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances is exactly 0')

    def testAwsReservedNamspaces(self):
        try:
            self.tester.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception, e:
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamspaces generated expected InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamspaces did not throw expected InvalidParameterValue error.')
        return False
class AutoScalingBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store",
                                             not_location="loadbalancer")
        self.address = None
        self.asg = None

    def clean_method(self):
        if self.asg:
            self.tester.wait_for_result(self.gracefully_delete, True)
            self.tester.delete_as_group(self.asg.name, force=True)
        self.tester.cleanup_artifacts()

    def AutoScalingBasics(self):
        ### create launch configuration
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         instance_type="m1.small",
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])

        ### create auto scale group
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                               availability_zones=self.tester.get_zones(),
                                               launch_config=self.launch_config_name,
                                               min_size=0,
                                               max_size=5)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.create_as_policy(name=self.up_policy_name,
                                     adjustment_type="ChangeInCapacity",
                                     scaling_adjustment=4,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)
        if len(self.tester.autoscale.get_all_policies(policy_names=[self.up_policy_name])) != 1:
            raise Exception('Auto Scaling policy: ' + self.up_policy_name + ' not created')

        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50
        self.tester.create_as_policy(name=self.down_policy_name,
                                     adjustment_type="PercentChangeInCapacity",
                                     scaling_adjustment=self.down_size,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)
        if len(self.tester.autoscale.get_all_policies(policy_names=[self.down_policy_name])) != 1:
            raise Exception('Auto Scaling policy: ' + self.down_policy_name + ' not created')

        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0
        self.tester.create_as_policy(name=self.exact_policy_name,
                                     adjustment_type="ExactCapacity",
                                     scaling_adjustment=self.exact_size,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)
        if len(self.tester.autoscale.get_all_policies(policy_names=[self.exact_policy_name])) != 1:
            raise Exception('Auto Scaling policy: ' + self.exact_policy_name + ' not created')

        self.debug("**** Created Auto Scaling Policies: " + self.up_policy_name + " " +
                   self.down_policy_name + " " + self.exact_policy_name)
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.up_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug("Executed ChangeInCapacity policy, increased desired capacity to: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.down_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed PercentChangeInCapacity policy, decreased desired capacity to: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.exact_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.exact_size:
            raise Exception("Auto Scale exact capacity not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Delete all Auto Scaling Policies
        self.tester.delete_all_policies()

        ### Test Delete Auto Scaling Group
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None

        ### Test delete launch config
        self.tester.delete_launch_config(self.launch_config_name)

    def scaling_activities_complete(self):
        activities = self.asg.get_activities()
        for activity in activities:
            assert isinstance(activity, Activity)
            if activity.progress != 100:
                return False
        return True

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth
        and TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a 100 launch configuration per account limit;
        this tests what happens if we create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.create_launch_config(name=self.launch_config_name,
                                             image_id=self.image.id)
        if len(self.tester.describe_launch_config()) > 100:
            raise Exception("More than 100 launch configs exist in 1 account")
        for lc in self.tester.describe_launch_config():
            self.tester.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        """
        AWS enforces a 25 policy per account limit;
        this tests what happens if we create more
        """
        launch_config_name = 'LC-' + str(time.time())
        self.tester.create_launch_config(name=launch_config_name,
                                         image_id=self.image.id,
                                         instance_type="m1.small",
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        asg_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=asg_name,
                                               launch_config=launch_config_name,
                                               availability_zones=self.tester.get_zones(),
                                               min_size=0,
                                               max_size=5)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.create_as_policy(name=policy_name,
                                         adjustment_type="ExactCapacity",
                                         as_name=asg_name,
                                         scaling_adjustment=0,
                                         cooldown=120)
        if len(self.tester.autoscale.get_all_policies()) > 25:
            raise Exception("More than 25 policies exist for 1 auto scaling group")
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG per account limit
        """
        pass

    def clear_all(self):
        """
        Remove ALL scaling policies, auto scaling groups and launch configs
        """
        self.tester.delete_all_policies()
        self.tester.delete_all_autoscaling_groups()
        self.tester.delete_all_launch_configs()

    def change_config(self):
        ### create initial launch configuration
        first_launch_config = 'First-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=first_launch_config,
                                         image_id=self.image.id,
                                         instance_type="m1.small")
        ### create a replacement launch config with a different instance type
        second_launch_config = 'Second-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=second_launch_config,
                                         image_id=self.image.id,
                                         instance_type="m1.large")
        ### create auto scale group
        auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=auto_scaling_group_name,
                                               launch_config=first_launch_config,
                                               availability_zones=self.tester.get_zones(),
                                               min_size=1,
                                               max_size=4,
                                               desired_capacity=1)
        assert isinstance(self.asg, AutoScalingGroup)
        self.tester.update_as_group(group_name=self.asg.name,
                                    launch_config=second_launch_config,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=1,
                                    max_size=4)

        def wait_for_instances(number=1):
            self.asg = self.tester.describe_as_group(self.asg.name)
            instances = self.asg.instances
            if not instances:
                self.tester.debug("No instances in ASG")
                return False
            if len(self.asg.instances) != number:
                self.tester.debug("Instances not yet allocated")
                return False
            for instance in instances:
                assert isinstance(instance, Instance)
                instance = self.tester.get_instances(idstring=instance.instance_id)[0]
                if instance.state != "running":
                    self.tester.debug("Instance: " + str(instance) + " still in " +
                                      instance.state + " state")
                    return False
                else:
                    self.tester.debug("Instance: " + str(instance) + " now running")
            return True

        self.tester.wait_for_result(wait_for_instances, True, timeout=360)

        ### Set desired capacity and wait for the new instance to come up
        new_desired = 2
        self.asg.set_capacity(new_desired)
        self.tester.wait_for_result(wait_for_instances, True, number=new_desired, timeout=360)
        # TODO get the new instance ID and verify it launched with the correct type
        last_instance = self.tester.get_instances(idstring=self.tester.get_last_instance_id())[0]
        assert last_instance.instance_type == "m1.large"

        ### Delete Auto Scaling Group
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### delete launch configs
        self.tester.delete_launch_config(first_launch_config)
        self.tester.delete_launch_config(second_launch_config)

    def gracefully_delete(self, asg=None):
        if not asg:
            asg = self.asg
        assert isinstance(asg, AutoScalingGroup)
        try:
            self.tester.delete_as_group(name=asg.name, force=True)
        except BotoServerError as e:
            if e.status == 400 and e.reason == "ScalingActivityInProgress":
                return False
        return True
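
# A typical eutester-style entry point for running this module directly is sketched
# below. It assumes the standard EutesterTestCase helpers (create_testunit_by_name,
# run_test_case_list) and the --tests argument added by setup_parser; the default
# list of test names is illustrative only.
if __name__ == "__main__":
    testcase = AutoScalingBasics()
    ### Either use the --tests list passed on the command line or a default subset
    test_list = testcase.args.tests or ["AutoScalingBasics", "change_config"]
    ### Convert the named test methods into eutester test units
    unit_list = []
    for test in test_list:
        unit_list.append(testcase.create_testunit_by_name(test))
    ### Run the tests and clean up cloud resources on exit
    result = testcase.run_test_case_list(unit_list, clean_on_exit=True)
    exit(result)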