def __init__(self, extra_args=None):
    """Build the test fixture: parse arguments, connect a tester, and create
    the security group, keypair and image used by the tests.

    :param extra_args: optional list of extra CLI flags to register
    """
    self.setuptestcase()
    self.setup_parser()
    if extra_args:
        for extra in extra_args:
            self.parser.add_argument(extra)
    self.get_args()
    # Set up the basic nephoria connection object.
    if self.args.region:
        self.tester = Eucaops(credpath=self.args.credpath,
                              region=self.args.region)
    else:
        self.tester = Eucaops(credpath=self.args.credpath,
                              config_file=self.args.config,
                              password=self.args.password)
    # Add and authorize a security group for the instance.
    self.group = self.tester.ec2.add_group(
        group_name="group-" + str(time.time()))
    self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
    self.tester.ec2.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
    # Generate a keypair for the instance and remember the local .pem path.
    self.keypair = self.tester.ec2.create_keypair_and_localcert(
        "keypair-" + str(time.time()))
    self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
    self.image = self.args.emi
    if not self.image:
        self.image = self.tester.ec2.get_emi()
    self.address = None
    self.asg = None
def __init__(self, extra_args=None):
    """Set up the CloudWatch test fixture: parser, tester, keypair/group,
    autoscaling resources, dimensions and alarms, then wait for metrics.

    :param extra_args: optional list of extra CLI flags to register
    """
    self.setuptestcase()
    self.setup_parser()
    # NOTE(review): action='store_true' combined with default=True means this
    # flag can never be turned off from the command line — confirm intent.
    # Fix: removed the stray ')' that had crept into the help text.
    self.parser.add_argument('--clean_on_exit', action='store_true',
                             default=True,
                             help='Boolean, used to flag whether to run '
                                  'clean up method after running test list')
    if extra_args:
        for arg in extra_args:
            self.parser.add_argument(arg)
    self.get_args()
    # Set up the basic nephoria connection object.
    if self.args.region:
        self.tester = CWops(credpath=self.args.credpath,
                            region=self.args.region)
    else:
        self.tester = Eucaops(config_file=self.args.config,
                              password=self.args.password,
                              credpath=self.args.credpath)
    self.start_time = str(int(time.time()))
    self.zone = self.tester.ec2.get_zones()
    self.namespace = 'Namespace-' + self.start_time
    self.keypair = self.tester.ec2.create_keypair_and_localcert()
    self.group = self.tester.ec2.add_group()
    # Set up AutoScaling resources used by the tests.
    self.setUpAutoscaling()
    # Create the dimensions used in nephoria_unit_tests.
    self.instanceDimension = newDimension('InstanceId', self.instanceid)
    self.volumeDimension = newDimension('VolumeId', self.volume.id)
    self.autoScalingDimension = newDimension('AutoScalingGroupName',
                                             self.auto_scaling_group_name)
    # Set up the alarms, then wait (up to 30 minutes) for metrics to populate.
    self.setUpAlarms()
    self.tester.wait_for_result(self.IsMetricsListPopulated, result=True,
                                timeout=1800)
def __init__(self, extra_args=None):
    """Prepare the ELB test fixture: tester, security group, keypair, image,
    and a set of web servers registered behind a new load balancer.

    :param extra_args: optional list of extra CLI flags to register
    """
    self.setuptestcase()
    self.setup_parser()
    if extra_args:
        for extra in extra_args:
            self.parser.add_argument(extra)
    self.get_args()
    # Set up the basic nephoria connection object.
    if self.args.region:
        self.tester = ELBops(credpath=self.args.credpath,
                             region=self.args.region)
    else:
        self.tester = Eucaops(credpath=self.args.credpath,
                              config_file=self.args.config,
                              password=self.args.password)
    self.tester.poll_count = 120
    # Add and authorize a security group for the instances.
    self.group = self.tester.ec2.add_group(
        group_name="group-" + str(int(time.time())))
    self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
    self.tester.ec2.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
    # Generate a keypair for the instances.
    self.keypair = self.tester.ec2.create_keypair_and_localcert(
        "keypair-" + str(int(time.time())))
    self.keypath = "%s/%s.pem" % (os.curdir, self.keypair.name)
    # Pick an image.
    self.image = self.args.emi
    if not self.image:
        self.image = self.tester.ec2.get_emi()
    # Pick a random availability zone.
    zones = self.tester.ec2.connection.get_all_zones()
    self.zone = random.choice(zones).name
    self.load_balancer_port = 80
    (self.web_servers, self.filename) = self.tester.ec2.create_web_servers(
        keypair=self.keypair, group=self.group, zone=self.zone,
        port=self.load_balancer_port, filename="instance-name",
        image=self.image)
    self.load_balancer = self.tester.elb.create_load_balancer(
        zones=[self.zone], name="test-" + str(int(time.time())),
        load_balancer_port=self.load_balancer_port)
    assert isinstance(self.load_balancer, LoadBalancer)
    self.tester.elb.register_lb_instances(self.load_balancer.name,
                                          self.web_servers.instances)
def __init__(self):
    """Parse arguments and build the basic nephoria tester object."""
    self.setuptestcase()
    self.setup_parser()
    self.parser.add_argument("--no-cleanup", action="store_true")
    self.get_args()
    # Set up the basic nephoria connection object.
    self.tester = Eucaops(credpath=self.args.credpath,
                          config_file=self.args.config,
                          password=self.args.password)
    self.testers = []
def setup_dns(self):
    """Enable Eucalyptus DNS delegation/instance DNS and point the system
    DNS domain and nameserver at the enabled CLC."""
    if not hasattr(self.tester, 'service_manager'):
        # Rebuild the tester from the config file so service_manager exists.
        # Fix: every other tester construction in this file reads the config
        # path from self.args.config; self.args.config_file raised
        # AttributeError here.
        self.tester = Eucaops(config_file=self.args.config,
                              password=self.args.password)
    self.tester.modify_property("bootstrap.webservices.use_dns_delegation",
                                "true")
    self.tester.modify_property("bootstrap.webservices.use_instance_dns",
                                "true")
    enabled_clc = self.tester.service_manager.get_enabled_clc()
    # Derive the DNS domain from the enabled CLC's short hostname.
    hostname = enabled_clc.machine.sys('hostname')[0].split(".")[0]
    domain = hostname + ".autoqa.qa1.eucalyptus-systems.com"
    self.tester.modify_property("system.dns.dnsdomain", domain)
    self.tester.modify_property("system.dns.nameserveraddress",
                                enabled_clc.hostname)
class SOSreport(EutesterTestCase):
    """Install the SOS report tooling on every component machine and run
    sosreport, collecting output under a ticket-numbered remote directory."""

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.start_time = int(time.time())
        self.parser.add_argument("--ticket-number",
                                 default=str(self.start_time))
        self.parser.add_argument("--timeout", default=1200, type=int)
        self.parser.add_argument("--remote-dir", default="/root/")
        self.parser.add_argument("--local-dir", default=os.getcwd())
        self.parser.add_argument(
            "--package-url",
            default="http://downloads.eucalyptus.com/software/tools/centos/6/"
                    "x86_64/eucalyptus-sos-plugins-0.1.5-0.el6.noarch.rpm")
        self.get_args()
        self.remote_dir = (self.args.remote_dir + "/euca-sosreport-" +
                           self.args.ticket_number + "/")
        # Set up the basic nephoria object; cloud credentials are not needed.
        self.tester = Eucaops(config_file=self.args.config,
                              password=self.args.password,
                              download_creds=False)

    def clean_method(self):
        """Nothing to clean up for this test."""
        pass

    def Install(self):
        """Install the sos package and the Eucalyptus SOS plugins on every
        component machine (VMware hosts are skipped)."""
        for machine in self.tester.get_component_machines():
            assert isinstance(machine, Machine)
            # Fix: compare strings with '==', not 'is' — identity comparison
            # against a literal is not guaranteed to match equal strings.
            if machine.distro.name == "vmware":
                continue
            machine.install("sos")
            machine.sys("yum install -y " + self.args.package_url)

    def Run(self):
        """Run sosreport on every component machine, accumulating per-machine
        errors and raising a single exception at the end if any occurred."""
        error_msg = ""
        for machine in self.tester.get_component_machines():
            try:
                assert isinstance(machine, Machine)
                # Fix: '==' instead of 'is' for the string comparison.
                if machine.distro.name == "vmware":
                    continue
                machine.sys("mkdir -p " + self.args.remote_dir)
                machine.sys("sosreport --batch --skip-plugins=emc --tmp-dir "
                            + self.args.remote_dir + " --ticket-number "
                            + str(self.args.ticket_number),
                            code=0, timeout=self.args.timeout)
            except Exception as e:
                error_msg += ('Error running SOS report on:' +
                              str(machine.hostname) + '. Error:' + str(e))
        if error_msg:
            raise Exception(error_msg)
class CreateUser(EutesterTestCase):
    """Create test accounts, each with a group carrying an allow-all policy
    and a set of users with login profiles."""

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        # Fix: the numeric counts are declared with type=int so values passed
        # on the command line (which argparse delivers as strings) work with
        # xrange() below instead of raising TypeError.
        self.parser.add_argument("--account-number", default=1, type=int)
        self.parser.add_argument("--account-prefix", default="test-account-")
        self.parser.add_argument("--group-prefix", default="test-group-")
        self.parser.add_argument("--user-prefix", default="test-user-")
        self.parser.add_argument("--password-prefix", default="mypassword")
        self.parser.add_argument("--user-number", default=1, type=int)
        self.get_args()
        # Set up the basic nephoria connection object.
        self.tester = Eucaops(config_file=self.args.config,
                              password=self.args.password,
                              credpath=self.args.credpath)

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def create_users(self):
        """Create the requested accounts, each with one group (holding an
        allow-all IAM policy) and the requested number of users."""
        allow_all_policy = """{
            "Statement": [
                {
                    "Action": "*",
                    "Effect": "Allow",
                    "Resource": "*"
                }]
        }"""
        for i in xrange(self.args.account_number):
            account_name = self.args.account_prefix + str(i)
            group_name = self.args.group_prefix + str(i)
            password = self.args.password_prefix + str(i)
            self.tester.iam.create_account(account_name)
            self.tester.iam.create_group(group_name, "/", account_name)
            self.tester.iam.attach_policy_group(group_name, "allow-all",
                                                allow_all_policy,
                                                account_name)
            for k in xrange(self.args.user_number):
                user_name = self.args.user_prefix + str(k)
                self.tester.iam.create_user(user_name, "/", account_name)
                self.tester.iam.add_user_to_group(group_name, user_name,
                                                  account_name)
                self.tester.iam.create_login_profile(user_name, password,
                                                     account_name)
def __init__(self):
    """Parse args, connect a tester, and start the account-backup shell
    script with its header and admin-credential bootstrap lines."""
    self.setuptestcase()
    self.setup_parser()
    self.get_args()
    self.tester = Eucaops(config_file=self.args.config,
                          password=self.args.password,
                          credpath=self.args.credpath)
    # Open the generated shell script and emit its preamble.
    self.backup_file = open('account_backup.sh', 'w')
    for preamble in (
            "#!/bin/bash\n\nEIAMDIR=/root/USERCREDS\n\n",
            "mkdir $EIAMDIR; mkdir $EIAMDIR/eucalyptus-admin; "
            "cd $EIAMDIR/eucalyptus-admin\n",
            "euca-get-credentials admin.zip; unzip admin.zip; "
            "source eucarc; cd $EIAMDIR/\n"):
        self.backup_file.write(preamble)
def __init__(self):
    """Parse the account/user creation options and build the tester."""
    self.setuptestcase()
    self.setup_parser()
    # Fix: counts are declared with type=int so command-line values (which
    # argparse delivers as strings) behave like the integer defaults.
    self.parser.add_argument("--account-number", default=1, type=int)
    self.parser.add_argument("--account-prefix", default="test-account-")
    self.parser.add_argument("--group-prefix", default="test-group-")
    self.parser.add_argument("--user-prefix", default="test-user-")
    self.parser.add_argument("--password-prefix", default="mypassword")
    self.parser.add_argument("--user-number", default=1, type=int)
    self.get_args()
    # Set up the basic nephoria connection object.
    self.tester = Eucaops(config_file=self.args.config,
                          password=self.args.password,
                          credpath=self.args.credpath)
def __init__(self):
    """Set up the S3 bucket-test fixture and a unique bucket-name prefix."""
    self.setuptestcase()
    self.setup_parser()
    self.parser.add_argument("--endpoint", default=None)
    self.get_args()
    # Set up the basic nephoria connection object; a raw S3 endpoint wins
    # over a full Eucalyptus config when one was given.
    if self.args.endpoint:
        self.tester = S3ops(credpath=self.args.credpath,
                            endpoint=self.args.endpoint)
    else:
        self.tester = Eucaops(credpath=self.args.credpath,
                              config_file=self.args.config,
                              password=self.args.password)
    self.bucket_prefix = ("nephoria-bucket-test-suite-" +
                          str(int(time.time())))
    self.buckets_used = set()
def __init__(self):
    """Parse the SOS-report options and build a credential-less tester."""
    self.setuptestcase()
    self.setup_parser()
    self.start_time = int(time.time())
    self.parser.add_argument("--ticket-number", default=str(self.start_time))
    self.parser.add_argument("--timeout", default=1200, type=int)
    self.parser.add_argument("--remote-dir", default="/root/")
    self.parser.add_argument("--local-dir", default=os.getcwd())
    self.parser.add_argument(
        "--package-url",
        default="http://downloads.eucalyptus.com/software/tools/centos/6/"
                "x86_64/eucalyptus-sos-plugins-0.1.5-0.el6.noarch.rpm")
    self.get_args()
    self.remote_dir = (self.args.remote_dir + "/euca-sosreport-" +
                       self.args.ticket_number + "/")
    # Set up the basic nephoria object; cloud credentials are not needed.
    self.tester = Eucaops(config_file=self.args.config,
                          password=self.args.password,
                          download_creds=False)
class SetupDNS(EutesterTestCase):
    """Configure Eucalyptus dynamic DNS against the enabled CLC."""

    def __init__(self, name="SetupDNS"):
        super(SetupDNS, self).__init__(name=name)
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config,
                              password=self.args.password)

    def clean_method(self):
        """Nothing to clean up for this test."""
        pass

    def setup_dns(self):
        """Enable DNS delegation/instance DNS and point the system DNS
        domain and nameserver at the enabled CLC."""
        if not hasattr(self.tester, 'service_manager'):
            # Fix: __init__ reads the config path from self.args.config;
            # self.args.config_file raised AttributeError here.
            self.tester = Eucaops(config_file=self.args.config,
                                  password=self.args.password)
        self.tester.modify_property(
            "bootstrap.webservices.use_dns_delegation", "true")
        self.tester.modify_property(
            "bootstrap.webservices.use_instance_dns", "true")
        enabled_clc = self.tester.service_manager.get_enabled_clc()
        # Derive the DNS domain from the enabled CLC's short hostname.
        hostname = enabled_clc.machine.sys('hostname')[0].split(".")[0]
        domain = hostname + ".autoqa.qa1.eucalyptus-systems.com"
        self.tester.modify_property("system.dns.dnsdomain", domain)
        self.tester.modify_property("system.dns.nameserveraddress",
                                    enabled_clc.hostname)
def __init__(self, name="SetupDNS"):
    """Initialize the test case and build the tester from config/password."""
    super(SetupDNS, self).__init__(name=name)
    self.setuptestcase()
    self.setup_parser()
    self.get_args()
    self.tester = Eucaops(config_file=self.args.config,
                          password=self.args.password)
class IamBackup(EutesterTestCase):
    """Generate a shell script (account_backup.sh) that recreates every
    non-eucalyptus account with its users, groups, policies and creds."""

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config,
                              password=self.args.password,
                              credpath=self.args.credpath)
        # Open the generated script and write its preamble.
        self.backup_file = open('account_backup.sh', 'w')
        self.backup_file.write("#!/bin/bash\n\nEIAMDIR=/root/USERCREDS\n\n")
        self.backup_file.write("mkdir $EIAMDIR; mkdir $EIAMDIR/eucalyptus-admin; cd $EIAMDIR/eucalyptus-admin\n")
        self.backup_file.write("euca-get-credentials admin.zip; unzip admin.zip; source eucarc; cd $EIAMDIR/\n")

    def clean_method(self):
        pass

    def backup_all(self):
        """Walk every non-eucalyptus account and emit the euare/euca
        commands that recreate its users, groups and policies."""
        accounts = []
        all_accounts = self.tester.iam.get_all_accounts()
        for account in all_accounts:
            # Skip the built-in eucalyptus account.
            if not re.search('eucalyptus', account['account_name']):
                accounts.append(account)
        for account in accounts:
            account_name = account['account_name']
            self.backup_file.write("euare-accountcreate -a %s\n" % account_name)
            self.backup_file.write("mkdir $EIAMDIR/%s; cd $EIAMDIR/%s\n"
                                   % (account_name, account_name))
            self.backup_file.write("euca-get-credentials -a %s %s.zip\n"
                                   % (account_name, account_name))
            self.backup_file.write("unzip %s.zip; source eucarc\n" % account_name)
            # NOTE(review): the user value below appears credential-scrubbed
            # ('******'); confirm the intended account user (likely 'admin').
            new_tester = Eucaops(config_file=self.args.config,
                                 password=self.args.password,
                                 account=account_name, user='******')
            users = new_tester.iam.get_users_from_account()
            for user in users:
                user_name = user['user_name']
                user_path = user['path']
                self.tester.debug("Got user name '%s'" % user_name)
                # The default admin user already exists in a fresh account,
                # so only non-admin users are recreated and backed up.
                if user_name != 'admin':
                    self.backup_file.write("euare-usercreate -u %s -p %s\n"
                                           % (user_name, user_path))
                    self.backup_file.write(
                        "mkdir $EIAMDIR/%s/%s; cd $EIAMDIR/%s/%s\n"
                        % (account_name, user_name, account_name, user_name))
                    self.backup_file.write(
                        "euca-get-credentials -a %s -u %s %s.zip; cd $EIAMDIR/%s\n"
                        % (account_name, user_name, user_name, account_name))
                    # Fix: the debug format string had lost its '%s'
                    # placeholder, so applying '%' raised TypeError at runtime.
                    self.tester.debug("Getting policies of user: '%s'" % user_name)
                    user_policies = new_tester.iam.get_user_policies(user_name)
                    for policy in user_policies:
                        pol = urllib2.unquote(policy['policy_document'])
                        pol = pol.replace('\'', '\"')
                        self.backup_file.write(
                            "euare-useruploadpolicy -u %s -p %s -o '%s'\n"
                            % (user_name, policy['policy_name'], pol))
            groups = new_tester.iam.get_groups_from_account()
            for group in groups:
                group_name = group['group_name']
                group_path = group['path']
                self.backup_file.write("euare-groupcreate -g %s -p %s\n"
                                       % (group_name, group_path))
                group_users = new_tester.iam.get_users_from_group(
                    group_name=group_name)
                for user in group_users:
                    self.backup_file.write("euare-groupadduser -g %s -u %s\n"
                                           % (group_name, user['user_name']))
            self.backup_file.write("cd $EIAMDIR/\n")
        self.backup_file.write("source $EIAMDIR/eucalyptus-admin/eucarc\n")
        self.backup_file.write("\n")
        self.backup_file.close()
def __init__(self, name="InstanceBasics", credpath=None, region=None,
             config_file=None, password=None, emi=None, zone=None,
             user_data=None, instance_user=None, **kwargs):
    """
    EC2 API nephoria_unit_tests focused on instance store instances

    :param credpath: Path to directory containing eucarc file
    :param region: EC2 Region to run testcase in
    :param config_file: Configuration file path
    :param password: SSH password for bare metal machines if config is passed and keys arent synced
    :param emi: Image id to use for test
    :param zone: Availability Zone to run test in
    :param user_data: User Data to pass to instance
    :param instance_user: User to login to instance as
    :param kwargs: Additional arguments
    """
    super(InstanceBasics, self).__init__(name=name)
    self.get_args()
    self.show_args()
    for kw in kwargs:
        print('Setting kwarg:' + str(kw) + " to " + str(kwargs[kw]))
        self.set_arg(kw, kwargs[kw])
    self.show_args()
    if self.args.region:
        # Fix: was self.args.redpath (AttributeError) — the argument is
        # named 'credpath'.
        self.tester = EC2ops(credpath=self.args.credpath,
                             region=self.args.region)
    else:
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password,
                              credpath=self.args.credpath)
    self.instance_timeout = 600
    # Add and authorize a security group for the instance.
    self.group = self.tester.ec2.add_group(
        group_name="group-" + str(time.time()))
    self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
    self.tester.ec2.authorize_group_by_name(group_name=self.group.name,
                                            port=-1, protocol="icmp")
    # Generate a keypair for the instance.
    self.keypair = self.tester.ec2.create_keypair_and_localcert(
        "keypair-" + str(time.time()))
    self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
    if emi:
        self.image = self.tester.ec2.get_emi(emi=self.args.emi)
    else:
        self.image = self.tester.ec2.get_emi(
            root_device_type="instance-store", basic_image=True)
    self.address = None
    self.volume = None
    self.private_addressing = False
    if not self.args.zone:
        zones = self.tester.ec2.connection.get_all_zones()
        self.zone = random.choice(zones).name
    else:
        self.zone = self.args.zone
    self.reservation = None
    self.reservation_lock = threading.Lock()
    self.run_instance_params = {'image': self.image,
                                'user_data': self.args.user_data,
                                'username': self.args.instance_user,
                                'keypair': self.keypair.name,
                                'group': self.group.name,
                                'zone': self.zone,
                                'return_reservation': True,
                                'timeout': self.instance_timeout}
    self.managed_network = True
    ### If I have access to the underlying infrastructure I can look
    ### at the network mode and only run certain nephoria_unit_tests where it makes sense
    if hasattr(self.tester, "service_manager"):
        cc = self.tester.get_component_machines("cc")[0]
        network_mode = cc.sys("cat " + self.tester.eucapath +
                              "/etc/eucalyptus/eucalyptus.conf | grep MODE")[0]
        if re.search("(SYSTEM|STATIC)", network_mode):
            self.managed_network = False
class InstanceBasics(EutesterTestCase):
    """EC2 API nephoria_unit_tests focused on instance store instances."""

    def __init__(self, name="InstanceBasics", credpath=None, region=None,
                 config_file=None, password=None, emi=None, zone=None,
                 user_data=None, instance_user=None, **kwargs):
        """
        EC2 API nephoria_unit_tests focused on instance store instances

        :param credpath: Path to directory containing eucarc file
        :param region: EC2 Region to run testcase in
        :param config_file: Configuration file path
        :param password: SSH password for bare metal machines if config is passed and keys arent synced
        :param emi: Image id to use for test
        :param zone: Availability Zone to run test in
        :param user_data: User Data to pass to instance
        :param instance_user: User to login to instance as
        :param kwargs: Additional arguments
        """
        super(InstanceBasics, self).__init__(name=name)
        self.get_args()
        self.show_args()
        for kw in kwargs:
            print('Setting kwarg:' + str(kw) + " to " + str(kwargs[kw]))
            self.set_arg(kw, kwargs[kw])
        self.show_args()
        if self.args.region:
            # Fix: was self.args.redpath (AttributeError) — the argument is
            # named 'credpath'.
            self.tester = EC2ops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config_file,
                                  password=self.args.password,
                                  credpath=self.args.credpath)
        self.instance_timeout = 600
        # Add and authorize a security group for the instance.
        self.group = self.tester.ec2.add_group(
            group_name="group-" + str(time.time()))
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name,
                                                port=-1, protocol="icmp")
        # Generate a keypair for the instance.
        self.keypair = self.tester.ec2.create_keypair_and_localcert(
            "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        if emi:
            self.image = self.tester.ec2.get_emi(emi=self.args.emi)
        else:
            self.image = self.tester.ec2.get_emi(
                root_device_type="instance-store", basic_image=True)
        self.address = None
        self.volume = None
        self.private_addressing = False
        if not self.args.zone:
            zones = self.tester.ec2.connection.get_all_zones()
            self.zone = random.choice(zones).name
        else:
            self.zone = self.args.zone
        self.reservation = None
        self.reservation_lock = threading.Lock()
        self.run_instance_params = {'image': self.image,
                                    'user_data': self.args.user_data,
                                    'username': self.args.instance_user,
                                    'keypair': self.keypair.name,
                                    'group': self.group.name,
                                    'zone': self.zone,
                                    'return_reservation': True,
                                    'timeout': self.instance_timeout}
        self.managed_network = True
        ### If I have access to the underlying infrastructure I can look
        ### at the network mode and only run certain nephoria_unit_tests where it makes sense
        if hasattr(self.tester, "service_manager"):
            cc = self.tester.get_component_machines("cc")[0]
            network_mode = cc.sys(
                "cat " + self.tester.eucapath +
                "/etc/eucalyptus/eucalyptus.conf | grep MODE")[0]
            if re.search("(SYSTEM|STATIC)", network_mode):
                self.managed_network = False

    def set_reservation(self, reservation):
        # Serialize writes: tests may run concurrently against the fixture.
        self.reservation_lock.acquire()
        self.reservation = reservation
        self.reservation_lock.release()

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def BasicInstanceChecks(self):
        """
        This case was developed to run through a series of basic instance nephoria_unit_tests.
        The nephoria_unit_tests are as follows:
            - execute run_instances command
            - make sure that public DNS name and private IP aren't the same
              (This is for Managed/Managed-NOVLAN networking modes)
            - test to see if instance is ping-able
            - test to make sure that instance is accessible via ssh
              (ssh into instance and run basic ls command)
        If any of these nephoria_unit_tests fail, the test case will error out, logging the results.
        """
        reservation = self.tester.ec2.run_image(**self.run_instance_params)
        for instance in reservation.instances:
            self.assertTrue(self.tester.ec2.wait_for_reservation(reservation),
                            'Instance did not go to running')
            self.assertTrue(self.tester.ping(instance.ip_address),
                            'Could not ping instance')
            if self.image.virtualization_type == "paravirtual":
                paravirtual_ephemeral = "/dev/" + instance.rootfs_device + "2"
                self.assertFalse(
                    instance.found("ls -1 " + paravirtual_ephemeral,
                                   "No such file or directory"),
                    "Did not find ephemeral storage at " + paravirtual_ephemeral)
            elif self.image.virtualization_type == "hvm":
                hvm_ephemeral = "/dev/" + instance.block_device_prefix + "b"
                self.assertFalse(
                    instance.found("ls -1 " + hvm_ephemeral,
                                   "No such file or directory"),
                    "Did not find ephemeral storage at " + hvm_ephemeral)
            self.debug("Pinging instance public IP from inside instance")
            instance.sys('ping -c 1 ' + instance.ip_address, code=0)
            self.debug("Pinging instance private IP from inside instance")
            instance.sys('ping -c 1 ' + instance.private_ip_address, code=0)
        self.set_reservation(reservation)
        return reservation

    def ElasticIps(self):
        """
        This case was developed to test elastic IPs in Eucalyptus. This test case does
        not test instances that are launched using private-addressing option.
        The test case executes the following nephoria_unit_tests:
            - allocates an IP, associates the IP to the instance, then pings the instance.
            - disassociates the allocated IP, then pings the instance.
            - releases the allocated IP address
        If any of the nephoria_unit_tests fail, the test case will error out, logging the results.
        """
        if not self.reservation:
            reservation = self.tester.ec2.run_image(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            if instance.ip_address == instance.private_ip_address:
                self.tester.debug("WARNING: System or Static mode detected, "
                                  "skipping ElasticIps")
                return reservation
            self.address = self.tester.ec2.allocate_address(
                domain=instance.vpc_id)
            self.assertTrue(self.address, 'Unable to allocate address')
            self.tester.ec2.associate_address(instance, self.address)
            instance.update()
            self.assertTrue(self.tester.ping(instance.ip_address),
                            "Could not ping instance with new IP")
            self.tester.ec2.disassociate_address_from_instance(instance)
            self.tester.ec2.release_address(self.address)
            self.address = None
            assert isinstance(instance, EuInstance)
            self.tester.sleep(5)
            instance.update()
            self.assertTrue(self.tester.ping(instance.ip_address),
                            "Could not ping after dissassociate")
        self.set_reservation(reservation)
        return reservation

    def MultipleInstances(self):
        """
        This case was developed to test the maximum number of m1.small vm types a configured
        cloud can run. The test runs the maximum number of m1.small vm types allowed, then
        nephoria_unit_tests to see if all the instances reached a running state. If there is a
        failure, the test case errors out; logging the results.
        """
        if self.reservation:
            self.tester.ec2.terminate_instances(self.reservation)
            self.set_reservation(None)
        reservation = self.tester.ec2.run_image(min=2, max=2,
                                                **self.run_instance_params)
        self.assertTrue(self.tester.ec2.wait_for_reservation(reservation),
                        'Not all instances went to running')
        self.set_reservation(reservation)
        return reservation

    def LargestInstance(self):
        """
        This case was developed to test the maximum number of c1.xlarge vm types a configured
        cloud can run. The test runs the maximum number of c1.xlarge vm types allowed, then
        nephoria_unit_tests to see if all the instances reached a running state.
        If there is a failure, the test case errors out; logging the results.
        """
        if self.reservation:
            self.tester.ec2.terminate_instances(self.reservation)
            self.set_reservation(None)
        reservation = self.tester.ec2.run_image(type="c1.xlarge",
                                                **self.run_instance_params)
        self.assertTrue(self.tester.ec2.wait_for_reservation(reservation),
                        'Not all instances went to running')
        self.set_reservation(reservation)
        return reservation

    def MetaData(self):
        """
        This case was developed to test the metadata service of an instance for consistency.
        The following meta-data attributes are tested:
            - public-keys/0/openssh-key
            - security-groups
            - instance-id
            - local-ipv4
            - public-ipv4
            - ami-id
            - ami-launch-index
            - reservation-id
            - placement/availability-zone
            - kernel-id
            - public-hostname
            - local-hostname
            - hostname
            - ramdisk-id
            - instance-type
            - any bad metadata that shouldn't be present.
        Missing nodes ['block-device-mapping/', 'ami-manifest-path']
        If any of these nephoria_unit_tests fail, the test case will error out; logging the results.
        """
        if not self.reservation:
            reservation = self.tester.ec2.run_image(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            ## Need to verify the public key (could just be checking for a string of a certain length)
            self.assertTrue(
                re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1],
                         self.keypair.name),
                'Incorrect public key in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("security-groups")[0],
                         self.group.name),
                'Incorrect security group in metadata')
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], ""))
            self.assertTrue(
                re.match(instance.get_metadata("instance-id")[0], instance.id),
                'Incorrect instance id in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("local-ipv4")[0],
                         instance.private_ip_address),
                'Incorrect private ip in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("public-ipv4")[0],
                         instance.ip_address),
                'Incorrect public ip in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("ami-id")[0],
                         instance.image_id),
                'Incorrect ami id in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("ami-launch-index")[0],
                         instance.ami_launch_index),
                'Incorrect launch index in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("reservation-id")[0],
                         reservation.id),
                'Incorrect reservation in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("placement/availability-zone")[0],
                         instance.placement),
                'Incorrect availability-zone in metadata')
            if self.image.virtualization_type == "paravirtual":
                self.assertTrue(
                    re.match(instance.get_metadata("kernel-id")[0],
                             instance.kernel),
                    'Incorrect kernel id in metadata')
                self.assertTrue(
                    re.match(instance.get_metadata("ramdisk-id")[0],
                             instance.ramdisk),
                    'Incorrect ramdisk in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("public-hostname")[0],
                         instance.public_dns_name),
                'Incorrect public host name in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("local-hostname")[0],
                         instance.private_dns_name),
                'Incorrect private host name in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("hostname")[0],
                         instance.private_dns_name),
                'Incorrect host name in metadata')
            self.assertTrue(
                re.match(instance.get_metadata("instance-type")[0],
                         instance.instance_type),
                'Incorrect instance type in metadata')
            bad_meta_data_keys = ['foobar']
            for key in bad_meta_data_keys:
                self.assertTrue(
                    re.search("Not Found", "".join(instance.get_metadata(key))),
                    'No fail message on invalid meta-data node')
        self.set_reservation(reservation)
        return reservation

    def DNSResolveCheck(self):
        """
        This case was developed to test DNS resolution information for public/private DNS
        names and IP addresses. The tested DNS resolution behavior is expected to follow
        AWS EC2. The following nephoria_unit_tests are ran using the associated meta-data attributes:
            - check to see if Eucalyptus Dynamic DNS is configured
            - nslookup on hostname; checks to see if it matches local-ipv4
            - nslookup on local-hostname; check to see if it matches local-ipv4
            - nslookup on local-ipv4; check to see if it matches local-hostname
            - nslookup on public-hostname; check to see if it matches local-ipv4
            - nslookup on public-ipv4; check to see if it matches public-host
        If any of these nephoria_unit_tests fail, the test case will error out; logging the results.
        """
        if not self.reservation:
            reservation = self.tester.ec2.run_image(**self.run_instance_params)
        else:
            reservation = self.reservation

        def validate_instance_dns():
            try:
                for instance in reservation.instances:
                    if not re.search("internal", instance.private_dns_name):
                        self.tester.debug("Did not find instance DNS enabled, "
                                          "skipping test")
                        self.set_reservation(reservation)
                        return reservation
                    self.debug('\n'
                               '# Test to see if Dynamic DNS has been configured \n'
                               '# Per AWS standard, resolution should have private hostname or '
                               'private IP as a valid response\n'
                               '# Perform DNS resolution against public IP and public DNS name\n'
                               '# Perform DNS resolution against private IP and private DNS name\n'
                               '# Check to see if nslookup was able to resolve\n')
                    assert isinstance(instance, EuInstance)
                    self.debug('Check nslookup to resolve public DNS Name to local-ipv4 address')
                    self.assertTrue(
                        instance.found("nslookup " + instance.public_dns_name,
                                       instance.private_ip_address),
                        "Incorrect DNS resolution for hostname.")
                    self.debug('Check nslookup to resolve public-ipv4 address to public DNS name')
                    if self.managed_network:
                        self.assertTrue(
                            instance.found("nslookup " + instance.ip_address,
                                           instance.public_dns_name),
                            "Incorrect DNS resolution for public IP address")
                    self.debug('Check nslookup to resolve private DNS Name to local-ipv4 address')
                    if self.managed_network:
                        self.assertTrue(
                            instance.found("nslookup " + instance.private_dns_name,
                                           instance.private_ip_address),
                            "Incorrect DNS resolution for private hostname.")
                    self.debug('Check nslookup to resolve local-ipv4 address to private DNS name')
                    self.assertTrue(
                        instance.found("nslookup " + instance.private_ip_address,
                                       instance.private_dns_name),
                        "Incorrect DNS resolution for private IP address")
                    self.debug('Attempt to ping instance public_dns_name')
                    self.assertTrue(self.tester.ping(instance.public_dns_name))
                return True
            except Exception as e:
                # Treat any failure as "not yet resolvable"; the caller retries.
                return False

        self.tester.ec2.wait_for_result(validate_instance_dns, True,
                                        timeout=120)
        self.set_reservation(reservation)
        return reservation
class BucketTestSuite(EutesterTestCase):
    """Basic S3 bucket lifecycle tests: create/get/delete plus bucket-name validation."""

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--endpoint", default=None)
        self.get_args()
        # Setup basic nephoria object: a plain S3 connection when an explicit
        # endpoint is supplied, otherwise a full Eucaops cloud connection.
        if self.args.endpoint:
            self.tester = S3ops(credpath=self.args.credpath, endpoint=self.args.endpoint)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        # Timestamped prefix so concurrent runs do not collide on bucket names.
        self.bucket_prefix = "nephoria-bucket-test-suite-" + str(int(time.time()))
        self.buckets_used = set()

    def test_bucket_get_put_delete(self):
        """
        Method: Tests creating and deleting buckets as well as getting the bucket listing
        """
        test_bucket = self.bucket_prefix + "-simple-test-bucket"
        self.buckets_used.add(test_bucket)
        self.tester.debug("Starting get/put/delete bucket test using bucket name: " + test_bucket)

        # Create the bucket; a None return or a service error is a failure.
        try:
            bucket = self.tester.s3.create_bucket(test_bucket)
            if bucket is None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not created correctly")
        except (S3ResponseError, S3CreateError) as e:
            # BUG FIX: was `+ e`, which raises TypeError (str + exception)
            # exactly when this failure path fires; convert to str first.
            self.fail(test_bucket + " create caused exception: " + str(e))

        # Fetch the bucket back to confirm it is visible via get_bucket.
        try:
            bucket = self.tester.s3.get_bucket(test_bucket)
            if bucket is None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not fetched by get_bucket call")
        except S3ResponseError as e:
            self.tester.s3.delete_bucket(test_bucket)
            # BUG FIX: was `+ e` (TypeError); convert to str first.
            self.fail("Exception getting bucket" + str(e))

        # Delete, then verify the bucket is really gone (either get_bucket
        # returns None or the service raises).
        self.tester.s3.delete_bucket(test_bucket)
        try:
            if self.tester.s3.get_bucket(test_bucket) is not None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Delete of " + test_bucket + " failed, still exists")
        except S3ResponseError as e:
            self.tester.debug("Correctly got exception trying to get a deleted bucket! ")

        self.tester.debug("Testing an invalid bucket names, calls should fail.")

        def test_creating_bucket_invalid_names(bad_bucket):
            # Creation of an invalid name must raise; if it succeeds we clean
            # up the unexpectedly-created bucket and fail the test.
            should_fail = False
            try:
                bucket = self.tester.s3.create_bucket(bad_bucket)
                should_fail = True
                try:
                    self.tester.s3.delete_bucket(bucket)
                except:
                    self.tester.debug("Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail")
            except Exception as e:
                # NOTE(review): assumes the raised exception carries a .reason
                # attribute (boto S3 errors do) -- confirm for other exception types.
                self.tester.debug("Correctly caught the exception for bucket name '" + bad_bucket + "' Reason: " + e.reason)
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)

        # with the EUCA-8864 fix, a new property 'objectstorage.bucket_naming_restrictions'
        # has been introduced, now 'bucket..123', 'bucket.' are actually valid bucket names
        # when using 'extended' naming convention.
        # http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
        # when DNS is not being used, for now buckets can be created with bucket
        # names like '/bucket123', 'bucket123/', see EUCA-8863
        # TODO check what bucket naming convention is being used for the test
        for bad_bucket in ["bucket&123", "bucket*123"]:
            test_creating_bucket_invalid_names(self.bucket_prefix + bad_bucket)

        # Test creating bucket with null (empty-string) name: expect HTTP 405
        # MethodNotAllowed from the service.
        try:
            null_bucket_name = ""
            bucket_obj = self.tester.s3.create_bucket(null_bucket_name)
            self.tester.sleep(10)
            if bucket_obj:
                self.fail("Should have caught exception for creating bucket with empty-string name.")
        except S3ResponseError as e:
            assert (e.status == 405), 'Expected response status code to be 405, actual status code is ' + str(e.status)
            assert (re.search("MethodNotAllowed", e.code)), "Incorrect exception returned when creating bucket with null name."
        except Exception as e:
            # Known issue path; see EUCA-7059.
            self.tester.debug("Failed due to EUCA-7059 " + str(e))
class AutoScalingBasics(EutesterTestCase):
    """Exercises basic AutoScaling operations: launch configs, groups, and
    scaling policies (ChangeInCapacity / PercentChangeInCapacity / ExactCapacity)."""

    def __init__(self, extra_args=None):
        # Standard test-case bootstrap: parser, optional extra CLI args, args.
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic nephoria object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config,
                                  password=self.args.password)
        ### Add and authorize a group for the instance (all traffic + ICMP)
        self.group = self.tester.ec2.add_group(group_name="group-" + str(time.time()))
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.ec2.create_keypair_and_localcert("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        # Image under test: CLI-provided EMI, else first EMI found in the cloud.
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.ec2.get_emi()
        self.address = None
        self.asg = None  # set by tests that create a group; checked by clean_method

    def clean_method(self):
        """Delete the ASG (waiting out in-progress scaling activity) and all artifacts."""
        if self.asg:
            # gracefully_delete retries while ScalingActivityInProgress;
            # NOTE(review): delete_as_group is then invoked again below --
            # presumably idempotent, confirm against the ops implementation.
            self.tester.wait_for_result(self.gracefully_delete, True)
            self.tester.autoscaling.delete_as_group(self.asg.name, force=True)
        self.tester.cleanup_artifacts()

    def AutoScalingBasics(self):
        """End-to-end: create LC + ASG, create/execute/delete the three policy
        types, then tear the group and launch config down."""
        ### create launch configuration
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=self.launch_config_name,
                                                     image_id=self.image.id,
                                                     instance_type="m1.small",
                                                     key_name=self.keypair.name,
                                                     security_groups=[self.group.name])
        ### create auto scale group
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.autoscaling.create_as_group(group_name=self.auto_scaling_group_name,
                                                           availability_zones=self.tester.ec2.get_zones(),
                                                           launch_config=self.launch_config_name,
                                                           min_size=0,
                                                           max_size=5)
        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        # NOTE(review): scaling_adjustment=4 duplicates self.up_size -- keep in sync.
        self.tester.autoscaling.create_as_policy(name=self.up_policy_name,
                                                 adjustment_type="ChangeInCapacity",
                                                 scaling_adjustment=4,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=120)
        if len(self.tester.autoscaling.connection.get_all_policies(policy_names=[self.up_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.up_policy_name + ' not created')
        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50  # percent: halves the desired capacity
        self.tester.autoscaling.create_as_policy(name=self.down_policy_name,
                                                 adjustment_type="PercentChangeInCapacity",
                                                 scaling_adjustment=self.down_size,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=120)
        if len(self.tester.autoscaling.connection.get_all_policies(policy_names=[self.down_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.down_policy_name + ' not created')
        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0  # ExactCapacity 0 scales the group to no instances
        self.tester.autoscaling.create_as_policy(name=self.exact_policy_name,
                                                 adjustment_type="ExactCapacity",
                                                 scaling_adjustment=self.exact_size,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=120)
        if len(self.tester.autoscaling.connection.get_all_policies(policy_names=[self.exact_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.exact_policy_name + ' not created')
        self.debug("**** Created Auto Scaling Policies: " + self.up_policy_name + " " +
                   self.down_policy_name + " " + self.exact_policy_name)
        # Let any initial scaling activity settle before executing policies.
        # NOTE(review): call sites mix self.tester.wait_for_result and
        # self.tester.autoscaling.wait_for_result -- confirm both exist.
        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)
        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.autoscaling.execute_as_policy(policy_name=self.up_policy_name,
                                                  as_group=self.auto_scaling_group_name,
                                                  honor_cooldown=False)
        if self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug("Executed ChangeInCapacity policy, increased desired capacity to: " +
                   str(self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.autoscaling.wait_for_result(self.scaling_activities_complete, True, timeout=180)
        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.autoscaling.execute_as_policy(policy_name=self.down_policy_name,
                                                  as_group=self.auto_scaling_group_name,
                                                  honor_cooldown=False)
        # -50% of up_size (4) should leave desired capacity at 2.
        if self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed PercentChangeInCapacity policy, decreased desired capacity to: " +
                   str(self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.autoscaling.wait_for_result(self.scaling_activities_complete, True, timeout=180)
        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.autoscaling.execute_as_policy(policy_name=self.exact_policy_name,
                                                  as_group=self.auto_scaling_group_name,
                                                  honor_cooldown=False)
        if self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.exact_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " +
                   str(self.tester.autoscaling.describe_as_group(self.auto_scaling_group_name).desired_capacity))
        self.tester.autoscaling.wait_for_result(self.scaling_activities_complete, True, timeout=180)
        ### Test Delete all Auto Scaling Policies
        self.tester.autoscaling.delete_all_policies()
        ### Test Delete Auto Scaling Group
        self.tester.autoscaling.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### Test delete launch config
        self.tester.autoscaling.delete_launch_config(self.launch_config_name)

    def scaling_activities_complete(self):
        """Return True when every activity of self.asg reports 100% progress."""
        activities = self.asg.get_activities()
        for activity in activities:
            assert isinstance(activity, Activity)
            # NOTE(review): assumes Activity.progress is numeric here -- boto
            # sometimes returns it as a string; confirm.
            if activity.progress != 100:
                return False
        return True

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and
        TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a 100 LC per account limit
        this nephoria_unit_tests what happens if we create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.autoscaling.create_launch_config(name=self.launch_config_name,
                                                         image_id=self.image.id)
        # After attempting 101 creations the cloud must still cap LCs at 100.
        if len(self.tester.autoscaling.describe_launch_config()) > 100:
            raise Exception("More then 100 launch configs exist in 1 account")
        for lc in self.tester.autoscaling.describe_launch_config():
            self.tester.autoscaling.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        """
        AWS enforces a 25 policy per account limit
        this nephoria_unit_tests what happens if we create more
        """
        launch_config_name = 'LC-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=launch_config_name,
                                                     image_id=self.image.id,
                                                     instance_type="m1.small",
                                                     key_name=self.keypair.name,
                                                     security_groups=[self.group.name])
        asg_name = 'ASG-' + str(time.time())
        self.asg = self.tester.autoscaling.create_as_group(group_name=asg_name,
                                                           launch_config=launch_config_name,
                                                           availability_zones=self.tester.autoscaling.get_zones(),
                                                           min_size=0,
                                                           max_size=5)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.autoscaling.create_as_policy(name=policy_name,
                                                     adjustment_type="ExactCapacity",
                                                     as_name=asg_name,
                                                     scaling_adjustment=0,
                                                     cooldown=120)
        # After attempting 26 creations the cloud must still cap policies at 25.
        if len(self.tester.autoscaling.autoscale.get_all_policies()) > 25:
            raise Exception("More than 25 policies exist for 1 auto scaling group")
        self.tester.autoscaling.wait_for_result(self.gracefully_delete, True)
        self.asg = None

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        """
        remove ALL scaling policies, auto scaling groups and launch configs
        """
        self.tester.autoscaling.delete_all_policies()
        self.tester.autoscaling.delete_all_autoscaling_groups()
        self.tester.autoscaling.delete_all_launch_configs()

    def change_config(self):
        """Verify that swapping an ASG's launch config changes the instance
        type of subsequently launched instances."""
        ### create initial launch configuration
        first_launch_config = 'First-Launch-Config-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=first_launch_config,
                                                     image_id=self.image.id,
                                                     instance_type="m1.small")
        # create a replacement LC with different instance type
        second_launch_config = 'Second-Launch-Config-' + str(time.time())
        self.tester.autoscaling.create_launch_config(name=second_launch_config,
                                                     image_id=self.image.id,
                                                     instance_type="m1.large")
        ### create auto scale group
        auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.autoscaling.create_as_group(group_name=auto_scaling_group_name,
                                                           launch_config=first_launch_config,
                                                           availability_zones=self.tester.ec2.get_zones(),
                                                           min_size=1,
                                                           max_size=4,
                                                           desired_capacity=1)
        assert isinstance(self.asg, AutoScalingGroup)
        self.tester.autoscaling.wait_for_result(self.tester.autoscaling.wait_for_instances, True,
                                                timeout=360, group_name=self.asg.name,
                                                tester=self.tester)
        # Swap in the second launch config; new instances should be m1.large.
        self.tester.autoscaling.update_as_group(group_name=self.asg.name,
                                                launch_config=second_launch_config,
                                                availability_zones=self.tester.ec2.get_zones(),
                                                min_size=1,
                                                max_size=4)
        ### Set desired capacity
        new_desired = 2
        self.asg.set_capacity(new_desired)
        self.tester.autoscaling.wait_for_result(self.tester.autoscaling.wait_for_instances, True,
                                                timeout=360, group_name=self.asg.name,
                                                number=new_desired, tester=self.tester)
        # The most recently launched instance must use the new config's type.
        last_instance = self.tester.ec2.get_instances(
            idstring=self.tester.autoscaling.get_last_instance_id(tester=self.tester))[0]
        assert last_instance.instance_type == "m1.large"
        ### Delete Auto Scaling Group
        self.tester.autoscaling.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### delete launch configs
        self.tester.autoscaling.delete_launch_config(first_launch_config)
        self.tester.autoscaling.delete_launch_config(second_launch_config)

    def gracefully_delete(self, asg=None):
        """Attempt a forced ASG delete; return False (retry) while the service
        reports ScalingActivityInProgress, True once the delete goes through."""
        if not asg:
            asg = self.asg
        assert isinstance(asg, AutoScalingGroup)
        try:
            self.tester.autoscaling.delete_as_group(name=asg.name, force=True)
        except BotoServerError, e:
            if e.status == 400 and e.reason == "ScalingActivityInProgress":
                return False
        return True
class ResourceGeneration(EutesterTestCase):
    """Creates a battery of cloud resources (instances, addresses, volumes,
    snapshots, buckets, keys) as every non-eucalyptus user in the cloud."""

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--no-cleanup", action="store_true")
        self.get_args()
        # Setup basic nephoria object
        self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config,
                              password=self.args.password)
        # One Eucaops per user is appended here by create_resources.
        self.testers = []

    def clean_method(self):
        """Remove artifacts for every per-user tester unless --no-cleanup was given."""
        if not self.args.no_cleanup:
            for tester in self.testers:
                # Best-effort identity dump for the log; never block cleanup.
                try:
                    tester.show_whoami()
                except:
                    pass
                tester.cleanup_artifacts()

    def create_resources(self):
        """For each non-eucalyptus user: build a Eucaops bound to that user's
        new access key, then create one of each basic resource type as them."""
        # FIX: import hoisted out of the per-tester loop below, where it was
        # pointlessly re-executed on every iteration.
        import random
        users = self.tester.iam.get_all_users()
        self.testers.append(self.tester)
        # Best-effort listing for the log.
        try:
            self.tester.iam.show_all_users()
        except:
            pass
        for user in users:
            user_name = user["user_name"]
            user_account = user["account_name"]
            # Skip the built-in eucalyptus account's users.
            if not re.search("eucalyptus", user_account):
                self.tester.debug("Creating access key for " + user_name + " in account " + user_account)
                keys = self.tester.iam.create_access_key(user_name=user_name, delegate_account=user_account)
                access_key = keys["access_key_id"]
                secret_key = keys["secret_access_key"]
                self.tester.debug("Creating Eucaops object with access key " + access_key +
                                  " and secret key " + secret_key)
                # Clone every service endpoint from the admin connection so the
                # per-user tester talks to the same cloud.
                new_tester = Eucaops(
                    username=user_name,
                    account=user_account,
                    aws_access_key_id=access_key,
                    aws_secret_access_key=secret_key,
                    ec2_ip=self.tester.ec2.connection.host,
                    ec2_path=self.tester.ec2.connection.path,
                    iam_ip=self.tester.iam.connection.host,
                    iam_path=self.tester.iam.connection.path,
                    s3_ip=self.tester.s3.connection.host,
                    s3_path=self.tester.s3.connection.path,
                    sts_ip=self.tester.token.connection.host,
                    sts_path=self.tester.token.connection.path,
                    cw_ip=self.tester.cloudwatch.connection.host,
                    cw_path=self.tester.cloudwatch.connection.path,
                    as_ip=self.tester.autoscaling.connection.host,
                    as_path=self.tester.autoscaling.connection.path,
                    elb_ip=self.tester.elb.connection.host,
                    elb_path=self.tester.elb.connection.path,
                )
                self.testers.append(new_tester)
        self.tester.debug("Created a total of " + str(len(self.testers)) + " testers")
        try:
            self.tester.iam.show_all_users()
        except:
            pass
        for resource_tester in self.testers:
            assert isinstance(resource_tester, Eucaops)
            try:
                resource_tester.iam.show_whoami()
            except:
                pass
            zone = random.choice(resource_tester.ec2.get_zones())
            keypair = resource_tester.ec2.create_keypair_and_localcert(resource_tester.id_generator())
            group = resource_tester.ec2.add_group(resource_tester.id_generator())
            resource_tester.ec2.authorize_group_by_name(group_name=group.name)
            resource_tester.ec2.authorize_group_by_name(group_name=group.name, port=-1, protocol="icmp")
            reservation = resource_tester.ec2.run_instance(keypair=keypair.name, group=group.name, zone=zone)
            instance = reservation.instances[0]
            assert isinstance(instance, EuInstance)
            # Elastic-IP round trip only makes sense when the cloud hands out
            # distinct public addresses.
            if not instance.ip_address == instance.private_ip_address:
                self.tester.ec2.show_all_addresses_verbose()
                address = resource_tester.ec2.allocate_address()
                resource_tester.ec2.associate_address(instance=instance, address=address)
                resource_tester.ec2.disassociate_address_from_instance(instance)
                if not self.args.no_cleanup:
                    resource_tester.ec2.release_address(address)
            # Give the address churn time to settle before reconnecting ssh.
            self.tester.sleep(20)
            instance.update()
            instance.reset_ssh_connection()
            # Volume / snapshot / volume-from-snapshot chain; the results are
            # intentionally kept only for their creation side effects.
            volume = resource_tester.ec2.create_volume(size=1, zone=zone)
            instance.attach_volume(volume)
            snapshot = resource_tester.ec2.create_snapshot(volume_id=volume.id)
            volume_from_snap = resource_tester.ec2.create_volume(snapshot=snapshot, zone=zone)
            bucket = resource_tester.s3.create_bucket(
                resource_tester.id_generator(12, string.ascii_lowercase + string.digits))
            key = resource_tester.s3.upload_object(
                bucket_name=bucket.name,
                key_name=resource_tester.id_generator(12, string.ascii_lowercase + string.digits),
                contents=resource_tester.id_generator(200))
            if not self.args.no_cleanup:
                resource_tester.ec2.terminate_instances(reservation)
class LoadBalancing(EutesterTestCase):
    """Most basic ELB scenario: two httpd-backed web servers behind one
    load balancer, exercised with plain HTTP GETs."""

    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for extra_arg in extra_args:
                self.parser.add_argument(extra_arg)
        self.get_args()
        # Region-scoped ELB connection when --region is given, otherwise a
        # full Eucaops cloud connection.
        if self.args.region:
            self.tester = ELBops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120
        # Security group for the backend instances: open everything, plus ICMP.
        self.group = self.tester.ec2.add_group(group_name="group-" + str(int(time.time())))
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name)
        self.tester.ec2.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        # Keypair (and local .pem) for ssh access to the backends.
        self.keypair = self.tester.ec2.create_keypair_and_localcert("keypair-" + str(int(time.time())))
        self.keypath = "%s/%s.pem" % (os.curdir, self.keypair.name)
        # Image to launch: the CLI-supplied EMI, else whatever the cloud offers.
        self.image = self.args.emi or self.tester.ec2.get_emi()
        # Run everything in one randomly chosen availability zone.
        zone_list = self.tester.ec2.connection.get_all_zones()
        self.zone = random.choice(zone_list).name
        self.load_balancer_port = 80
        # Backends serve their own instance name at /instance-name.
        (self.web_servers, self.filename) = self.tester.ec2.create_web_servers(
            keypair=self.keypair,
            group=self.group,
            zone=self.zone,
            port=self.load_balancer_port,
            filename="instance-name",
            image=self.image)
        self.load_balancer = self.tester.elb.create_load_balancer(
            zones=[self.zone],
            name="test-" + str(int(time.time())),
            load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)
        self.tester.elb.register_lb_instances(self.load_balancer.name,
                                              self.web_servers.instances)

    def clean_method(self):
        """Tear down everything created during setup."""
        self.tester.cleanup_artifacts()

    def GenerateRequests(self):
        """
        This will test the most basic use case for a load balancer.
        Uses to backend instances with httpd servers.
        """
        enabled_dns = self.tester.service_manager.get_enabled_dns()
        balancer_ip = enabled_dns.resolve(self.load_balancer.dns_name)
        target_url = "http://{0}:{1}/instance-name".format(balancer_ip, self.load_balancer_port)
        self.tester.elb.generate_http_requests(url=target_url, count=1000)
class CloudWatchBasics(EutesterTestCase):
    """CloudWatch tests: custom metric put/get statistics, metric listing and
    filtering, and alarms wired to AutoScaling policies."""

    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        # NOTE(review): action='store_true' combined with default=True means
        # this flag can never be False from the command line -- confirm intent.
        self.parser.add_argument('--clean_on_exit', action='store_true', default=True,
                                 help='Boolean, used to flag whether to run clean up method after running test list)')
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        ### Setup basic nephoria object
        if self.args.region:
            self.tester = CWops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password,
                                  credpath=self.args.credpath)
        self.start_time = str(int(time.time()))
        # self.zone is the full zone LIST; setUpAutoscaling later consumes it
        # via self.zone.pop().
        self.zone = self.tester.ec2.get_zones()
        self.namespace = 'Namespace-' + self.start_time
        self.keypair = self.tester.ec2.create_keypair_and_localcert()
        self.group = self.tester.ec2.add_group()
        ### Setup AutoScaling (defines self.instanceid, self.volume, group name)
        self.setUpAutoscaling()
        ### Create Dimensions used in nephoria_unit_tests
        self.instanceDimension = newDimension('InstanceId', self.instanceid)
        self.volumeDimension = newDimension('VolumeId', self.volume.id)
        self.autoScalingDimension = newDimension('AutoScalingGroupName', self.auto_scaling_group_name)
        ### Setup Alarms
        self.setUpAlarms()
        ### Wait for metrics to populate, timeout 30 minute
        self.tester.wait_for_result(self.IsMetricsListPopulated, result=True, timeout=1800)

    def clean_method(self):
        """Remove alarms, ASG artifacts, general artifacts, and the keypair."""
        self.cleanUpAutoscaling()
        self.tester.cleanup_artifacts()
        self.tester.ec2.delete_keypair(self.keypair)
        pass

    def get_time_window(self, end=None, **kwargs):
        """Return (start, end) where end defaults to utcnow and start is end
        minus a timedelta built from kwargs (e.g. minutes=20)."""
        if not end:
            end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(**kwargs)
        return (start, end)

    def print_timeseries_for_graphite(self, timeseries):
        """Dump datapoints in graphite's '<path> <value> <epoch>' line format."""
        for datapoint in timeseries:
            print 'graph.Namespace-1361426618 ' + str(int(datapoint['Average'])) + ' ' + \
                  str((datapoint['Timestamp'] - datetime.datetime(1970, 1, 1)).total_seconds())

    def PutDataGetStats(self):
        """Write a 120s triangle-wave custom metric aligned to a minute edge,
        then verify the two one-minute aggregates CloudWatch reports."""
        assert self.testAwsReservedNamspaces()
        seconds_to_put_data = 120
        metric_data = 1
        time_string = str(int(time.time()))
        metric_name = "Metric-" + time_string
        incrementing = True
        # Spin until the top of a minute so the datapoints split cleanly into
        # exactly two one-minute buckets.
        while datetime.datetime.now().second != 0:
            self.tester.debug("Waiting for minute edge")
            self.tester.sleep(1)
        # Backdate the datapoints: one per second over the preceding 120s.
        start = datetime.datetime.utcnow() - datetime.timedelta(seconds=seconds_to_put_data)
        for i in xrange(seconds_to_put_data):
            timestamp = start + datetime.timedelta(seconds=i)
            self.tester.debug("Adding metric: {metric} to namespace: {namespace} with value {value} at {timestamp}"
                              .format(metric=metric_name, namespace=self.namespace,
                                      value=metric_data, timestamp=timestamp))
            self.tester.cloudwatch.put_metric_data(self.namespace, [metric_name], [metric_data],
                                                   timestamp=timestamp)
            # Triangle wave: reverse direction at the 0 and 600 bounds.
            if metric_data == 600 or metric_data == 0:
                incrementing = not incrementing
            if incrementing:
                metric_data += 1
            else:
                metric_data -= 1
        end = start + datetime.timedelta(seconds=seconds_to_put_data)
        # Give CloudWatch time to aggregate before querying.
        self.tester.sleep(60)
        metric = self.tester.cloudwatch.list_metrics(namespace=self.namespace)[0]
        assert isinstance(metric, Metric)
        stats_array = metric.query(start_time=start, end_time=end,
                                   statistics=['Average', 'Sum', 'Maximum', 'Minimum', 'SampleCount'])
        assert len(stats_array) == 2
        # Order the two buckets: the first minute's values start at 1.
        if stats_array[0]['Minimum'] == 1:
            first_sample = stats_array[0]
            second_sample = stats_array[1]
        else:
            second_sample = stats_array[0]
            first_sample = stats_array[1]
        print stats_array
        ## Check sample 1: values 1..60 -> avg ~30.5, sum ~1830
        assert first_sample['Maximum'] <= 60 and first_sample['Minimum'] > 0
        assert first_sample['Average'] < 34 and first_sample['Average'] > 26
        assert first_sample['Sum'] < 1900 and first_sample['Sum'] > 1500
        assert first_sample['SampleCount'] > 50
        ## Check sample 2: values 61..120 -> avg ~90.5, sum ~5430
        assert second_sample['Maximum'] <= 120 and second_sample['Minimum'] > 50
        assert second_sample['Average'] < 95 and second_sample['Average'] > 80
        assert second_sample['Sum'] < 6100 and second_sample['Sum'] > 4600
        assert second_sample['SampleCount'] > 50
        # The wave is rising, so every statistic grows between the minutes.
        assert first_sample['Average'] < second_sample['Average']
        assert first_sample['Sum'] < second_sample['Sum']
        assert first_sample['Maximum'] < second_sample['Maximum']
        assert first_sample['Minimum'] < second_sample['Minimum']

    def ListMetrics(self, metricNames, dimension):
        """Verify list_metrics returns entries for the given single-key
        dimension dict and includes every name in metricNames."""
        self.debug('Get Metric list')
        metricList = self.tester.cloudwatch.list_metrics(dimensions=dimension)
        self.debug('Checking to see if list is populated at all.')
        assert len(metricList) > 0
        self.debug('Make sure dimensions are listed.')
        found = False
        for metric in metricList:
            self.debug(metric.dimensions)
            # dimension is a one-entry dict; compare against its only value.
            if str(metric.dimensions).count(dimension[dimension.keys().pop()]):
                self.debug('Dimension ' + dimension[dimension.keys().pop()])
                found = True
                break
        assert found
        self.debug('Checking to see if we get all the expected instance metrics.')
        for metric in metricNames:
            assert str(metricList).count(metric['name']) > 0
            self.debug('Metric ' + metric['name'])
        pass

    def checkMetricFilters(self):
        """Exercise every list_metrics filter: namespace, metric name, and
        each dimension type, with both matching and non-existent values."""
        self.debug('Check list_metrics filtering parameters')
        metricList = self.tester.cloudwatch.list_metrics(namespace='AWS/EC2')
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(namespace='AWS/EBS')
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(namespace='NonExistent-NameSpace')
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(metric_name='CPUUtilization')
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(metric_name='NonExistent-Metric-Name')
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.instanceDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('InstanceId', 'NonExistent-InstanceId'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('ImageId', self.image.id))
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('ImageId', 'NonExistent-imageId'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('InstanceType', self.instance_type))
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('InstanceType', 'NonExistent-InstanceType'))
        assert len(metricList) == 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.autoScalingDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('AutoScalingGroupName', 'NonExistent-AutoScalingGroupName'))
        assert len(metricList) == 0
        # NOTE(review): the VolumeId pair below repeats the checks above.
        metricList = self.tester.cloudwatch.list_metrics(dimensions=self.volumeDimension)
        assert len(metricList) > 0
        metricList = self.tester.cloudwatch.list_metrics(dimensions=newDimension('VolumeId', 'NonExistent-VolumeId'))
        assert len(metricList) == 0
        pass

    def IsMetricsListPopulated(self):
        """Poll helper: True once both EC2 CPUUtilization and EBS
        VolumeReadBytes have datapoints in the last 20 minutes."""
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        metrics1 = self.tester.cloudwatch.get_metric_statistics(60, start, end, 'CPUUtilization', 'AWS/EC2', 'Average',
                                                                dimensions=self.instanceDimension, unit='Percent')
        metrics2 = self.tester.cloudwatch.get_metric_statistics(60, start, end, 'VolumeReadBytes', 'AWS/EBS', 'Average',
                                                                dimensions=self.volumeDimension, unit='Bytes')
        if len(metrics1) > 0 and len(metrics2) > 0:
            return True
        else:
            return False

    def GetMetricStatistics(self, metricNames, namespace, dimension):
        """For every metric name and every statistic, fetch the last 20
        minutes of datapoints and validate the collected values."""
        period = 60
        end = datetime.datetime.utcnow()
        start = end - datetime.timedelta(minutes=20)
        stats = self.tester.cloudwatch.get_stats_array()
        ### Check to make sure we are getting all namespace metrics and statistics
        for i in range(len(metricNames)):
            values = []
            for j in range(len(stats)):
                metricName = metricNames[i]['name']
                statisticName = stats[j]
                unitType = metricNames[i]['unit']
                metrics = self.tester.cloudwatch.get_metric_statistics(period, start, end, metricName,
                                                                       namespace, statisticName,
                                                                       dimensions=dimension,
                                                                       unit=unitType)
                ### This assures we are getting all statistics for all dimension metrics.
                assert int(len(metrics)) > 0
                statisticValue = str(metrics[0][statisticName])
                self.debug(metricName + ' : ' + statisticName + '=' + statisticValue + ' ' + unitType)
                values.append(statisticValue)
            self.tester.cloudwatch.validateStats(values)

    def setUpAutoscaling(self):
        """Create LC + ASG (1 busy instance generating disk I/O), three
        scaling policies, and an attached volume for EBS metrics."""
        ### setup autoscaling variables:s
        self.debug('Setting up AutoScaling, starting 1 instance')
        self.instance_type = 'm1.small'
        self.image = self.tester.ec2.get_emi(root_device_type='instance-store')
        self.launch_config_name = 'ASConfig'
        self.auto_scaling_group_name = 'ASGroup'
        self.exact = 'ExactCapacity'
        self.change = 'ChangeInCapacity'
        self.percent = 'PercentChangeInCapacity'
        # Remove leftovers from a previous run before creating anew.
        self.cleanUpAutoscaling()
        # User data keeps the instance busy so CPU/disk metrics are non-zero.
        diskWrite = 'while [ 1 ];do dd if=/dev/zero of=/root/testFile bs=1M count=1; done &'
        diskRead = 'while [ 1 ];do dd if=/root/testFile of=/dev/null bs=1M count=1; done &'
        ### create launch configuration
        self.tester.autoscaling.create_launch_config(name=self.launch_config_name,
                                                     image_id=self.image.id,
                                                     instance_type=self.instance_type,
                                                     key_name=self.keypair.name,
                                                     security_groups=[self.group.name],
                                                     instance_monitoring=True,
                                                     user_data=diskWrite + ' ' + diskRead)
        ### create auto scale group
        self.tester.autoscaling.create_as_group(group_name=self.auto_scaling_group_name,
                                                availability_zones=self.zone,
                                                launch_config=self.launch_config_name,
                                                min_size=0,
                                                max_size=5,
                                                desired_capacity=1)
        ### create auto scale policys (names double as adjustment types)
        self.tester.autoscaling.create_as_policy(name=self.exact,
                                                 adjustment_type=self.exact,
                                                 scaling_adjustment=0,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=0)
        self.tester.autoscaling.create_as_policy(name=self.change,
                                                 adjustment_type=self.change,
                                                 scaling_adjustment=1,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=0)
        self.tester.autoscaling.create_as_policy(name=self.percent,
                                                 adjustment_type=self.percent,
                                                 scaling_adjustment=-50,
                                                 as_name=self.auto_scaling_group_name,
                                                 cooldown=0)
        ## Wait for the instance to go to running state.
        self.tester.wait_for_result(self.tester.autoscaling.wait_for_instances, True, timeout=600,
                                    group_name=self.auto_scaling_group_name, tester=self.tester)
        self.instanceid = self.tester.autoscaling.get_last_instance_id(tester=self.tester)
        instance_list = self.tester.ec2.get_instances(idstring=self.instanceid)
        self.instance = instance_list.pop()
        self.debug('ASG is now setup.')
        ### Create and attach a volume
        # NOTE(review): self.zone.pop() mutates the zone list built in
        # __init__ -- repeated setup calls will drain it.
        self.volume = self.tester.ec2.create_volume(self.zone.pop())
        self.tester.ec2.attach_volume(self.instance, self.volume, '/dev/sdf')
        ### Get the newly created policies.
        self.policy_exact = self.tester.autoscaling.connection.get_all_policies(policy_names=[self.exact])
        self.policy_change = self.tester.autoscaling.connection.get_all_policies(policy_names=[self.change])
        self.policy_percent = self.tester.autoscaling.connection.get_all_policies(policy_names=[self.percent])
        self.debug('AutoScaling setup Complete')

    def cleanUpAutoscaling(self):
        """Delete alarms, policies, the ASG (forced), and the launch config."""
        self.tester.cloudwatch.delete_all_alarms()
        self.tester.autoscaling.delete_all_policies()
        self.tester.autoscaling.delete_as_group(name=self.auto_scaling_group_name, force=True)
        self.tester.autoscaling.delete_launch_config(self.launch_config_name)

    def isInService(self):
        """True when every instance in the ASG reports lifecycle InService."""
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        allInService = True
        for instance in group.instances:
            if not str(instance.lifecycle_state).endswith('InService'):
                allInService = False
                break
        return allInService

    def setUpAlarms(self):
        """Create one CPUUtilization > 0 alarm per scaling policy so that
        firing an alarm drives the corresponding capacity change."""
        metric = 'CPUUtilization'
        comparison = '>'
        threshold = 0
        period = 60
        evaluation_periods = 1
        statistic = 'Average'
        ### This alarm sets the number of running instances to exactly 0
        alarm_exact = self.tester.cloudwatch.metric_alarm('exact', metric, comparison, threshold,
                                                          period, evaluation_periods, statistic,
                                                          description='TEST',
                                                          namespace='AWS/EC2',
                                                          dimensions=self.instanceDimension,
                                                          alarm_actions=self.policy_exact.pop().policy_arn)
        ### This alarm sets the number of running instances to + 1
        alarm_change = self.tester.cloudwatch.metric_alarm('change', metric, comparison, threshold,
                                                           period, evaluation_periods, statistic,
                                                           description='TEST',
                                                           namespace='AWS/EC2',
                                                           dimensions=self.instanceDimension,
                                                           alarm_actions=self.policy_change.pop().policy_arn)
        ### This alarm sets the number of running instances to -50%
        alarm_percent = self.tester.cloudwatch.metric_alarm('percent', metric, comparison, threshold,
                                                            period, evaluation_periods, statistic,
                                                            description='TEST',
                                                            namespace='AWS/EC2',
                                                            dimensions=self.instanceDimension,
                                                            alarm_actions=self.policy_percent.pop().policy_arn)
        ### put all the alarms
        self.tester.cloudwatch.put_metric_alarm(alarm_change)
        self.tester.cloudwatch.put_metric_alarm(alarm_percent)
        self.tester.cloudwatch.put_metric_alarm(alarm_exact)

    def testDesribeAlarms(self):
        """Check describe_alarms, describe_alarms_for_metric and alarm history.
        NOTE(review): method name typo ('Desribe') is public interface; left as-is."""
        self.debug(self.tester.cloudwatch.describe_alarms())
        assert len(self.tester.cloudwatch.describe_alarms()) >= 3
        ### test describe_alarms_for_metric for created alarms
        assert len(self.tester.cloudwatch.describe_alarms_for_metric('CPUUtilization', 'AWS/EC2',
                                                                     dimensions=self.instanceDimension)) == 3
        ### There are not be any alarms created for 'DiskReadOps'
        assert len(self.tester.cloudwatch.describe_alarms_for_metric('DiskReadOps', 'AWS/EC2',
                                                                     dimensions=self.instanceDimension)) == 0
        ### test describe_alarm_history
        self.debug(self.tester.cloudwatch.describe_alarm_history())
        assert len(self.tester.cloudwatch.describe_alarm_history()) >= 3
        pass

    def testAlarms(self):
        """Fire each alarm and confirm the ASG's instance count follows the
        attached policy: 1 -> (disabled, still 1) -> 2 -> 1 -> 0."""
        ### The number of running instances should equal the desired_capacity for the auto_scaling_group = (1)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        ### The number of running instances should still be 1 with 'exact' disabled
        self.tester.cloudwatch.disable_alarm_actions('exact')
        self.tester.cloudwatch.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.tester.cloudwatch.enable_alarm_actions('exact')
        self.debug('The number of running ' + self.auto_scaling_group_name + ' instances = 1')
        ### The number of running instances should equal the desired_capacity + scaling_adjustment = (2)
        self.tester.cloudwatch.set_alarm_state('change')
        self.tester.sleep(15)
        self.tester.wait_for_result(self.isInService, result=True, timeout=240)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        self.debug(len(group.instances))
        assert len(group.instances) == 2
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances changed to 2')
        ### The number of running instances should equal the total from the previous scaling_adjustment (2) - 50% = (1)
        self.tester.cloudwatch.set_alarm_state('percent')
        self.tester.sleep(15)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert len(group.instances) == 1
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances decreased by 50%')
        ### This should terminate all instances in the auto_scaling_group.
        self.tester.cloudwatch.set_alarm_state('exact')
        self.tester.sleep(15)
        group = self.tester.autoscaling.describe_as_group(name=self.auto_scaling_group_name)
        assert group.instances == None
        self.debug('Success the number of running ' + self.auto_scaling_group_name + ' instances is exactly 0')
        pass

    def testAwsReservedNamspaces(self):
        """Putting data into an AWS/-prefixed namespace must be rejected;
        return True when the expected InvalidParameterValue error is raised."""
        try:
            self.tester.cloudwatch.put_metric_data('AWS/AnyName', 'TestMetricName', 1)
        except Exception, e:
            if str(e).count('The value AWS/ for parameter Namespace is invalid.'):
                self.tester.debug('testAwsReservedNamspaces generated expected InvalidParameterValue error.')
                return True
        self.tester.debug('testAwsReservedNamspaces did not throw expected InvalidParameterValue error.')
        return False