Example #1
class LoadBalancing(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(int(time.time())))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(int(time.time())))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        ### Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store", not_platform="windows")

        ### Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.load_balancer_port = 80

        (self.web_servers, self.filename) = self.tester.create_web_servers(keypair=self.keypair,
                                                                          group=self.group,
                                                                          zone=self.zone,
                                                                          port=self.load_balancer_port,
                                                                          filename='instance-name',
                                                                          image=self.image)

        self.load_balancer = self.tester.create_load_balancer(zones=[self.zone],
                                                              name="test-" + str(int(time.time())),
                                                              load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)
        self.tester.register_lb_instances(self.load_balancer.name,
                                          self.web_servers.instances)

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def GenerateRequests(self):
        """
        This will test the most basic use case for a load balancer.
        Uses two backend instances running httpd servers.
        """
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "http://{0}:{1}/instance-name".format(lb_ip, self.load_balancer_port)
        self.tester.generate_http_requests(url=lb_url, count=1000)
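
A minimal runner sketch, not part of the example above: EutesterTestCase-derived suites such as LoadBalancing are usually driven from a __main__ block that turns the selected test method names into test units and executes them. This assumes the standard EutesterTestCase helpers create_testunit_by_name and run_test_case_list and the parser's --tests option.

if __name__ == "__main__":
    testcase = LoadBalancing()
    # Run the tests named on the command line, or fall back to the single test above
    test_names = testcase.args.tests or ["GenerateRequests"]
    # Convert each name into a test unit, run the list, and clean up afterwards
    unit_list = [testcase.create_testunit_by_name(name) for name in test_names]
    result = testcase.run_test_case_list(unit_list, clean_on_exit=True)
    exit(result)
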
class MyTestCase(EutesterTestCase):
    def __init__(self, config_file=None, password=None):
        self.setuptestcase()
        # Setup basic eutester object
        self.tester = Eucaops( config_file=config_file, password=password)
        self.reservation = None
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))

        ### Get an image to work with
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.clean_method = self.cleanup

    def cleanup(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)

    def MyTestUnit(self):
        """
        A test description must go here......
        This test will simply run an instance and check that it is reachable via ssh
        """
        self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name)
        for instance in self.reservation.instances:
            instance.sys("uname -r")
Example #3
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self, config_file="cloud.conf", password="******"):
        self.tester = Eucaops(config_file=config_file, password=password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" +
                                           self.start_time)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        ###
        self.standing_reservation = self.tester.run_instance(
            keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(
            self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(
            self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(
            self.standing_bucket_name, self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(
            self.standing_bucket_name, self.standing_key_name)

    def run_testcase(self, testcase_callback, **kwargs):
        max_attempts = 20
        poll_interval = 20
        poll_count = max_attempts
        while poll_count > 0:
            try:
                testcase_callback(**kwargs)
                break
            except Exception, e:
                self.tester.debug("Attempt failed due to: " + str(e) +
                                  "\nRetrying testcase in " +
                                  str(poll_interval) + "s")
            self.tester.sleep(poll_interval)
            poll_count -= 1
        if poll_count == 0:
            self.fail("Could not run the testcase after " + str(max_attempts) +
                      " tries with " + str(poll_interval) +
                      "s sleep in between")
Example #4
class InstanceBasics(unittest.TestCase):
    def setUp(self, credpath=None):
        # Setup basic eutester object
        if credpath is None:
            credpath = arg_credpath
        self.tester = Eucaops(credpath=credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def create_attach_volume(self, instance, size):
        self.volume = self.tester.create_volume(instance.placement, size)
        device_path = "/dev/" + instance.block_device_prefix + "j"
        before_attach = instance.get_dev_dir()
        try:
            self.assertTrue(
                self.tester.attach_volume(instance, self.volume, device_path),
                "Failure attaching volume")
        except AssertionError, e:
            self.assertTrue(self.tester.delete_volume(self.volume))
            return False
        after_attach = instance.get_dev_dir()
        new_devices = self.tester.diff(after_attach, before_attach)
        if len(new_devices) == 0:
            return False
        self.volume_device = "/dev/" + new_devices[0].strip()
        instance.assertFilePresent(self.volume_device)
        return True
Example #5
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" +
                                               self.start_time)
            self.tester.authorize_group_by_name(group_name=self.group.name)
            self.tester.authorize_group_by_name(group_name=self.group.name,
                                                port=-1,
                                                protocol="icmp")
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair("keypair-" +
                                                   self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            self.image = self.tester.get_emi(root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name

            self.tester.clc = self.tester.service_manager.get_enabled_clc(
            ).machine
            self.version = self.tester.clc.sys(
                "cat " + self.tester.eucapath +
                "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            ###
            self.standing_reservation = self.tester.run_instance(
                keypair=self.keypair.name,
                group=self.group.name,
                zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(
                self.volume)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(
                self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(
                self.standing_bucket_name, self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(
                self.standing_bucket_name, self.standing_key_name)
        except Exception, e:
            self.clean_method()
Example #6
class InstanceBasics(unittest.TestCase):
    def setUp(self):
        # Setup basic eutester object
        eucarc_regex = re.compile("eucarc-")
        eucarc_dirs = [path for path in os.listdir(".") if eucarc_regex.search(path)]
        eucarc_path = None
        if len(eucarc_dirs) > 0:
            eucarc_path = eucarc_dirs[0]
        self.tester = Eucaops( config_file="../input/2b_tested.lst", password="******", credpath=eucarc_path)
        self.tester.poll_count = 120
        
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    
    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None
        
    def create_attach_volume(self, instance, size):
        self.volume = self.tester.create_volume(instance.placement, size)
        device_path = "/dev/" + instance.block_device_prefix + "j"
        before_attach = instance.get_dev_dir()
        try:
            self.assertTrue(self.tester.attach_volume(instance, self.volume, device_path), "Failure attaching volume")
        except AssertionError, e:
            self.assertTrue(self.tester.delete_volume(self.volume))
            return False
        after_attach = instance.get_dev_dir()
        new_devices = self.tester.diff(after_attach, before_attach)
        if len(new_devices) == 0:
            return False
        self.volume_device = "/dev/" + new_devices[0].strip()
        instance.assertFilePresent(self.volume_device)
        return True
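
A short sketch, not taken from the original file, of how a unittest.TestCase-style suite such as InstanceBasics above can be driven. It assumes the complete class defines test_* methods beyond the helpers shown here; with none defined, the runner simply reports an empty suite.

import unittest

if __name__ == "__main__":
    # Collect every test_* method defined on the class and run them
    suite = unittest.TestLoader().loadTestsFromTestCase(InstanceBasics)
    unittest.TextTestRunner(verbosity=2).run(suite)
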
Example #7
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        if not boto.config.has_section('Boto'):
            boto.config.add_section('Boto')
            boto.config.set('Boto', 'num_retries', '1')
            boto.config.set('Boto', 'http_socket_timeout', '20')
        self.tester = Eucaops( config_file=self.args.config_file, password=self.args.password)
        self.tester.ec2.connection.timeout = 30
        self.servman = self.tester.service_manager
        self.instance_timeout = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" + self.start_time )
            self.tester.authorize_group_by_name(group_name=self.group.name )
            self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair( "keypair-" + self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            if self.args.emi:
                self.image = self.tester.get_emi(self.args.emi)
            else:
                self.image = self.tester.get_emi(root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name

            self.tester.clc = self.tester.service_manager.get_enabled_clc().machine
            self.version = self.tester.clc.sys("cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            ###
            self.standing_reservation = self.tester.run_instance(image=self.image ,keypair=self.keypair.name,group=self.group.name, zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
            for instance in self.standing_reservation.instances:
                instance.sys("echo " + instance.id  + " > " + self.device)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(self.standing_bucket_name, self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name, self.standing_key_name)
            self.run_instance_params = {'image': self.image, 'keypair': self.keypair.name, 'group': self.group.name,
                                        'zone': self.zone, 'timeout': self.instance_timeout}
        except Exception, e:
            self.clean_method()
            raise Exception("Init for testcase failed. Reason: " + str(e))
Example #8
class InstanceRestore(EutesterTestCase):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        # Setup basic eutester object
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        clcs = self.tester.get_component_machines("clc")
        if len(clcs) == 0:
            raise Exception("No CLC found")
        else:
            self.clc = clcs[0]
        self.cur_time = str(int(time.time()))
        self.ncs = self.tester.get_component_machines("nc")

    def clean_method(self):
        ncs = self.tester.get_component_machines("nc")
        for nc in ncs:
            nc.sys("service eucalyptus-nc start")

        ### RESET vmstate properties
        self.tester.modify_property("cloud.vmstate.instance_timeout", "60")
        self.tester.modify_property("cloud.vmstate.terminated_time", "60")
        for nc in self.ncs:
            nc.sys("service eucalyptus-nc start")
        self.tester.cleanup_artifacts()
        try:
            image = self.tester.get_emi(self.image)
        except Exception, e:
            self.tester.register_image(
                image_location=self.image.location,
                ramdisk=self.image.ramdisk_id,
                kernel=self.image.kernel_id,
                virtualization_type=self.image.virtualization_type)
Example #9
class InstanceBasicsTest(EutesterTestCase):
    def __init__(self):
        #### Pre-conditions
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--test-zone", default="PARTI00")
        self.parser.add_argument("--test-emi", default=None)
        self.parser.add_argument("--build-number", default='')
        self.get_args()

        # Setup basic eutester object
        if not self.args.credpath:
            self.tester = Eucaops(config_file=self.args.config,
                                  password=self.args.password)
        else:
            self.tester = Eucaops(credpath=self.args.credpath)
        self.reservation = None

        ### Generate a group for the instance
        self.group = self.tester.add_group(
            group_name="inst-kvm-grp-" + str(time.time()).replace(".", "") +
            self.tester.id_generator() + "-" + self.args.build_number)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")

        self.keypair = self.tester.add_keypair(
            "inst-kvm-" + str(time.time()).replace(".", "") +
            self.tester.id_generator() + "-" + self.args.build_number)

        if not self.args.emi:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        else:
            self.image = self.tester.get_emi(self.args.test_emi)

    def clean_method(self):
        """
        Description: Attempts to clean up resources created in this test
        """
        self.tester.cleanup_artifacts()

    def stress_instance_test(self):
        self.reservation = self.tester.run_image(self.image,
                                                 zone=self.args.test_zone,
                                                 min=1,
                                                 max=1,
                                                 keypair=self.keypair.name,
                                                 group=self.group,
                                                 timeout=600)
Example #10
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self, config_file="cloud.conf", password="******"):
        self.tester = Eucaops( config_file=config_file, password=password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" + self.start_time )
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        
        
        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        ### 
        self.standing_reservation = self.tester.run_instance(keypair=self.keypair.name,group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(self.standing_bucket_name, self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name, self.standing_key_name)

    def run_testcase(self, testcase_callback, **kwargs):
        max_attempts = 20
        poll_interval = 20
        poll_count = max_attempts
        while poll_count > 0:
            try:
                testcase_callback(**kwargs)
                break
            except Exception, e:
                self.tester.debug("Attempt failed due to: " + str(e) + "\nRetrying testcase in " + str(poll_interval) + "s")
            self.tester.sleep(poll_interval)
            poll_count -= 1
        if poll_count == 0:
            self.fail("Could not run the testcase after " + str(max_attempts) + " tries with " + str(poll_interval) + "s sleep in between")
class InstanceRestore(EutesterTestCase):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        # Setup basic eutester object
        self.tester = Eucaops( config_file=self.args.config_file, password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        clcs = self.tester.get_component_machines("clc")
        if len(clcs) == 0:
            raise Exception("No CLC found")
        else:
            self.clc = clcs[0]
        self.cur_time = str(int(time.time()))
        self.ncs = self.tester.get_component_machines("nc")

    def clean_method(self):
        ncs = self.tester.get_component_machines("nc")
        for nc in ncs:
            nc.sys("service eucalyptus-nc start")

        ### RESET vmstate properties
        self.tester.modify_property("cloud.vmstate.instance_timeout","60")
        self.tester.modify_property("cloud.vmstate.terminated_time","60")
        for nc in self.ncs:
            nc.sys("service eucalyptus-nc start")
        self.tester.cleanup_artifacts()
        try:
            image = self.tester.get_emi(self.image)
        except Exception,e:
            self.tester.register_image(image_location=self.image.location,
                                       ramdisk=self.image.ramdisk_id,
                                       kernel=self.image.kernel_id,
                                       virtualization_type=self.image.virtualization_type)
Example #12
class CloudFormations(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = CFNops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

    def InstanceVolumeTemplate(self):
        self.stack_name = "volumeTest{0}".format(int(time.time()))
        template = Template()
        keyname_param = template.add_parameter(Parameter("KeyName", Description="Name of an existing EC2 KeyPair "
                                                                                "to enable SSH access to the instance",
                                                         Type="String",))
        template.add_mapping('RegionMap', {"": {"AMI": self.tester.get_emi().id}})
        for i in xrange(2):
            ec2_instance = template.add_resource(ec2.Instance("Instance{0}".format(i),
                                                              ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                                                              InstanceType="t1.micro", KeyName=Ref(keyname_param),
                                                              SecurityGroups=[self.group.name], UserData=Base64("80")))
            vol = template.add_resource(ec2.Volume("Volume{0}".format(i), Size="8",
                                                   AvailabilityZone=GetAtt("Instance{0}".format(i), "AvailabilityZone")))
            mount = template.add_resource(ec2.VolumeAttachment("MountPt{0}".format(i), InstanceId=Ref("Instance{0}".format(i)),
                                                               VolumeId=Ref("Volume{0}".format(i)), Device="/dev/vdc"))
        stack = self.tester.create_stack(self.stack_name, template.to_json(), parameters=[("KeyName",self.keypair.name)])
        def stack_completed():
            stacks = self.tester.cloudformation.describe_stacks(self.stack_name)
            return len(stacks) > 0 and stacks[0].stack_status == "CREATE_COMPLETE"
        self.tester.wait_for_result(stack_completed, True, timeout=600)
        self.tester.delete_stack(self.stack_name)

    def clean_method(self):
        self.tester.cleanup_artifacts()
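
To inspect the CloudFormation document that InstanceVolumeTemplate builds without talking to a cloud, the following standalone sketch rebuilds one instance/volume/attachment triple and prints the JSON. The image id and security group name are hypothetical placeholders rather than values taken from the example above.

from troposphere import Template, Parameter, Ref, FindInMap, GetAtt, Base64, ec2

template = Template()
keyname_param = template.add_parameter(Parameter("KeyName", Type="String",
                                                 Description="Existing EC2 KeyPair for SSH access"))
template.add_mapping('RegionMap', {"": {"AMI": "emi-00000000"}})  # hypothetical image id
template.add_resource(ec2.Instance("Instance0",
                                   ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                                   InstanceType="t1.micro",
                                   KeyName=Ref(keyname_param),
                                   SecurityGroups=["default"],  # hypothetical security group name
                                   UserData=Base64("80")))
template.add_resource(ec2.Volume("Volume0", Size="8",
                                 AvailabilityZone=GetAtt("Instance0", "AvailabilityZone")))
template.add_resource(ec2.VolumeAttachment("MountPt0", InstanceId=Ref("Instance0"),
                                           VolumeId=Ref("Volume0"), Device="/dev/vdc"))
print(template.to_json())  # dump the generated CloudFormation JSON for review
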
class InstanceBasicsTest(EutesterTestCase):
    def __init__(self):
        #### Pre-conditions
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--test-zone", default="PARTI00")
        self.parser.add_argument("--test-emi", default=None)
        self.parser.add_argument("--build-number", default='')
        self.get_args()

        # Setup basic eutester object
        if not self.args.credpath:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password)
        else:
            self.tester = Eucaops(credpath=self.args.credpath)
        self.reservation = None

        ### Generate a group for the instance
        self.group = self.tester.add_group(group_name="inst-kvm-grp-" + str(time.time()).replace(".", "") +
                                                      self.tester.id_generator() + "-" + self.args.build_number)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")

        self.keypair = self.tester.add_keypair("inst-kvm-" + str(time.time()).replace(".", "") +
                                               self.tester.id_generator() + "-" + self.args.build_number)

        if not self.args.emi:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        else:
            self.image = self.tester.get_emi(self.args.test_emi)

    def clean_method(self):
        """
        Description: Attempts to clean up resources created in this test
        """
        self.tester.cleanup_artifacts()

    def stress_instance_test(self):
        self.reservation = self.tester.run_image(self.image,
                                                 zone=self.args.test_zone,
                                                 min=1, max=1,
                                                 keypair=self.keypair.name,
                                                 group=self.group,
                                                 timeout=600)
Example #14
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops( config_file=self.args.config_file, password=self.args.password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" + self.start_time )
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.tester.clc = self.tester.service_manager.get_enabled_clc().machine
        self.old_version = self.tester.clc.sys("cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0]
        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        ### 
        self.standing_reservation = self.tester.run_instance(keypair=self.keypair.name,group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(self.standing_bucket_name, self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name, self.standing_key_name)

    def clean_method(self):
        try:
            self.tester.terminate_instances()
        except Exception, e:
            self.tester.critical("Unable to terminate all instances")
        self.servman.start_all()
Example #15
class MyTestCase(EutesterTestCase):
    def __init__(self, config_file=None, password=None):
        self.setuptestcase()
        # Setup basic eutester object
        self.tester = Eucaops(config_file=config_file, password=password)
        self.reservation = None
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))

        ### Get an image to work with
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.clean_method = self.cleanup

    def cleanup(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)

    def MyTestUnit(self):
        """
        A test description must go here......
        This test will simply run an instance and check that it is reachable via ssh
        """
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name)
        for instance in self.reservation.instances:
            instance.sys("uname -r")
Example #16
class InstanceBasics(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password, credpath=self.args.credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None
        self.volume = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        if self.address:
            assert isinstance(self.address,Address)
            self.tester.release_address(self.address)
        if self.volume:
            self.tester.delete_volume(self.volume)
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)


    def BasicInstanceChecks(self, zone = None):
        """
        This case was developed to run through a series of basic instance tests.
             The tests are as follows:
                   - execute run_instances command
                   - make sure that public DNS name and private IP aren't the same
                       (This is for Managed/Managed-NOVLAN networking modes)
                   - test to see if instance is ping-able
                   - test to make sure that instance is accessible via ssh
                       (ssh into instance and run basic ls command)
             If any of these tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Instance did not go to running')
            self.assertNotEqual( instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same')
            self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance')
            self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2",  "No such file or directory"),  'Did not find ephemeral storage at ' + instance.rootfs_device + "2")
        return self.reservation

    def ElasticIps(self, zone = None):
        """
        This case was developed to test elastic IPs in Eucalyptus. This test case does
        not test instances that are launched using the private-addressing option.
        The test case executes the following tests:
            - allocates an IP, associates the IP to the instance, then pings the instance.
            - disassociates the allocated IP, then pings the instance.
            - releases the allocated IP address
        If any of the tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,zone=zone)
        for instance in self.reservation.instances:
            self.address = self.tester.allocate_address()
            self.assertTrue(self.address,'Unable to allocate address')
            self.tester.associate_address(instance, self.address)
            instance.update()
            self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP")
            self.tester.disassociate_address_from_instance(instance)
            self.tester.release_address(self.address)
            self.address = None
            instance.update()
            self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping after dissassociate")
        return self.reservation

    def MaxSmallInstances(self, available_small=None,zone = None):
        """
        This case was developed to test the maximum number of m1.small vm types a configured
        cloud can run.  The test runs the maximum number of m1.small vm types allowed, then
        tests to see if all the instances reached a running state.  If there is a failure,
        the test case errors out; logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
        if available_small is None:
            available_small = self.tester.get_available_vms()
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,min=available_small, max=available_small, zone=zone)
        self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances  went to running')
        return self.reservation

    def LargestInstance(self, zone = None):
        """
        This case was developed to test the maximum number of c1.xlarge vm types a configured
        cloud can run.  The test runs the maximum number of c1.xlarge vm types allowed, then
        tests to see if all the instances reached a running state.  If there is a failure,
        the test case errors out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
        self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,type="c1.xlarge",zone=zone)
        self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances  went to running')
        return self.reservation

    def MetaData(self, zone=None):
        """
        This case was developed to test the metadata service of an instance for consistency.
        The following meta-data attributes are tested:
           - public-keys/0/openssh-key
           - security-groups
           - instance-id
           - local-ipv4
           - public-ipv4
           - ami-id
           - ami-launch-index
           - reservation-id
           - placement/availability-zone
           - kernel-id
           - public-hostname
           - local-hostname
           - hostname
           - ramdisk-id
           - instance-type
           - any bad metadata that shouldn't be present.
        Missing nodes
         ['block-device-mapping/',  'ami-manifest-path']
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            ## Need to verify  the public key (could just be checking for a string of a certain length)
            self.assertTrue(re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata')
            self.assertTrue(re.match(instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata')
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) 
            self.assertTrue(re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata')
            self.assertTrue(re.match(instance.get_metadata("local-ipv4")[0] , instance.private_ip_address), 'Incorrect private ip in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-ipv4")[0] , instance.ip_address), 'Incorrect public ip in metadata')
            self.assertTrue(re.match(instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata')
            self.assertTrue(re.match(instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata')
            self.assertTrue(re.match(instance.get_metadata("reservation-id")[0], self.reservation.id), 'Incorrect reservation in metadata')
            self.assertTrue(re.match(instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata')
            self.assertTrue(re.match(instance.get_metadata("kernel-id")[0], instance.kernel),  'Incorrect kernel id in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("hostname")[0], instance.dns_name), 'Incorrect host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("ramdisk-id")[0], instance.ramdisk ), 'Incorrect ramdisk in metadata') #instance-type
            self.assertTrue(re.match(instance.get_metadata("instance-type")[0], instance.instance_type ), 'Incorrect instance type in metadata')
            BAD_META_DATA_KEYS = ['foobar']
            for key in BAD_META_DATA_KEYS:
                self.assertTrue(re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node')
        return self.reservation

    def DNSResolveCheck(self, zone=None):
        """
        This case was developed to test DNS resolution information for public/private DNS
        names and IP addresses.  The tested DNS resolution behavior is expected to follow
        AWS EC2.  The following tests are ran using the associated meta-data attributes:
           - check to see if Eucalyptus Dynamic DNS is configured
           - nslookup on hostname; checks to see if it matches local-ipv4
           - nslookup on local-hostname; check to see if it matches local-ipv4
           - nslookup on local-ipv4; check to see if it matches local-hostname
           - nslookup on public-hostname; check to see if it matches local-ipv4
           - nslookup on public-ipv4; check to see if it matches public-host
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:

            # Test to see if Dynamic DNS has been configured # 
            if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]):
                # Per AWS standard, resolution should have private hostname or private IP as a valid response
                # Perform DNS resolution against private IP and private DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("hostname")[0])[3]), "DNS lookup failed for hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("hostname")[0])[5]), "Incorrect DNS resolution for hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("local-hostname")[0])[3]), "DNS lookup failed for private hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("local-hostname")[0])[5]), "Incorrect DNS resolution for private hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("local-ipv4")[0])[3]), "DNS lookup failed for private IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname
                self.assertTrue(re.search(instance.get_metadata("local-hostname")[0], instance.sys("nslookup " +  instance.get_metadata("local-ipv4")[0])[4]), "Incorrect DNS resolution for private IP address")
                # Perform DNS resolution against public IP and public DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("public-hostname")[0])[3]), "DNS lookup failed for public-hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address
                self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("public-hostname")[0])[5]), "Incorrect DNS resolution for public-hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("public-ipv4")[0])[3]), "DNS lookup failed for public IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname
                self.assertTrue(re.search(instance.get_metadata("public-hostname")[0], instance.sys("nslookup " +  instance.get_metadata("public-ipv4")[0])[4]), "Incorrect DNS resolution for public IP address")

        return self.reservation

    def DNSCheck(self, zone=None):
        """
        This case was developed to test to make sure Eucalyptus Dynamic DNS reports correct
        information for public/private IP address and DNS names passed to meta-data service.
        The following tests are ran using the associated meta-data attributes:
           - check to see if Eucalyptus Dynamic DNS is configured
           - check to see if local-ipv4 and local-hostname are not the same
           - check to see if public-ipv4 and public-hostname are not the same
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:

            # Test to see if Dynamic DNS has been configured # 
            if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]):
                # Make sure that private_ip_address is not the same as local-hostname
                self.assertFalse(re.match(instance.private_ip_address, instance.private_dns_name), 'local-ipv4 and local-hostname are the same with DNS on')
                # Make sure that ip_address is not the same as public-hostname
                self.assertFalse(re.match(instance.ip_address, instance.public_dns_name), 'public-ipv4 and public-hostname are the same with DNS on')

        return self.reservation

    def Reboot(self, zone=None):
        """
        This case was developed to test IP connectivity and volume attachment after
        instance reboot.  The following tests are done for this test case:
                   - creates a 1 gig EBS volume, then attach volume
                   - reboot instance
                   - attempts to connect to instance via ssh
                   - checks to see if EBS volume is attached
                   - detaches volume
                   - deletes volume
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            ### Create 1GB volume in first AZ
            self.volume = self.tester.create_volume(instance.placement, 1)
            self.volume_device = instance.attach_volume(self.volume)
            ### Reboot instance
            instance.reboot_instance_and_verify(waitconnect=20)
            instance.detach_euvolume(self.volume)
            self.tester.delete_volume(self.volume)
            self.volume = None
        return self.reservation

    def run_terminate(self):
        reservation = None
        try:
            reservation = self.tester.run_instance(image=self.image,zone=self.zone, keypair=self.keypair.name, group=self.group.name)
            self.tester.terminate_instances(reservation)
            return 0
        except Exception, e:
            if reservation:
                self.tester.terminate_instances(reservation)
            return 1
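
A hypothetical churn loop, not from the original source, showing how the 0/1 return value of run_terminate can be accumulated into a failure count over repeated run/terminate cycles; it assumes the usual command-line arguments for the parser are supplied.

suite = InstanceBasics()
failures = 0
for _ in range(10):  # the number of cycles is arbitrary here
    failures += suite.run_terminate()
suite.tester.debug("run/terminate cycles that failed: " + str(failures))
suite.clean_method()
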
Example #17
class AutoScalingBasics(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath)

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None

    def clean_method(self):
        ### DELETE group
        self.tester.delete_group(self.group)

        ### Delete keypair in cloud and from filesystem
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def AutoScalingBasics(self):
        ### test create  and describe launch config
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        if len(self.tester.describe_launch_config([self.launch_config_name])) != 1:
            raise Exception('Launch Config not created')
        self.debug('**** Created Launch Config: ' +
                   self.tester.describe_launch_config([self.launch_config_name])[0].name)

        ### test create and describe auto scale group
        self.initial_size = len(self.tester.describe_as_group())
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                    launch_config=self.launch_config_name,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=0,
                                    max_size=5,
                                    connection=self.tester.autoscale)
        if len(self.tester.describe_as_group([self.auto_scaling_group_name])) != 1:
            raise Exception('Auto Scaling Group not created')
        self.debug("**** Created Auto Scaling Group: " +
                   self.tester.describe_as_group([self.auto_scaling_group_name])[0].name)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.create_as_policy(name=self.up_policy_name,
                                     adjustment_type="ChangeInCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=4,
                                     cooldown=120)

        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50
        self.tester.create_as_policy(name=self.down_policy_name,
                                     adjustment_type="PercentChangeInCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=self.down_size,
                                     cooldown=120)

        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0
        self.tester.create_as_policy(name=self.exact_policy_name,
                                     adjustment_type="ExactCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=self.exact_size,
                                     cooldown=120)

        ### Test all policies added to group
        if len(self.tester.autoscale.get_all_policies()) != 3:
            raise Exception('Auto Scaling policies not created')
        self.debug("**** Created Auto Scaling Policies: " + self.up_policy_name + " " + self.down_policy_name + " " +
                   self.exact_policy_name)

        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.up_policy_name, as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug("Executed  ChangeInCapacity policy, increased desired capacity to: " +
                   str(self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity))

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.down_policy_name, as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed PercentChangeInCapacity policy, decreased desired capacity to: " +
                   str(self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity))

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.exact_policy_name, as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity != self.exact_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " +
                   str(self.tester.describe_as_group([self.auto_scaling_group_name])[0].desired_capacity))

        ### Test Delete all Auto Scaling Policies
        for policy in self.tester.autoscale.get_all_policies():
            self.tester.delete_as_policy(policy_name=policy.name, autoscale_group=policy.as_name)
        if len(self.tester.autoscale.get_all_policies()) != 0:
            raise Exception('Auto Scaling policy not deleted')
        self.debug("**** Deleted Auto Scaling Policy: " + self.up_policy_name + " " + self.down_policy_name + " " +
                   self.exact_policy_name)

        ### Test Delete Auto Scaling Group
        self.tester.delete_as_group(names=self.auto_scaling_group_name)
        if len(self.tester.describe_as_group([self.auto_scaling_group_name])) != 0:
            raise Exception('Auto Scaling Group not deleted')
        self.debug('**** Deleted Auto Scaling Group: ' + self.auto_scaling_group_name)

        ### pause for Auto scaling group to be deleted
        # TODO write wait/poll op for auto scaling groups
        # time.sleep(5)

        ### Test delete launch config
        self.tester.delete_launch_config(self.launch_config_name)
        if len(self.tester.describe_launch_config([self.launch_config_name])) != 0:
            raise Exception('Launch Config not deleted')
        self.debug('**** Deleted Launch Config: ' + self.launch_config_name)
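
    # Hypothetical convenience helper (not in the original test): the
    # desired-capacity assertions above repeat the same describe_as_group()
    # lookup, and a one-line accessor like this would collapse the repetition.
    def _desired_capacity(self):
        return self.tester.describe_as_group(
            [self.auto_scaling_group_name])[0].desired_capacity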

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a limit of 100 launch configurations per account; this tests what happens if we try to create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.create_launch_config(name=self.launch_config_name,
                                             image_id=self.image.id)
            if len(self.tester.describe_launch_config()) > 100:
                raise Exception("More then 100 launch configs exist in 1 account")
        for lc in self.tester.describe_launch_config():
            self.tester.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        launch_config_name = 'LC-' + str(time.time())
        self.tester.create_launch_config(name=launch_config_name,
                                         image_id=self.image.id,
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        asg = 'ASG-' + str(time.time())
        self.tester.create_as_group(group_name=asg,
                                    launch_config=launch_config_name,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=0,
                                    max_size=5,
                                    connection=self.tester.autoscale)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.create_as_policy(name=policy_name,
                                         adjustment_type="ExactCapacity",
                                         as_name=asg,
                                         scaling_adjustment=0,
                                         cooldown=120)
        if len(self.tester.autoscale.get_all_policies()) > 25:
            raise Exception("More than 25 policies exist for 1 auto scaling group")
        self.tester.delete_as_group(names=asg)

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        self.tester.delete_all_autoscaling_groups()
        self.tester.delete_all_launch_configs()
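
The three adjustment types exercised above follow simple arithmetic: ChangeInCapacity adds the adjustment, PercentChangeInCapacity scales the current capacity by a percentage, and ExactCapacity sets it outright; starting from 0 instances the test expects 4, then 2, then 0. A standalone sketch of that arithmetic (illustration only, not an AWS or eutester API):

# Illustrative sketch of the desired-capacity arithmetic the assertions rely on.
def expected_capacity(current, adjustment_type, adjustment):
    if adjustment_type == "ChangeInCapacity":
        return current + adjustment
    if adjustment_type == "PercentChangeInCapacity":
        return int(current * (1 + adjustment / 100.0))
    if adjustment_type == "ExactCapacity":
        return adjustment
    raise ValueError("unknown adjustment type: %s" % adjustment_type)

assert expected_capacity(0, "ChangeInCapacity", 4) == 4           # up policy
assert expected_capacity(4, "PercentChangeInCapacity", -50) == 2  # down policy
assert expected_capacity(2, "ExactCapacity", 0) == 0              # exact policy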
Example #18
0
class ReportingBasics(EutesterTestCase):
    def __init__(self, config_file=None, password=None):
        self.setuptestcase()
        # Setup basic eutester object
        self.tester = Eucaops( config_file=config_file, password=password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.volume = None
        self.bucket = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.clean_method = self.cleanup
        self.cur_time = str(int(time.time()))
        date_fields = time.localtime()
        self.date = str(date_fields.tm_year) + "-" + str(date_fields.tm_mon) + "-31"
        clcs = self.tester.get_component_machines("clc")
        if len(clcs) == 0:
            raise Exception("No CLC found")
        else:
            self.clc = clcs[0]
        poll_interval = 1
        write_interval = 1
        size_time_size_unit = "MB"
        size_time_time_unit = "MINS"
        size_unit = "MB"
        time_unit = "MINS"
        self.modify_property(property="reporting.default_poll_interval_mins",value=poll_interval)
        self.modify_property(property="reporting.default_write_interval_mins",value=write_interval)
        self.modify_property(property="reporting.default_size_time_size_unit",value=size_time_size_unit)
        self.modify_property(property="reporting.default_size_time_time_unit",value=size_time_time_unit)
        self.modify_property(property="reporting.default_size_unit",value=size_unit)
        self.modify_property(property="reporting.default_time_unit",value=time_unit)

    def cleanup(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        if self.volume:
            self.tester.delete_volume(self.volume)
        if self.bucket:
            self.tester.clear_bucket(self.bucket)
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def instance(self):
        self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        file_size_in_mb = 500
        for instance in self.reservation.instances:
            assert isinstance(instance, EuInstance)
            self.volume = self.tester.create_volume(zone=self.zone, size=4)
            device_path = instance.attach_volume(self.volume)
            instance.sys("mkfs.ext3 -F " + device_path)
            instance.sys("mount " + device_path + " /mnt")
            ### Write to root fs
            instance.sys("dd if=/dev/zero of=/tmp/test.img count=" + str(file_size_in_mb) + " bs=1M")
            ### Write to volume
            instance.sys("dd if=/dev/zero of=/mnt/test.img count=" + str(file_size_in_mb) + " bs=1M")

        self.tester.sleep(180)
        for instance in self.reservation.instances:
            report_output = self.generate_report("instance","csv", self.date)
            instance_lines = self.tester.grep(instance.id, report_output)
            for line in instance_lines:
                instance_data = self.parse_instance_line(line)
                #if not re.search( instance.id +",m1.small,1,9,0.2,0,0,0,0,93,200,0.2,0.0,0,1", line):
                if not re.match(instance_data.type, "m1.small"):
                    raise Exception("Failed to find proper output for " + str(instance) + " type. Received: " + instance_data.type )
                if not int(instance_data.number)  == 1:
                    raise Exception("Failed to find proper output for " + str(instance) + " number. Received: " + instance_data.number )
                if not int(instance_data.unit_time)  > 2 :
                    raise Exception("Failed to find proper output for " + str(instance) + " unit_time. Received: " + instance_data.unit_time )
                if not int(instance_data.disk_write)  > 1000:
                    raise Exception("Failed to find proper output for " + str(instance) + " disk_write. Received: " + instance_data.disk_write )
                if not int(instance_data.disk_time_write)  > 200:
                    raise Exception("Failed to find proper output for " + str(instance) + " disk_time_write. Received: " + instance_data.disk_time_write )


    def parse_instance_line(self, line):
        InstanceData = namedtuple('InstanceData', 'id type number unit_time cpu net_total_in net_total_out '
                                                'net_extern_in net_extern_out disk_read disk_write disk_iops_read '
                                                'disk_iops_write disk_time_read disk_time_write')
        values = line.split(",")
        return InstanceData(values[0],values[1],values[2],values[3],values[4],values[5],values[6],values[7],
                            values[8],values[9],values[10],values[11],values[12],values[13],values[14])

    def s3(self):
        self.bucket = self.tester.create_bucket(bucket_name="reporting-bucket-" + self.cur_time)
        key_size = 10
        self.tester.debug("Creating random " + str(key_size) + "MB of data")
        rand_string = self.tester.id_generator(size=1024*1024*10)
        self.tester.upload_object(self.bucket.name, "reporting-key" ,contents=rand_string)
        self.tester.sleep(120)
        report_output = self.generate_report("s3", "csv",self.date)
        bucket_lines = self.tester.grep(self.bucket.name, report_output)
        for line in bucket_lines:
            bucket_data = self.parse_bucket_line(line)
            if not int(bucket_data.size) == 10:
                raise Exception('Failed to find proper size for %s' % str(self.bucket))
            if not int(bucket_data.keys) == 1:
                raise Exception('Failed to find proper number of keys for %s' % str(self.bucket))
            if not int(bucket_data.unit_time) > 16:
                raise Exception('Failed to find proper amount of usage for %s' % str(self.bucket))

    def parse_bucket_line(self, line):
        BucketData = namedtuple('BucketData', 'name keys size unit_time')
        values = line.split(",")
        return BucketData(values[0],values[1],values[2],values[3] )

    def generate_report(self, type, format, end_date):
        return self.clc.sys("source " + self.tester.credpath + "/eucarc && eureport-generate-report -t " +
                    str(type) +" -f " + str(format) + " -e " + str(end_date) )

    def modify_property(self, property, value):
        """
        Modify a Eucalyptus property through the command-line euca-modify-property tool
        property        Property to modify
        value           Value to set it to
        """
        command = "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-modify-property -p " + str(property) + "=" + str(value)
        if self.clc.found(command, property):
            self.debug("Properly modified property " + property)
        else:
            raise Exception("Setting property " + property + " failed")
Example #19
0
class AutoScalingBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store",
                                             not_location="loadbalancer")
        self.address = None
        self.asg = None

    def clean_method(self):
        if self.asg:
            self.tester.wait_for_result(self.gracefully_delete, True)
            self.tester.delete_as_group(self.asg.name, force=True)
        self.tester.cleanup_artifacts()

    def AutoScalingBasics(self):
        ### create launch configuration
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         instance_type="m1.small",
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])

        ### create auto scale group
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(
            group_name=self.auto_scaling_group_name,
            availability_zones=self.tester.get_zones(),
            launch_config=self.launch_config_name,
            min_size=0,
            max_size=5)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.create_as_policy(name=self.up_policy_name,
                                     adjustment_type="ChangeInCapacity",
                                     scaling_adjustment=4,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)
        if len(
                self.tester.autoscale.get_all_policies(
                    policy_names=[self.up_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.up_policy_name +
                            ' not created')

        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50
        self.tester.create_as_policy(name=self.down_policy_name,
                                     adjustment_type="PercentChangeInCapacity",
                                     scaling_adjustment=self.down_size,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)

        if len(
                self.tester.autoscale.get_all_policies(
                    policy_names=[self.down_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.down_policy_name +
                            ' not created')

        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0
        self.tester.create_as_policy(name=self.exact_policy_name,
                                     adjustment_type="ExactCapacity",
                                     scaling_adjustment=self.exact_size,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)

        if len(
                self.tester.autoscale.get_all_policies(
                    policy_names=[self.exact_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' +
                            self.exact_policy_name + ' not created')

        self.debug("**** Created Auto Scaling Policies: " +
                   self.up_policy_name + " " + self.down_policy_name + " " +
                   self.exact_policy_name)

        self.tester.wait_for_result(self.scaling_activities_complete,
                                    True,
                                    timeout=180)
        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.up_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(
                self.auto_scaling_group_name).desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug(
            "Executed  ChangeInCapacity policy, increased desired capacity to: "
            + str(
                self.tester.describe_as_group(
                    self.auto_scaling_group_name).desired_capacity))

        self.tester.wait_for_result(self.scaling_activities_complete,
                                    True,
                                    timeout=180)

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.down_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(
                self.auto_scaling_group_name
        ).desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug(
            "Executed PercentChangeInCapacity policy, decreased desired capacity to: "
            + str(
                self.tester.describe_as_group(
                    self.auto_scaling_group_name).desired_capacity))

        self.tester.wait_for_result(self.scaling_activities_complete,
                                    True,
                                    timeout=180)

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.exact_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name
                                         ).desired_capacity != self.exact_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " + str(
            self.tester.describe_as_group(
                self.auto_scaling_group_name).desired_capacity))

        self.tester.wait_for_result(self.scaling_activities_complete,
                                    True,
                                    timeout=180)

        ### Test Delete all Auto Scaling Policies
        self.tester.delete_all_policies()

        ### Test Delete Auto Scaling Group
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None

        ### Test delete launch config
        self.tester.delete_launch_config(self.launch_config_name)

    def scaling_activities_complete(self):
        activities = self.asg.get_activities()
        for activity in activities:
            assert isinstance(activity, Activity)
            if activity.progress != 100:
                return False
        return True

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a limit of 100 launch configurations per account; this tests what happens if we try to create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.create_launch_config(name=self.launch_config_name,
                                             image_id=self.image.id)
            if len(self.tester.describe_launch_config()) > 100:
                raise Exception(
                    "More then 100 launch configs exist in 1 account")
        for lc in self.tester.describe_launch_config():
            self.tester.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        """
        AWS enforces a limit of 25 scaling policies per auto scaling group; this tests what happens if we try to create more
        """
        launch_config_name = 'LC-' + str(time.time())
        self.tester.create_launch_config(name=launch_config_name,
                                         image_id=self.image.id,
                                         instance_type="m1.small",
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        asg_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(
            group_name=asg_name,
            launch_config=launch_config_name,
            availability_zones=self.tester.get_zones(),
            min_size=0,
            max_size=5)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.create_as_policy(name=policy_name,
                                         adjustment_type="ExactCapacity",
                                         as_name=asg_name,
                                         scaling_adjustment=0,
                                         cooldown=120)
        if len(self.tester.autoscale.get_all_policies()) > 25:
            raise Exception(
                "More than 25 policies exist for 1 auto scaling group")
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        """

        remove ALL scaling policies, auto scaling groups and launch configs
        """
        self.tester.delete_all_policies()
        self.tester.delete_all_autoscaling_groups()
        self.tester.delete_all_launch_configs()

    def change_config(self):
        ### create initial launch configuration
        first_launch_config = 'First-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=first_launch_config,
                                         image_id=self.image.id,
                                         instance_type="m1.small")

        # create a replacement LC with different instance type
        second_launch_config = 'Second-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=second_launch_config,
                                         image_id=self.image.id,
                                         instance_type="m1.large")

        ### create auto scale group
        auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(
            group_name=auto_scaling_group_name,
            launch_config=first_launch_config,
            availability_zones=self.tester.get_zones(),
            min_size=1,
            max_size=4,
            desired_capacity=1)

        assert isinstance(self.asg, AutoScalingGroup)
        self.tester.update_as_group(group_name=self.asg.name,
                                    launch_config=second_launch_config,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=1,
                                    max_size=4)

        def wait_for_instances(number=1):
            self.asg = self.tester.describe_as_group(self.asg.name)
            instances = self.asg.instances
            if not instances:
                self.tester.debug("No instances in ASG")
                return False
            if len(self.asg.instances) != number:
                self.tester.debug("Instances not yet allocated")
                return False
            for instance in instances:
                assert isinstance(instance, Instance)
                instance = self.tester.get_instances(
                    idstring=instance.instance_id)[0]
                if instance.state != "running":
                    self.tester.debug("Instance: " + str(instance) +
                                      " still in " + instance.state + " state")
                    return False
                else:
                    self.tester.debug("Instance: " + str(instance) +
                                      " now running")
            return True

        self.tester.wait_for_result(wait_for_instances, True, timeout=360)
        ### Set desired capacity
        new_desired = 2
        self.asg.set_capacity(new_desired)
        self.tester.wait_for_result(wait_for_instances,
                                    True,
                                    number=new_desired,
                                    timeout=360)
        #wait briefly before changing capacity
        # TODO: get the new instance ID and verify its instance type is correct
        ### Delete Auto Scaling Group
        last_instance = self.tester.get_instances(
            idstring=self.tester.get_last_instance_id())[0]
        assert last_instance.instance_type == "m1.large"

        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### delete launch configs
        self.tester.delete_launch_config(first_launch_config)
        self.tester.delete_launch_config(second_launch_config)

    def gracefully_delete(self, asg=None):
        if not asg:
            asg = self.asg
        assert isinstance(asg, AutoScalingGroup)
        try:
            self.tester.delete_as_group(name=asg.name, force=True)
        except BotoServerError, e:
            if e.status == 400 and e.reason == "ScalingActivityInProgress":
                return False
        return True
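
gracefully_delete() above returns False while the group still reports ScalingActivityInProgress, which is why it is handed to wait_for_result() rather than called once. The general poll-until-true shape it relies on looks roughly like this; it is a sketch of the pattern, not eutester's implementation of wait_for_result:

# Hedged sketch of a poll-until-expected helper (not eutester's wait_for_result).
import time

def wait_until(predicate, expected=True, timeout=180, interval=10, **kwargs):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate(**kwargs) == expected:
            return True
        time.sleep(interval)
    raise RuntimeError("%s did not return %s within %ss"
                       % (predicate.__name__, expected, timeout))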
Example #20
0
class StickinessBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(int(time.time())))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(int(time.time())))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        ### Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi()

        ### Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.load_balancer_port = 80

        (self.web_servers, self.filename) = self.tester.create_web_servers(keypair=self.keypair,
                                                                           group=self.group,
                                                                           zone=self.zone,
                                                                           port=self.load_balancer_port,
                                                                           filename='instance-name',
                                                                           image=self.image)

        self.load_balancer = self.tester.create_load_balancer(zones=[self.zone],
                                                              name="test-" + str(int(time.time())),
                                                              load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)
        self.tester.register_lb_instances(self.load_balancer.name,
                                          self.web_servers.instances)


    def clean_method(self):
        self.tester.cleanup_artifacts()

    def GenerateRequests(self):
        """
        This will test the most basic use case for a load balancer.
        Uses two backend instances with httpd servers.
        """
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "http://{0}:{1}/instance-name".format(lb_ip, self.load_balancer_port)
        return self.tester.generate_http_requests(url=lb_url, count=100, worker_threads=1)

    def session_affinity_test(self):
        lbpolicy = "LB-Policy"
        self.tester.create_lb_cookie_stickiness_policy(cookie_expiration_period=300,
                                                       lb_name=self.load_balancer.name,
                                                       policy_name=lbpolicy)
        acpolicy = "AC-Policy"
        self.tester.create_app_cookie_stickiness_policy(name="test-cookie",
                                                        lb_name=self.load_balancer.name,
                                                        policy_name=acpolicy)
        """test lb stickiness"""
        self.tester.sleep(2)
        self.tester.set_lb_policy(lb_name=self.load_balancer.name, lb_port=80, policy_name=lbpolicy)
        responses = self.GenerateRequests()
        host = responses[0]
        for response in responses:
            if response != host:
                raise Exception(
                    "Expected same response due to load balancer stickiness policy. Got initial response: " + host +
                    " subsequent response: " + response)

        """test app cookie stickiness"""
        self.tester.set_lb_policy(lb_name=self.load_balancer.name, lb_port=80, policy_name=acpolicy)
        responses = self.GenerateRequests()
        host = responses[0]
        for response in responses:
            if response != host:
                raise Exception(
                    "Expected same response due to app cookie stickiness policy. Got initial response: " + host +
                    " subsequent response: " + response)
        return
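
Both stickiness checks above assert the same property: with a stickiness policy applied, every response in a request batch should come from the backend that answered first. A tiny standalone helper expressing that check (illustration only, not part of eutester):

# Illustrative uniformity check for sticky-session responses.
def assert_sticky(responses, policy="stickiness"):
    first = responses[0]
    for response in responses:
        if response != first:
            raise Exception("Expected one backend under %s policy; got %s then %s"
                            % (policy, first, response))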
Example #21
0
class InstanceBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        self.tester = Eucaops(credpath=self.args.credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None
        self.volume = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        if self.address:
            assert isinstance(self.address, Address)
            self.tester.release_address(self.address)
        if self.volume:
            self.tester.delete_volume(self.volume)
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def BasicInstanceChecks(self, zone=None):
        """
        This case was developed to run through a series of basic instance tests.
             The tests are as follows:
                   - execute run_instances command
                   - make sure that public DNS name and private IP aren't the same
                       (This is for Managed/Managed-NOVLAN networking modes)
                   - test to see if instance is ping-able
                   - test to make sure that instance is accessible via ssh
                       (ssh into instance and run basic ls command)
             If any of these tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(
                self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=zone)
        for instance in self.reservation.instances:
            self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                            'Instance did not go to running')
            self.assertNotEqual(instance.public_dns_name,
                                instance.private_ip_address,
                                'Public and private IP are the same')
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            'Could not ping instance')
            self.assertFalse(
                instance.found("ls -1 /dev/" + instance.rootfs_device + "2",
                               "No such file or directory"),
                'Did not find ephemeral storage at ' + instance.rootfs_device +
                "2")
        return self.reservation

    def ElasticIps(self, zone=None):
        """
        This case was developed to test elastic IPs in Eucalyptus. This test case does
        not test instances that are launched using the private-addressing option.
        The test case executes the following tests:
            - allocates an IP, associates the IP to the instance, then pings the instance.
            - disassociates the allocated IP, then pings the instance.
            - releases the allocated IP address
        If any of the tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(
                keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            self.address = self.tester.allocate_address()
            self.assertTrue(self.address, 'Unable to allocate address')
            self.tester.associate_address(instance, self.address)
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            "Could not ping instance with new IP")
            self.tester.disassociate_address_from_instance(instance)
            self.tester.release_address(self.address)
            self.address = None
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            "Could not ping after dissassociate")
        return self.reservation

    def MaxSmallInstances(self, available_small=None, zone=None):
        """
        This case was developed to test the maximum number of m1.small vm types a configured
        cloud can run.  The test runs the maximum number of m1.small vm types allowed, then
        tests to see if all the instances reached a running state.  If there is a failure,
        the test case errors out; logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
        if available_small is None:
            available_small = self.tester.get_available_vms()
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    min=available_small,
                                                    max=available_small,
                                                    zone=zone)
        self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                        'Not all instances  went to running')
        return self.reservation

    def LargestInstance(self, zone=None):
        """
        This case was developed to test the maximum number of c1.xlarge vm types a configured
        cloud can run.  The test runs the maximum number of c1.xlarge vm types allowed, then
        tests to see if all the instances reached a running state.  If there is a failure,
        the test case errors out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    type="c1.xlarge",
                                                    zone=zone)
        self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                        'Not all instances  went to running')
        return self.reservation

    def MetaData(self, zone=None):
        """
        This case was developed to test the metadata service of an instance for consistency.
        The following meta-data attributes are tested:
           - public-keys/0/openssh-key
           - security-groups
           - instance-id
           - local-ipv4
           - public-ipv4
           - ami-id
           - ami-launch-index
           - reservation-id
           - placement/availability-zone
           - kernel-id
           - public-hostname
           - local-hostname
           - hostname
           - ramdisk-id
           - instance-type
           - any bad metadata that shouldn't be present.
        Missing nodes: ['block-device-mapping/', 'ami-manifest-path']
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(
                self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=zone)
        for instance in self.reservation.instances:
            ## Need to verify  the public key (could just be checking for a string of a certain length)
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-keys/0/openssh-key")
                    [0].split('eucalyptus.')[-1], self.keypair.name),
                'Incorrect public key in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("security-groups")[0],
                    self.group.name), 'Incorrect security group in metadata')
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], ""))
            self.assertTrue(
                re.match(instance.get_metadata("instance-id")[0], instance.id),
                'Incorrect instance id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("local-ipv4")[0],
                    instance.private_ip_address),
                'Incorrect private ip in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-ipv4")[0],
                    instance.ip_address), 'Incorrect public ip in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ami-id")[0], instance.image_id),
                'Incorrect ami id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ami-launch-index")[0],
                    instance.ami_launch_index),
                'Incorrect launch index in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("reservation-id")[0],
                    self.reservation.id), 'Incorrect reservation in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("placement/availability-zone")[0],
                    instance.placement),
                'Incorrect availability-zone in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("kernel-id")[0], instance.kernel),
                'Incorrect kernel id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-hostname")[0],
                    instance.public_dns_name),
                'Incorrect public host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("local-hostname")[0],
                    instance.private_dns_name),
                'Incorrect private host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("hostname")[0], instance.dns_name),
                'Incorrect host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ramdisk-id")[0], instance.ramdisk),
                'Incorrect ramdisk in metadata')  #instance-type
            self.assertTrue(
                re.match(
                    instance.get_metadata("instance-type")[0],
                    instance.instance_type),
                'Incorrect instance type in metadata')
            BAD_META_DATA_KEYS = ['foobar']
            for key in BAD_META_DATA_KEYS:
                self.assertTrue(
                    re.search("Not Found",
                              "".join(instance.get_metadata(key))),
                    'No fail message on invalid meta-data node')
        return self.reservation

    def DNSResolveCheck(self, zone=None):
        """
        This case was developed to test DNS resolution information for public/private DNS
        names and IP addresses.  The tested DNS resolution behavior is expected to follow
        AWS EC2.  The following tests are ran using the associated meta-data attributes:
           - check to see if Eucalyptus Dynamic DNS is configured
           - nslookup on hostname; checks to see if it matches local-ipv4
           - nslookup on local-hostname; check to see if it matches local-ipv4
           - nslookup on local-ipv4; check to see if it matches local-hostname
           - nslookup on public-hostname; check to see if it matches local-ipv4
           - nslookup on public-ipv4; check to see if it matches public-hostname
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(
                self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=zone)
        for instance in self.reservation.instances:

            # Test to see if Dynamic DNS has been configured #
            if re.match("internal",
                        instance.private_dns_name.split('eucalyptus.')[-1]):
                # Per AWS standard, resolution should have private hostname or private IP as a valid response
                # Perform DNS resolution against private IP and private DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys("nslookup " +
                                     instance.get_metadata("hostname")[0])[3]),
                    "DNS lookup failed for hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-ipv4")[0],
                        instance.sys("nslookup " +
                                     instance.get_metadata("hostname")[0])[5]),
                    "Incorrect DNS resolution for hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("local-hostname")[0])[3]),
                    "DNS lookup failed for private hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-ipv4")[0],
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("local-hostname")[0])[5]),
                    "Incorrect DNS resolution for private hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys("nslookup " +
                                     instance.get_metadata("local-ipv4")[0])
                        [3]), "DNS lookup failed for private IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-hostname")[0],
                        instance.sys("nslookup " +
                                     instance.get_metadata("local-ipv4")[0])
                        [4]),
                    "Incorrect DNS resolution for private IP address")
                # Perform DNS resolution against public IP and public DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("public-hostname")[0])[3]),
                    "DNS lookup failed for public-hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-ipv4")[0],
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("public-hostname")[0])[5]),
                    "Incorrect DNS resolution for public-hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys("nslookup " +
                                     instance.get_metadata("public-ipv4")[0])
                        [3]), "DNS lookup failed for public IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname
                self.assertTrue(
                    re.search(
                        instance.get_metadata("public-hostname")[0],
                        instance.sys("nslookup " +
                                     instance.get_metadata("public-ipv4")[0])
                        [4]), "Incorrect DNS resolution for public IP address")

        return self.reservation

    def DNSCheck(self, zone=None):
        """
        This case was developed to test to make sure Eucalyptus Dynamic DNS reports correct
        information for public/private IP address and DNS names passed to meta-data service.
        The following tests are ran using the associated meta-data attributes:
           - check to see if Eucalyptus Dynamic DNS is configured
           - check to see if local-ipv4 and local-hostname are not the same
           - check to see if public-ipv4 and public-hostname are not the same
        If any of these tests fail, the test case will error out; logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(
                self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=zone)
        for instance in self.reservation.instances:

            # Test to see if Dynamic DNS has been configured #
            if re.match("internal",
                        instance.private_dns_name.split('eucalyptus.')[-1]):
                # Make sure that private_ip_address is not the same as local-hostname
                self.assertFalse(
                    re.match(instance.private_ip_address,
                             instance.private_dns_name),
                    'local-ipv4 and local-hostname are the same with DNS on')
                # Make sure that ip_address is not the same as public-hostname
                self.assertFalse(
                    re.match(instance.ip_address, instance.public_dns_name),
                    'public-ipv4 and public-hostname are the same with DNS on')

        return self.reservation

    def Reboot(self, zone=None):
        """
        This case was developed to test IP connectivity and volume attachment after
        instance reboot.  The following tests are done for this test case:
                   - create a 1 GB EBS volume, then attach the volume
                   - reboot the instance
                   - attempt to connect to the instance via ssh
                   - check to see if the EBS volume is attached
                   - detach the volume
                   - delete the volume
        If any of these tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            self.reservation = self.tester.run_instance(
                self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=zone)
        for instance in self.reservation.instances:
            ### Create 1GB volume in first AZ
            self.volume = self.tester.create_volume(instance.placement, 1)
            self.volume_device = instance.attach_volume(self.volume)
            ### Reboot instance
            instance.reboot_instance_and_verify(waitconnect=20)
            instance.detach_euvolume(self.volume)
            self.tester.delete_volume(self.volume)
            self.volume = None
        return self.reservation

    def run_terminate(self):
        reservation = None
        try:
            reservation = self.tester.run_instance(image=self.image,
                                                   zone=self.zone,
                                                   keypair=self.keypair.name,
                                                   group=self.group.name)
            self.tester.terminate_instances(reservation)
            return 0
        except Exception, e:
            if reservation:
                self.tester.terminate_instances(reservation)
            return 1
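
The metadata/DNS assertions above key off fixed line indices ([3], [4], [5]) of the nslookup output returned by instance.sys(), which is fragile if the resolver output shifts by a line. A small standalone sketch (not part of the test above) of a less index-sensitive check, searching the whole output for the expected address; the sample output lines are made up for illustration.

import re

def dns_answer_contains(nslookup_output_lines, expected):
    """Return True if any line of nslookup output contains the expected value."""
    pattern = re.compile(re.escape(expected))
    return any(pattern.search(line) for line in nslookup_output_lines)

# Hypothetical output of instance.sys("nslookup <public-hostname>")
sample_output = [
    "Server:         10.1.1.254",
    "Address:        10.1.1.254#53",
    "",
    "Non-authoritative answer:",
    "Name:   euca-10-111-1-5.eucalyptus.example.com",
    "Address: 10.111.1.5",
]
assert dns_answer_contains(sample_output, "10.111.1.5")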
Example #22
0
class StickinessBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(int(time.time())))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" +
                                               str(int(time.time())))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        ### Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi()

        ### Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.load_balancer_port = 80

        (self.web_servers, self.filename) = self.tester.create_web_servers(
            keypair=self.keypair,
            group=self.group,
            zone=self.zone,
            port=self.load_balancer_port,
            filename='instance-name',
            image=self.image)

        self.load_balancer = self.tester.create_load_balancer(
            zones=[self.zone],
            name="test-" + str(int(time.time())),
            load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)
        self.tester.register_lb_instances(self.load_balancer.name,
                                          self.web_servers.instances)

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def GenerateRequests(self):
        """
        This will test the most basic use case for a load balancer.
        Uses two backend instances with httpd servers.
        """
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "http://{0}:{1}/instance-name".format(lb_ip,
                                                       self.load_balancer_port)
        return self.tester.generate_http_requests(url=lb_url,
                                                  count=100,
                                                  worker_threads=1)

    def session_affinity_test(self):
        lbpolicy = "LB-Policy"
        self.tester.create_lb_cookie_stickiness_policy(
            cookie_expiration_period=300,
            lb_name=self.load_balancer.name,
            policy_name=lbpolicy)
        acpolicy = "AC-Policy"
        self.tester.create_app_cookie_stickiness_policy(
            name="test-cookie",
            lb_name=self.load_balancer.name,
            policy_name=acpolicy)
        """test lb stickiness"""
        self.tester.sleep(2)
        self.tester.set_lb_policy(lb_name=self.load_balancer.name,
                                  lb_port=80,
                                  policy_name=lbpolicy)
        responses = self.GenerateRequests()
        host = responses[0]
        for response in responses:
            if response != host:
                raise Exception(
                    "Expected same response due to load balancer stickiness policy. Got initial response: "
                    + host + " subsequent response: " + response)
        """test app cookie stickiness"""
        self.tester.set_lb_policy(lb_name=self.load_balancer.name,
                                  lb_port=80,
                                  policy_name=acpolicy)
        responses = self.GenerateRequests()
        host = responses[0]
        for response in responses:
            if response != host:
                raise Exception(
                    "Expected same response due to app cookie stickiness policy. Got initial response: "
                    + host + " subsequent response: " + response)
        return
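
The "all responses must match the first one" check appears twice in session_affinity_test() above; a minimal helper like the following (a sketch, not part of the eutester API) captures that stickiness expectation once.

def assert_sticky(responses, policy_description):
    """Raise if the responses did not all come from the same backend instance."""
    if not responses:
        raise Exception("No responses received while testing " + policy_description)
    host = responses[0]
    for response in responses:
        if response != host:
            raise Exception("Expected same response due to " + policy_description +
                            ". Got initial response: " + host +
                            " subsequent response: " + response)

# Hypothetical usage with the responses returned by GenerateRequests():
# assert_sticky(self.GenerateRequests(), "load balancer stickiness policy")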
Example #23
0
class ReportingBasics(EutesterTestCase):
    def __init__(self, config_file=None, password=None):
        self.setuptestcase()
        # Setup basic eutester object
        self.tester = Eucaops(config_file=config_file, password=password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.volume = None
        self.bucket = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.clean_method = self.cleanup
        self.cur_time = str(int(time.time()))
        date_fields = time.localtime()
        self.date = str(date_fields.tm_year) + "-" + str(
            date_fields.tm_mon) + "-31"
        clcs = self.tester.get_component_machines("clc")
        if len(clcs) == 0:
            raise Exception("No CLC found")
        else:
            self.clc = clcs[0]
        poll_interval = 1
        write_interval = 1
        size_time_size_unit = "MB"
        size_time_time_unit = "MINS"
        size_unit = "MB"
        time_unit = "MINS"
        self.modify_property(property="reporting.default_poll_interval_mins",
                             value=poll_interval)
        self.modify_property(property="reporting.default_write_interval_mins",
                             value=write_interval)
        self.modify_property(property="reporting.default_size_time_size_unit",
                             value=size_time_size_unit)
        self.modify_property(property="reporting.default_size_time_time_unit",
                             value=size_time_time_unit)
        self.modify_property(property="reporting.default_size_unit",
                             value=size_unit)
        self.modify_property(property="reporting.default_time_unit",
                             value=time_unit)

    def cleanup(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        if self.volume:
            self.tester.delete_volume(self.volume)
        if self.bucket:
            self.tester.clear_bucket(self.bucket)
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def instance(self):
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=self.zone)
        file_size_in_mb = 500
        for instance in self.reservation.instances:
            assert isinstance(instance, EuInstance)
            self.volume = self.tester.create_volume(azone=self.zone, size=4)
            device_path = instance.attach_volume(self.volume)
            instance.sys("mkfs.ext3 -F " + device_path)
            instance.sys("mount " + device_path + " /mnt")
            ### Write to root fs
            instance.sys("dd if=/dev/zero of=/tmp/test.img count=" +
                         str(file_size_in_mb) + " bs=1M")
            ### Write to volume
            instance.sys("dd if=/dev/zero of=/mnt/test.img count=" +
                         str(file_size_in_mb) + " bs=1M")

        self.tester.sleep(180)
        for instance in self.reservation.instances:
            report_output = self.generate_report("instance", "csv", self.date)
            instance_lines = self.tester.grep(instance.id, report_output)
            for line in instance_lines:
                instance_data = self.parse_instance_line(line)
                #if not re.search( instance.id +",m1.small,1,9,0.2,0,0,0,0,93,200,0.2,0.0,0,1", line):
                if not re.match(instance_data.type, "m1.small"):
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " type. Received: " +
                                    instance_data.type)
                if not int(instance_data.number) == 1:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " number. Received: " +
                                    instance_data.number)
                if not int(instance_data.unit_time) > 2:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " unit_time. Received: " +
                                    instance_data.unit_time)
                if not int(instance_data.disk_write) > 1000:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " disk_write. Received: " +
                                    instance_data.disk_write)
                if not int(instance_data.disk_time_write) > 200:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) +
                                    " disk_time_write. Received: " +
                                    instance_data.disk_time_write)

    def parse_instance_line(self, line):
        InstanceData = namedtuple(
            'InstanceData',
            'id type number unit_time cpu net_total_in net_total_out '
            'net_extern_in net_extern_out disk_read disk_write disk_iops_read '
            'disk_iops_write disk_time_read disk_time_write')
        values = line.split(",")
        return InstanceData(values[0], values[1], values[2], values[3],
                            values[4], values[5], values[6], values[7],
                            values[8], values[9], values[10], values[11],
                            values[12], values[13], values[14])

    def s3(self):
        self.bucket = self.tester.create_bucket(
            bucket_name="reporting-bucket-" + self.cur_time)
        key_size = 10
        self.tester.debug("Creating random " + str(key_size) + "MB of data")
        rand_string = self.tester.id_generator(size=1024 * 1024 * 10)
        self.tester.upload_object(self.bucket.name,
                                  "reporting-key",
                                  contents=rand_string)
        self.tester.sleep(120)
        report_output = self.generate_report("s3", "csv", self.date)
        bucket_lines = self.tester.grep(self.bucket.name, report_output)
        for line in bucket_lines:
            bucket_data = self.parse_bucket_line(line)
            if not int(bucket_data.size) == 10:
                raise Exception('Failed to find proper size for %s' %
                                str(self.bucket))
            if not int(bucket_data.keys) == 1:
                raise Exception('Failed to find proper number of keys for %s' %
                                str(self.bucket))
            if not int(bucket_data.unit_time) > 16:
                raise Exception(
                    'Failed to find proper amount of usage for %s' %
                    str(self.bucket))

    def parse_bucket_line(self, line):
        BucketData = namedtuple('BucketData', 'name keys size unit_time')
        values = line.split(",")
        return BucketData(values[0], values[1], values[2], values[3])

    def generate_report(self, type, format, end_date):
        return self.clc.sys("source " + self.tester.credpath +
                            "/eucarc && eureport-generate-report -t " +
                            str(type) + " -f " + str(format) + " -e " +
                            str(end_date))

    def modify_property(self, property, value):
        """
        Modify a eucalyptus property through the command line euca-modify-property tool
        property        Property to modify
        value           Value to set it too
        """
        command = "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-modify-property -p " + str(
            property) + "=" + str(value)
        if self.clc.found(command, property):
            self.debug("Properly modified property " + property)
        else:
            raise Exception("Setting property " + property + " failed")
Example #24
0
class PopulateUpgrade(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
        self.tester.poll_count = 120

        self.security_groups = []

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None
        self.volume = None
        self.snapshot = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        pass

    def Instances(self, type="instance-store"):
        """
        This case was developed to run through a series of basic instance tests.
             The tests are as follows:
                   - execute run_instances command
                   - make sure that public DNS name and private IP aren't the same
                       (This is for Managed/Managed-NOVLAN networking modes)
                   - test to see if instance is ping-able
                   - test to make sure that instance is accessible via ssh
                       (ssh into instance and run basic ls command)
             If any of these tests fail, the test case will error out, logging the results.
        """
        test_image = self.tester.get_emi(root_device_type=type)

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )

        self.security_groups.append(self.group)

        # Test: INSTANCESTORE VOLATTACH:no ADDR:user
        instance_1 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0]
        instance_1_address = self.tester.allocate_address()
        self.tester.associate_address(instance=instance_1, address=instance_1_address)

        # Test: INSTANCESTORE VOLATTACH:no ADDR:system
        instance_2 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0]

        # Test: INSTANCESTORE VOLATTACH:no ADDR:system
        instance_3 = self.tester.run_instance(test_image, group=self.group.name, private_addressing=True, is_reachable=False).instances[0]

        # Test: INSTANCESTORE VOLATTACH:yes ADDR:user
        instance_4 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0]
        instance_4_address = self.tester.allocate_address()
        self.tester.associate_address(instance=instance_4, address=instance_4_address)
        volume = self.tester.create_volume(zone=self.zone)
        instance_4.attach_volume(volume=volume)

        # Test: INSTANCESTORE VOLATTACH:yes ADDR:system
        instance_5 = self.tester.run_instance(test_image, keypair=self.keypair.name, group=self.group.name).instances[0]
        volume = self.tester.create_volume(zone=self.zone)
        instance_5.attach_volume(volume=volume)

        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        self.security_groups.append(self.group)
        # Test: INSTANCESTORE VOLATTACH:yes ADDR:system
        instance_6 = self.tester.run_instance(test_image, group=self.group.name, private_addressing=True, is_reachable=False).instances[0]

    def PopulateAll(self):
        self.Instances("instance-store")
        self.Instances("ebs")
Example #25
0
class ClusterBasics(unittest.TestCase):
    def setUp(self):

        if options.config_file:
            self.tester = Eucaops(config_file=options.config_file,
                                  password=options.clc_password)
        else:
            print "\tNeed to pass --config_file option. Try --help for more information\n"
            exit(1)

        ## If specific image wants to be tested, use that; if not, use any instance-store backed image.
        if options.image:
            self.image = self.tester.get_emi(emi=options.image)
        else:
            self.image = self.tester.get_emi(root_device_type="instance-store")

        self.keypair = self.tester.add_keypair(options.prefix + "-" +
                                               str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"

        ### Identify type of instance to run
        if options.type == "random":
            options.type = random.choice([
                "m1.small", "c1.medium", "m1.large", "m1.xlarge", "c1.xlarge"
            ])

        ### Identify number of instances to run (i.e. number of security groups)
        self.num_vms = self.tester.get_available_vms(options.type)

        if self.num_vms >= options.number:
            self.available = options.number
        else:
            options.type = "m1.small"
            avail_vms = self.tester.get_available_vms(options.type)
            if avail_vms < options.number:
                self.tester.fail(
                    "Not enough m1.small vm types to run test with minimal of 3 security groups."
                )
            else:
                self.available = options.number

        self.security_groups = []
        self.reservations = []

    def tearDown(self):
        ### Clean up after running test case
        for reservation in self.reservations:
            self.tester.terminate_instances(reservation)
        for security_group in self.security_groups:
            self.tester.delete_group(security_group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.keypath = None
        self.keypair = None
        self.image = None
        self.num_vms = None
        self.available = None
        self.security_groups = None
        self.tester = None

    ### Test Cases ###
    def iptables_Cruft(self):
        ### Launch number of instances based upon number of security groups wanting to be tested.
        ### Take snapshot of iptables before creating security groups and launching instances.
        ### Use service manager to get to enabled CC to get iptables rules
        partition = self.tester.service_manager.partitions.keys()
        part = list(partition)[0]
        main_part = self.tester.service_manager.partitions.get(part)
        cc_machine = main_part.get_enabled_cc()
        cc_shell = self.tester.create_ssh(hostname=cc_machine.hostname,
                                          password=options.cc_password)
        pre_stdin, pre_iptables, pre_stderr = cc_shell.exec_command(
            "iptables-save | grep -v \"#\" | grep -v \"\:PRE\" | grep -v \"\:POST\" | grep -v \"\:INPUT\" | grep -v \"\:FORWARD\" | grep -v \"\:OUT\""
        )

        self.pre_iptables = list(pre_iptables)

        self.assertTrue(pre_stderr, "pre_Iptables_Snapshot failed.")

        ### Create security group for number of security groups we want to test.

        while self.available > 0:
            ### Create unique security group and authorize SSH and PING
            sec_group = self.tester.add_group(group_name=options.prefix + "-" +
                                              str(time.time()))
            self.assertNotEqual(len(sec_group.name), 0,
                                "Could not create group.")
            self.assertTrue(
                self.tester.authorize_group_by_name(group_name=sec_group.name),
                "Could not authorize group for SSH")
            self.assertTrue(
                self.tester.authorize_group_by_name(group_name=sec_group.name,
                                                    port=-1,
                                                    protocol="icmp"),
                "Could not authorize group for PING")
            self.security_groups.append(sec_group)

            ### Launch instance for the unique security group
            try:
                reservation = self.tester.run_instance(
                    self.image,
                    keypair=self.keypair.name,
                    group=sec_group.name,
                    type=options.type)
            except Exception, e:
                self.fail("Caught an exception when running the instance: " +
                          str(e))

            self.reservations.append(reservation)

            ### Decrement count of security groups and instances left to create
            self.available -= 1

        ### Take snapshot of iptables after deleting security groups and terminating instances.
        ### Use service manager to get to enabled CC to get iptables rules
        partition = self.tester.service_manager.partitions.keys()
        part = list(partition)[0]
        main_part = self.tester.service_manager.partitions.get(part)
        cc_machine = main_part.get_enabled_cc()
        cc_shell = self.tester.create_ssh(hostname=cc_machine.hostname,
                                          password=options.cc_password)
        post_stdin, post_iptables, post_stderr = cc_shell.exec_command(
            "iptables-save | grep -v \"#\" | grep -v \"\:PRE\" | grep -v \"\:POST\" | grep -v \"\:INPUT\" | grep -v \"\:FORWARD\" | grep -v \"\:OUT\""
        )

        self.post_iptables = list(post_iptables)

        self.assertTrue(post_stderr, "post_Iptables_Snapshot failed.")

        ### Evaluate pre and post iptables outputs to see if there is a difference.
        if (len(self.pre_iptables) != len(self.post_iptables)):
            ## Get different lines and print them
            iptables_diff = set(self.post_iptables) - set(self.pre_iptables)
            pp = pprint.PrettyPrinter(indent=4)

            self.tester.critical("\n======================================\n")
            self.tester.critical("Diffences between iptables snapshots: ")
            self.tester.critical("PRE-IPTABLES SNAPSHOT LENGTH: %i",
                                 len(self.pre_iptables))
            self.tester.critical("POST-IPTABLES SNAPSHOT LENGTH: %i",
                                 len(self.post_iptables))
            self.tester.critical("\n---------------------------------------\n")
            pp.pprint(list(iptables_diff))
            self.tester.critical("\n======================================\n")
Example #26
0
class SSLTermination(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.cert_name = "elb-ssl-test-"+str(time.time())
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        ### Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")

        ### Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.load_balancer_port = 80

        (self.web_servers, self.filename) = self.tester.create_web_servers(keypair=self.keypair,
                                                                          group=self.group,
                                                                          zone=self.zone,
                                                                          port=self.load_balancer_port,
                                                                          filename='instance-name',
                                                                          image=self.image,
                                                                          count=1)

        self.load_balancer = self.tester.create_load_balancer(zones=[self.zone],
                                                              name="test-" + str(time.time()),
                                                              load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)
        self.tester.register_lb_instances(self.load_balancer.name,
                                          self.web_servers.instances)

    def ssl_termination(self):
        """
        This will test ELB with HTTPS listener.

        @raise Exception:
        """
        self.debug("ELB SSl test")

        """get ELB ip info and setup url"""
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "https://{0}/instance-name".format(lb_ip)

        """upload server certificate"""
        self.tester.add_server_cert(cert_name=self.cert_name)

        """create a new listener on HTTPS port 443 and remove listener on port 80"""
        cert_arn = self.tester.get_server_cert(self.cert_name).arn
        listener = (443, 80, "HTTPS", cert_arn)
        self.tester.add_lb_listener(lb_name=self.load_balancer.name, listener=listener)
        self.tester.remove_lb_listener(lb_name=self.load_balancer.name, port=self.load_balancer_port)

        """perform https requests to LB"""
        self.tester.generate_http_requests(url=lb_url, count=10)

    def clean_method(self):
        self.tester.delete_server_cert(self.cert_name)
        self.tester.cleanup_artifacts()
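
If generate_http_requests() is not available, a minimal standalone sketch of hitting the HTTPS listener directly. This assumes Python 2.7.9+, where urllib2.urlopen() accepts an ssl context; verification is disabled because the test certificate uploaded above is self-signed, and the URL below is a hypothetical resolved ELB address.

import ssl
import urllib2

lb_url = "https://10.111.5.35/instance-name"   # placeholder for the resolved ELB address
context = ssl._create_unverified_context()     # the uploaded test cert is self-signed
for _ in range(10):
    response = urllib2.urlopen(lb_url, context=context, timeout=10)
    print response.getcode(), response.read()[:60]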
Example #27
0
class InstanceBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--region", default=None)
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None
        self.volume = None
        self.snapshot = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        ### Terminate the reservation if it is still up
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")

        ### DELETE group
        self.tester.delete_group(self.group)

        ### Delete keypair in cloud and from filesystem
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def MyTest(self):
        """
        This case was developed to run through a series of basic instance tests.
             The tests are as follows:
                   - execute run_instances command
                   - make sure that public DNS name and private IP aren't the same
                       (This is for Managed/Managed-NOVLAN networking modes)
                   - test to see if instance is ping-able
                   - test to make sure that instance is accessible via ssh
                       (ssh into instance and run basic ls command)
             If any of these tests fail, the test case will error out, logging the results.
        """
        if not self.reservation:
            self.reservation = self.tester.run_instance(
                self.image, keypair=self.keypair.name, group=self.group.name)
        for instance in self.reservation.instances:
            self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                            'Instance did not go to running')
            self.assertNotEqual(instance.public_dns_name,
                                instance.private_ip_address,
                                'Public and private IP are the same')
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            'Could not ping instance')
            self.assertFalse(
                instance.found("ls -1 /dev/" + instance.rootfs_device + "2",
                               "No such file or directory"),
                'Did not find ephemeral storage at ' + instance.rootfs_device +
                "2")
Example #28
0
class LoadBalancing(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        ### Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")

        ### Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.load_balancer_port = 80

        (self.web_servers, self.filename) = self.tester.create_web_servers(keypair=self.keypair,
                                                                          group=self.group,
                                                                          zone=self.zone,
                                                                          port=self.load_balancer_port)

        self.load_balancer = self.tester.create_load_balancer(zones= [self.zone],
                                                              name="test-" + str(time.time()),
                                                              load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)

        ### Populate resources we will use and cleanup
        self.address = None
        self.volume = None
        self.snapshot = None
        self.reservation = None


    def clean_method(self):
        ### Terminate the reservation if it is still up
        self.tester.cleanup_artifacts()

    def GenerateRequests(self):
        """
        This will test the most basic use case for a load balancer. Uses two backend instances with httpd servers.
        """
        lb_url = "http://{0}:{1}".format(self.load_balancer.dns_name,
                                         self.load_balancer_port)
        self.tester.generate_http_requests(url=lb_url, count=100)
Example #29
0
class AutoScalingBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath)

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None

    def clean_method(self):
        ### DELETE group
        self.tester.delete_group(self.group)

        ### Delete keypair in cloud and from filesystem
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def AutoScalingBasics(self):
        ### test create  and describe launch config
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        if len(self.tester.describe_launch_config([self.launch_config_name
                                                   ])) != 1:
            raise Exception('Launch Config not created')
        self.debug('**** Created Launch Config: ' +
                   self.tester.describe_launch_config(
                       [self.launch_config_name])[0].name)

        ### test create and describe auto scale group
        self.initial_size = len(self.tester.describe_as_group())
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                    launch_config=self.launch_config_name,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=0,
                                    max_size=5,
                                    connection=self.tester.autoscale)
        if len(self.tester.describe_as_group([self.auto_scaling_group_name
                                              ])) != 1:
            raise Exception('Auto Scaling Group not created')
        self.debug("**** Created Auto Scaling Group: " +
                   self.tester.describe_as_group(
                       [self.auto_scaling_group_name])[0].name)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.create_as_policy(name=self.up_policy_name,
                                     adjustment_type="ChangeInCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=4,
                                     cooldown=120)

        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50
        self.tester.create_as_policy(name=self.down_policy_name,
                                     adjustment_type="PercentChangeInCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=self.down_size,
                                     cooldown=120)

        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0
        self.tester.create_as_policy(name=self.exact_policy_name,
                                     adjustment_type="ExactCapacity",
                                     as_name=self.auto_scaling_group_name,
                                     scaling_adjustment=self.exact_size,
                                     cooldown=120)

        ### Test all policies added to group
        if len(self.tester.autoscale.get_all_policies()) != 3:
            raise Exception('Auto Scaling policies not created')
        self.debug("**** Created Auto Scaling Policies: " +
                   self.up_policy_name + " " + self.down_policy_name + " " +
                   self.exact_policy_name)

        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.up_policy_name,
                                      as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([
                self.auto_scaling_group_name
        ])[0].desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug(
            "Executed  ChangeInCapacity policy, increased desired capacity to: "
            + str(
                self.tester.describe_as_group([self.auto_scaling_group_name])
                [0].desired_capacity))

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.down_policy_name,
                                      as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([
                self.auto_scaling_group_name
        ])[0].desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug(
            "Executed PercentChangeInCapacity policy, decreased desired capacity to: "
            + str(
                self.tester.describe_as_group([self.auto_scaling_group_name])
                [0].desired_capacity))

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.exact_policy_name,
                                      as_group=self.auto_scaling_group_name)
        if self.tester.describe_as_group([
                self.auto_scaling_group_name
        ])[0].desired_capacity != self.exact_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " + str(
            self.tester.describe_as_group([self.auto_scaling_group_name])
            [0].desired_capacity))

        ### Test Delete all Auto Scaling Policies
        for policy in self.tester.autoscale.get_all_policies():
            self.tester.delete_as_policy(policy_name=policy.name,
                                         autoscale_group=policy.as_name)
        if len(self.tester.autoscale.get_all_policies()) != 0:
            raise Exception('Auto Scaling policy not deleted')
        self.debug("**** Deleted Auto Scaling Policy: " + self.up_policy_name +
                   " " + self.down_policy_name + " " + self.exact_policy_name)

        ### Test Delete Auto Scaling Group
        self.tester.delete_as_group(names=self.auto_scaling_group_name)
        if len(self.tester.describe_as_group([self.auto_scaling_group_name
                                              ])) != 0:
            raise Exception('Auto Scaling Group not deleted')
        self.debug('**** Deleted Auto Scaling Group: ' +
                   self.auto_scaling_group_name)

        ### pause for Auto scaling group to be deleted
        # TODO write wait/poll op for auto scaling groups
        # time.sleep(5)

        ### Test delete launch config
        self.tester.delete_launch_config(self.launch_config_name)
        if len(self.tester.describe_launch_config([self.launch_config_name
                                                   ])) != 0:
            raise Exception('Launch Config not deleted')
        self.debug('**** Deleted Launch Config: ' + self.launch_config_name)

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a 100 launch config per account limit; this tests what happens if we create more.
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.create_launch_config(name=self.launch_config_name,
                                             image_id=self.image.id)
            if len(self.tester.describe_launch_config()) > 100:
                raise Exception(
                    "More then 100 launch configs exist in 1 account")
        for lc in self.tester.describe_launch_config():
            self.tester.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        launch_config_name = 'LC-' + str(time.time())
        self.tester.create_launch_config(name=launch_config_name,
                                         image_id=self.image.id,
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        asg = 'ASG-' + str(time.time())
        self.tester.create_as_group(group_name=asg,
                                    launch_config=launch_config_name,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=0,
                                    max_size=5,
                                    connection=self.tester.autoscale)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.create_as_policy(name=policy_name,
                                         adjustment_type="ExactCapacity",
                                         as_name=asg,
                                         scaling_adjustment=0,
                                         cooldown=120)
        if len(self.tester.autoscale.get_all_policies()) > 25:
            raise Exception(
                "More than 25 policies exist for 1 auto scaling group")
        self.tester.delete_as_group(names=asg)

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        self.tester.delete_all_autoscaling_groups()
        self.tester.delete_all_launch_configs()
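
The three policy types exercised above adjust desired capacity differently; a small standalone sketch of the arithmetic the assertions rely on (this mirrors the expected values in the test, not any eutester API).

def apply_policy(current_capacity, adjustment_type, scaling_adjustment):
    """Return the new desired capacity after executing a scaling policy."""
    if adjustment_type == "ChangeInCapacity":
        return current_capacity + scaling_adjustment
    if adjustment_type == "PercentChangeInCapacity":
        return int(current_capacity * (1 + scaling_adjustment / 100.0))
    if adjustment_type == "ExactCapacity":
        return scaling_adjustment
    raise ValueError("Unknown adjustment type: " + adjustment_type)

assert apply_policy(0, "ChangeInCapacity", 4) == 4            # Up-Policy
assert apply_policy(4, "PercentChangeInCapacity", -50) == 2   # Down-Policy
assert apply_policy(2, "ExactCapacity", 0) == 0               # Exact-Policy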
Example #30
0
class EbsTestSuite(EutesterTestCase):

    tester = None
    zonelist = []
    snaps = []
    keypair = None
    group = None
    multicluster = False
    image = None

    def __init__(
        self,
        tester=None,
        zone=None,
        config_file="../input/2b_tested.lst",
        password="******",
        credpath=None,
        volumes=None,
        keypair=None,
        group=None,
        image=None,
        vmtype=None,
        eof=1,
    ):

        if tester is None:
            self.tester = Eucaops(config_file=config_file, password=password, credpath=credpath)
        else:
            self.tester = tester
        self.tester.exit_on_fail = eof

        self.testlist = []

        self.image = image
        self.vmtype = vmtype
        self.zone = None
        self.zonelist = []

        # create some zone objects and append them to the zonelist
        if zone is not None:
            partition = self.tester.service_manager.partitions.get(zone)
            self.zone = TestZone(zone)
            self.zonelist.append(self.zone)
        else:
            self.setup_testzones()

        # If the list of volumes passed in looks good, sort them into the zones
        if self.volumes_list_check(volumes):
            self.sort_volumes(volumes)

        # Setup our security group for later use
        if group is not None:
            self.group = group
        else:
            group_name = "EbsTestGroup"

            try:
                self.group = self.tester.add_group(group_name)
                self.tester.authorize_group_by_name(self.group.name)
                self.tester.authorize_group_by_name(self.group.name, protocol="icmp", port=-1)
            except Exception, e:
                raise Exception("Error when setting up group:" + str(group_name) + ", Error:" + str(e))

        # Setup the keypairs for later use
        try:
            if keypair is not None:
                self.keypair = keypair
            else:
                keys = self.tester.get_all_current_local_keys()
                if keys != []:
                    self.keypair = keys[0]
                else:
                    self.keypair = keypair = self.tester.add_keypair("ebs_test_key-" + str(time.time()))
        except Exception, ke:
            raise Exception("Failed to find/create a keypair, error:" + str(ke))
  #Number of ping attempts used to test instance state, before giving up on a running instance 
  ping_retry=100
  #The eutester cloud tester object 
  tester = Eucaops( password=password, config_file=config, credpath=credpath)
  
  #sets tester to throw exception upon failure
  if (options.eof):
      tester.exit_on_fail = 1
  else:
      tester.exit_on_fail = 0 
 
  try:
      ### Create security group if it does not exist. Add ssh authorization to it. 
      try:
          group = tester.add_group(group_name)
          tester.authorize_group_by_name(group.name)
          tester.authorize_group_by_name(group.name,protocol="icmp",port=-1)
      except Exception, e:    
          raise Exception("Error when setting up group:"+str(group_name)+", Error:"+str(e))
          
      
      
      #Get the remote file size from the http header of the url given
      try:        
          url = url.replace('http://','')
          host = url.split('/')[0]
          path = url.replace(host,'')
          pmsg("get_remote_file, host("+host+") path("+path+")")
          conn=httplib.HTTPConnection(host)
          conn.request("HEAD", path)  
          res=conn.getresponse()
Example #32
0
class CloudFormations(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = CFNops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

    def InstanceVolumeTemplate(self):
        self.stack_name = "volumeTest{0}".format(int(time.time()))
        template = Template()
        keyname_param = template.add_parameter(
            Parameter(
                "KeyName",
                Description="Name of an existing EC2 KeyPair "
                "to enable SSH access to the instance",
                Type="String",
            ))
        template.add_mapping('RegionMap',
                             {"": {
                                 "AMI": self.tester.get_emi().id
                             }})
        for i in xrange(2):
            ec2_instance = template.add_resource(
                ec2.Instance("Instance{0}".format(i),
                             ImageId=FindInMap("RegionMap", Ref("AWS::Region"),
                                               "AMI"),
                             InstanceType="t1.micro",
                             KeyName=Ref(keyname_param),
                             SecurityGroups=[self.group.name],
                             UserData=Base64("80")))
            vol = template.add_resource(
                ec2.Volume("Volume{0}".format(i),
                           Size="8",
                           AvailabilityZone=GetAtt("Instance{0}".format(i),
                                                   "AvailabilityZone")))
            mount = template.add_resource(
                ec2.VolumeAttachment("MountPt{0}".format(i),
                                     InstanceId=Ref("Instance{0}".format(i)),
                                     VolumeId=Ref("Volume{0}".format(i)),
                                     Device="/dev/vdc"))
        stack = self.tester.create_stack(self.stack_name,
                                         template.to_json(),
                                         parameters=[("KeyName",
                                                      self.keypair.name)])

        def stack_completed():
            return self.tester.cloudformation.describe_stacks(
                self.stack_name).status == "CREATE_COMPLETE"

        self.tester.wait_for_result(stack_completed, True, timeout=600)
        self.tester.delete_stack(self.stack_name)

    def clean_method(self):
        self.tester.cleanup_artifacts()
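
The template above is built with the troposphere library; a stripped-down sketch of the same pattern (one instance, no volumes) just to show how the pieces fit together before create_stack() is called. The image id here is a placeholder, not a real EMI.

from troposphere import Base64, Parameter, Ref, Template
from troposphere import ec2

template = Template()
keyname_param = template.add_parameter(
    Parameter("KeyName",
              Description="Existing EC2 KeyPair for SSH access",
              Type="String"))
template.add_resource(
    ec2.Instance("Instance0",
                 ImageId="emi-12345678",          # placeholder image id
                 InstanceType="t1.micro",
                 KeyName=Ref(keyname_param),
                 UserData=Base64("80")))
print template.to_json()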
Example #33
0
class MigrationTest(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.parser.add_argument('--imgurl',
                        help="BFEBS Image to splat down", default=None)
        self.get_args()
        self.tester = Eucaops( config_file=self.args.config, password=self.args.password)
        self.numberOfNodes = self.tester.service_manager.get_all_node_controllers()
        if len(self.numberOfNodes) < 2:
            exit("Not enough NCs to test instance migration.")

        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )

        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.numberOfResources = 3
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        try:
            self.tester.get_emi(root_device_type="ebs")
        except:
            bfebs = self.do_with_args(BFEBSBasics)
            bfebs.RegisterImage()

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def MigrationBasic(self, volume=None):
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.reservation = self.tester.run_instance(self.image, username=self.args.instance_user, keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        instance = self.reservation.instances[0]
        assert isinstance(instance, EuInstance)
        volume_device = None
        if volume is not None:
            volume_device = instance.attach_euvolume(volume)

        self.tester.service_manager.populate_nodes()
        source_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0]
        enabled_clc.sys( "source " + self.tester.credpath + "/eucarc &&" +
                         self.tester.eucapath + "/usr/sbin/euca-migrate-instances -i " + instance.id )

        def wait_for_new_nc():
            self.tester.service_manager.populate_nodes()
            destination_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0]
            return source_nc.hostname == destination_nc.hostname

        self.tester.wait_for_result(wait_for_new_nc, False, timeout=600, poll_wait=60)
        self.assertTrue(self.tester.ping(instance.public_dns_name), 'Could not ping instance')

        if volume_device:
            instance.sys("ls " + volume_device, code=0)

        destination_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0]
        if destination_nc.machine.distro.name != "vmware":
            destination_nc.machine.sys("virsh list | grep " + instance.id, code=0)
        else:
            destination_nc.machine.sys("esxcli vm process list | grep " + instance.id, code=0)

        self.tester.terminate_instances(reservation=self.reservation)
        if volume is not None:
            self.tester.delete_volume(volume)

    def MigrationInstanceStoreWithVol(self):
        volume = self.tester.create_volume(zone=self.zone)
        assert isinstance(volume, EuVolume)
        self.MigrationBasic(volume)

    def MigrationBasicEBSBacked(self, volume=None):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.MigrationBasic(volume)

    def MigrationBasicEBSBackedWithVol(self):
        volume = self.tester.create_volume(zone=self.zone)
        assert isinstance(volume, EuVolume)
        self.MigrationBasicEBSBacked(volume)

    def MigrateToDest(self):
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.reservation = self.tester.run_instance(self.image, username=self.args.instance_user, keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        instance = self.reservation.instances[0]
        self.tester.service_manager.populate_nodes()
        self.source_nc = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0]

        all_nc = self.tester.service_manager.get_all_node_controllers()
        self.destination_nc = None

        for nc in all_nc:
            if nc.machine.hostname != self.source_nc.machine.hostname:
                self.destination_nc = nc
                enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                                    self.tester.eucapath + "/usr/sbin/euca-migrate-instances -i " +
                                    instance.id + " --dest " + self.destination_nc.machine.hostname)

                def wait_for_new_nc():
                    self.tester.service_manager.populate_nodes()
                    self.instance_node = self.tester.service_manager.get_all_node_controllers(instance_id=instance.id)[0]
                    return self.instance_node.hostname == self.destination_nc.hostname

                self.tester.wait_for_result(wait_for_new_nc, True, timeout=600, poll_wait=60)
                self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance')

        # migrate the instance back to its original source node
        self.destination_nc = self.source_nc
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                            self.tester.eucapath + "/usr/sbin/euca-migrate-instances -i " +
                            instance.id + " --dest " + self.destination_nc.machine.hostname)

        self.tester.wait_for_result(wait_for_new_nc, True, timeout=600, poll_wait=60)
        self.assertTrue(self.tester.ping(instance.public_dns_name), 'Could not ping instance')

        self.tester.terminate_instances(reservation=self.reservation)

    def MigrationToDestEBSBacked(self):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.MigrateToDest()

    def EvacuateNC(self, volume_list = []):
        instance_list = []
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.nodes = self.tester.service_manager.populate_nodes()
        # pop out one NC; instances will land on it and be evacuated later
        self.source_nc = self.nodes.pop()

        def set_state(node, state):
            # retrying, see EUCA-6389
            while node.state != state:
                self.tester.debug(node.hostname + ": SET STATE TO " + state)
                enabled_clc.sys("euca-modify-service -s " + state + " " + node.hostname)
                self.tester.sleep(10)
                tmpnodes = self.tester.service_manager.populate_nodes()
                for tmpnode in tmpnodes:
                    if tmpnode.hostname == node.hostname:
                        node = tmpnode

        # stop all the NCs
        for node in self.nodes:
            set_state(node, "STOPPED")

        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = self.tester.run_instance(self.image, min=3, max=3, username=self.args.instance_user, keypair=self.keypair.name, group=self.group.name, zone=self.zone)

        for i in xrange(3):
            instance = self.reservation.instances[i]
            instance_list.append(instance)
            assert isinstance(instance, EuInstance)
            volume_device = None
            if volume_list:
                volume_device = instance.attach_euvolume(volume_list[i])

        self.nodes = self.tester.service_manager.populate_nodes()
        # start all the NCs
        for node in self.nodes:
            if node.hostname != self.source_nc.hostname:
                set_state(node, "ENABLED")

        self.nodes = self.tester.service_manager.populate_nodes()
        # evacuate source NC
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                            self.tester.eucapath + "/usr/sbin/euca-migrate-instances --source " +
                        self.source_nc.machine.hostname)

        def wait_for_evacuation():
            self.tester.service_manager.populate_nodes()
            if self.source_nc.machine.distro.name == "vmware":
                emptyNC = self.source_nc.sys("esxcli vm process list | grep 'Display Name' | awk '{print $3}'")
            else:
                emptyNC = self.source_nc.get_virsh_list()
            return len(emptyNC) == 0

        self.tester.wait_for_result(wait_for_evacuation, True, timeout=600, poll_wait=60)

        for inst in instance_list:
            self.assertTrue(self.tester.ping(inst.public_dns_name), 'Could not ping instance')

        self.tester.terminate_instances(reservation=self.reservation)
        if volume_list:
            self.tester.delete_volumes(volume_list)

    def EvacuateNCWithVol(self):
        volume_list = []
        for i in xrange(self.numberOfResources):
            volume = self.tester.create_volume(zone=self.zone)
            assert isinstance(volume, EuVolume)
            volume_list.append(volume)
        self.EvacuateNC(volume_list)

    def EvacuateNCAllEBS(self):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.EvacuateNC()
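The eutester test classes in this listing are typically driven by a small __main__ harness that builds test units with create_testcase_from_method (used by NCAdmin further below) and runs them with run_test_case_list. A sketch for MigrationTest, with an assumed default test selection:

if __name__ == "__main__":
    testcase = MigrationTest()
    # Assumed default test selection; adjust to the migration scenarios you want to exercise
    test_names = ["MigrationBasic", "MigrationInstanceStoreWithVol", "MigrateToDest"]
    unit_list = [testcase.create_testcase_from_method(getattr(testcase, name))
                 for name in test_names]
    result = testcase.run_test_case_list(unit_list)
    exit(result)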
Beispiel #34
0
class StickinessBasics(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config,
                                  password=self.args.password)
        # test resource hash
        self.test_hash = str(int(time.time()))

        # Add and authorize a group for the instances
        self.group = self.tester.add_group(group_name="group-" + self.test_hash)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        self.tester.authorize_group_by_name(group_name=self.group.name, port=80, protocol="tcp")

        # Generate a keypair for the instances
        self.keypair = self.tester.add_keypair("keypair-" + self.test_hash)
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        # User data file
        self.user_data = "./testcases/cloud_user/elb/test_data/webserver_user_data.sh"

        # Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi()

        # Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        # create base load balancer
        self.load_balancer_port = 80
        self.load_balancer = self.tester.create_load_balancer(zones=[self.zone],
                                                              name="elb-" + self.test_hash,
                                                              load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)

        # create autoscaling group of webservers that register to the load balancer
        self.count = 2
        (self.web_servers) = self.tester.create_as_webservers(name=self.test_hash,
                                                              keypair=self.keypair.name,
                                                              group=self.group.name,
                                                              zone=self.zone,
                                                              image=self.image.id,
                                                              count=self.count,
                                                              user_data=self.user_data,
                                                              load_balancer=self.load_balancer.name)

        # web servers scaling group
        self.asg = self.tester.describe_as_group(name="asg-"+self.test_hash)

        # wait until scaling instances are InService with the load balancer before continuing - 5 min timeout
        assert self.tester.wait_for_result(self.tester.wait_for_lb_instances, True, timeout=300,
                                           lb=self.load_balancer.name, number=self.count)

    def clean_method(self):
        self.tester.cleanup_artifacts()
        if self.tester.test_resources["security-groups"]:
            for group in self.tester.test_resources["security-groups"]:
                self.tester.wait_for_result(self.tester.gracefully_delete_group, True, timeout=60, group=group)

    def GenerateRequests(self):
        """
        This will test the most basic use case for a load balancer.
        Uses two backend instances with httpd servers.
        """
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "http://{0}:{1}/instance-name".format(lb_ip, self.load_balancer_port)
        return self.tester.generate_http_requests(url=lb_url, count=100, worker_threads=1)

    def lb_cookie_session_affinity_test(self):
        lbpolicy = "LB-Policy"
        self.tester.create_lb_cookie_stickiness_policy(cookie_expiration_period=300,
                                                       lb_name=self.load_balancer.name,
                                                       policy_name=lbpolicy)
        # test lb stickiness
        self.tester.sleep(2)
        self.debug("Testing LB cookie stickiness")
        self.tester.set_lb_policy(lb_name=self.load_balancer.name, lb_port=80, policy_name=lbpolicy)
        responses = self.GenerateRequests()
        host = responses[0]
        for response in responses:
            if response != host:
                raise Exception(
                    "Expected same response due to load balancer stickiness policy. Got initial response: " + host +
                    " subsequent response: " + response)
        return

    def app_cookie_session_affinity_test(self):
        acpolicy = "AC-Policy"
        self.tester.create_app_cookie_stickiness_policy(name="test-cookie",
                                                        lb_name=self.load_balancer.name,
                                                        policy_name=acpolicy)
        # test app cookie stickiness
        self.tester.set_lb_policy(lb_name=self.load_balancer.name, lb_port=80, policy_name=acpolicy)
        self.debug("Testing App Cookie stickiness")
        responses = self.GenerateRequests()
        # since we use the same cookie name on the backing ELB instances, the first contacts with the ELB can
        # hit different back ends. After the first 2 calls they should all go to only 1 backend.
        host = responses[2]
        for response in responses[2:]:
            if response != host:
                raise Exception(
                    "Expected same response due to app cookie stickiness policy. Got initial response: " + host +
                    " subsequent response: " + response)
        return
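Both stickiness tests above perform the same uniformity check over the response list; a small hypothetical helper (not part of the original class) could factor it out:

def assert_sticky(responses, skip=0):
    # Raise if the responses after index `skip` were not all served by the same backend
    expected = responses[skip]
    for response in responses[skip:]:
        if response != expected:
            raise Exception("Expected sticky responses, got: " + expected + " and " + response)

With such a helper, lb_cookie_session_affinity_test would call assert_sticky(responses) and app_cookie_session_affinity_test would call assert_sticky(responses, skip=2).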
Beispiel #35
0
class PopulateUpgrade(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--region", default=None)
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

        self.security_groups = []

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None
        self.volume = None
        self.snapshot = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        pass

    def Instances(self, type="instance-store"):
        """
        This case was developed to run through a series of basic instance tests.
             The tests are as follows:
                   - execute run_instances command
                   - make sure that public DNS name and private IP aren't the same
                       (This is for Managed/Managed-NOVLAN networking modes)
                   - test to see if instance is ping-able
                   - test to make sure that instance is accessible via ssh
                       (ssh into instance and run basic ls command)
             If any of these tests fail, the test case will error out, logging the results.
        """
        test_image = self.tester.get_emi(root_device_type=type)

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")

        self.security_groups.append(self.group)

        # Test: INSTANCESTORE VOLATTACH:no ADDR:user
        instance_1 = self.tester.run_instance(
            test_image, keypair=self.keypair.name,
            group=self.group.name).instances[0]
        instance_1_address = self.tester.allocate_address()
        self.tester.associate_address(instance=instance_1,
                                      address=instance_1_address)

        # Test: INSTANCESTORE VOLATTACH:no ADDR:system
        instance_2 = self.tester.run_instance(
            test_image, keypair=self.keypair.name,
            group=self.group.name).instances[0]

        # Test: INSTANCESTORE VOLATTACH:no ADDR:private
        instance_3 = self.tester.run_instance(test_image,
                                              group=self.group.name,
                                              private_addressing=True,
                                              is_reachable=False).instances[0]

        # Test: INSTANCESTORE VOLATTACH:yes ADDR:user
        instance_4 = self.tester.run_instance(
            test_image, keypair=self.keypair.name,
            group=self.group.name).instances[0]
        instance_4_address = self.tester.allocate_address()
        self.tester.associate_address(instance=instance_4,
                                      address=instance_4_address)
        volume = self.tester.create_volume(zone=self.zone)
        instance_4.attach_volume(volume=volume)

        # Test: INSTANCESTORE VOLATTACH:yes ADDR:system
        instance_5 = self.tester.run_instance(
            test_image, keypair=self.keypair.name,
            group=self.group.name).instances[0]
        volume = self.tester.create_volume(zone=self.zone)
        instance_5.attach_volume(volume=volume)

        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        self.security_groups.append(self.group)
        # Test: INSTANCESTORE VOLATTACH:no ADDR:private
        instance_6 = self.tester.run_instance(test_image,
                                              group=self.group.name,
                                              private_addressing=True,
                                              is_reachable=False).instances[0]

    def PopulateAll(self):
        self.Instances("instance-store")
        self.Instances("ebs")
Beispiel #36
0
class TaggingBasics(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password, credpath=self.args.credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.address = None
        self.volume = None
        self.snapshot = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        ### Terminate the reservation if it is still up
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")

        if self.volume:
            self.tester.delete_volume(self.volume,timeout=600)

        if self.snapshot:
            self.tester.delete_snapshot(self.snapshot)

        ### DELETE group
        self.tester.delete_group(self.group)

        ### Delete keypair in cloud and from filesystem
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def InstanceTagging(self):
        """
        This case was developed to exercise tagging of an instance resource
        """
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name)
        test_instance = None
        tags = { u'name': 'instance-tag-test', u'location' : 'over there'}
        for instance in self.reservation.instances:
            instance.create_tags(tags)
            test_instance = instance

        ### Test Filtering , u'tag:location' : 'over there'
        tag_filter = { u'tag:name': u'instance-tag-test'}
        reservations = self.tester.ec2.get_all_instances(filters=tag_filter)
        if len(reservations) != 1:
            raise Exception('Filter for instances returned too many results')
        reservation = reservations[0]
        if self.reservation.id not in reservation.id:
            raise Exception('Wrong instance id returned after filtering, Expected: ' + self.reservation.id  + ' Received: ' + reservation.id )

        ### Test non-tag Filtering
        ### Filters can be found here, most will be tested manually, but a spot check should be added
        ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeInstances.html
        new_group = self.tester.add_group("filter-test")
        self.tester.authorize_group_by_name(group_name=new_group.name )
        self.tester.authorize_group_by_name(group_name=new_group.name, port=-1, protocol="icmp" )
        filter_test_reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=new_group.name)
        keypair_filter = {u'key-name': self.keypair.name}
        group_filter = {u'group-name': new_group.name}

        keypair_match = self.tester.ec2.get_all_instances(filters=keypair_filter)
        group_match = self.tester.ec2.get_all_instances(filters=group_filter)
        self.tester.terminate_instances(filter_test_reservation)
        self.tester.delete_group(new_group)
        self.tester.delete_keypair(self.keypair)

        if len(group_match) != 1:
            raise Exception("Non-tag Filtering of instances by group name: " + str(len(group_match))  + " expected: 1")
        if len(keypair_match) != 2:
            raise Exception("Non-tag Filtering of instances by keypair name: " + str(len(keypair_match))  + " expected: 2")

        ### Test Deletion
        test_instance.delete_tags(tags)
        instances = self.tester.ec2.get_all_instances(filters=tag_filter)
        if len(instances) != 0:
            raise Exception("Filter returned instances when there shouldn't be any")

        if all(item in test_instance.tags.items() for item in tags.items()):
            raise Exception('Tags still returned after deletion')
        #self.test_restrictions(test_instance)
        #self.test_in_series(test_instance)
        self.tester.terminate_instances(self.reservation)
        self.reservation = None

    def VolumeTagging(self):
        """
        This case was developed to exercise tagging of a volume resource
        """
        self.volume = self.tester.create_volume(zone=self.zone)
        tag_id = 'volume-tag-test-' + str(int(time.time()))
        tags = { u'name': tag_id, u'location' : 'datacenter'}
        self.volume.create_tags(tags)

        ### Test Filtering
        tag_filter = { u'tag:name': tag_id}
        volumes = self.tester.ec2.get_all_volumes(filters=tag_filter)
        if len(volumes) == 0:
            raise Exception('Filter for volumes returned no results:"{0}", filter:"{1}"'
                            .format(volumes, tag_filter))
        if len(volumes) != 1:
            raise Exception('Filter for volumes returned too many results:"{0}", filter:"{1}"'
                            .format(volumes, tag_filter))
        if volumes[0].id != self.volume.id:
            raise Exception('Wrong volume ID returned after filtering:"{0}", filter:"{1}"'
                            .format(volumes, tag_filter))

        ### Test non-tag Filtering
        ### Filters can be found here, most will be tested manually, but a spot check should be added
        ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeImages.html
        vol_size = 3
        filter_test_volume_1 = self.tester.create_volume(zone=self.zone, size=vol_size)
        filter_test_volume_2 = self.tester.create_volume(zone=self.zone, size=vol_size)
        size_filter = {u'size': vol_size }
        id_filter = {u'volume-id': self.volume.id}

        size_match = self.tester.ec2.get_all_volumes(filters=size_filter)
        id_match = self.tester.ec2.get_all_volumes(filters=id_filter)

        self.tester.delete_volume(filter_test_volume_1)
        self.tester.delete_volume(filter_test_volume_2)

        for sz_vol in size_match:
            if sz_vol.size != vol_size:
                try:
                    self.debug('Size filter returned the following volumes:{0}'.format(size_match))
                except:
                    pass
                raise Exception('Filtering of volumes by size:"{0}" returned a volume of '
                                'wrong size. vol:{1}, size:{2}'
                                .format(vol_size, sz_vol, sz_vol.size ))
        if len(id_match) != 1:
            try:
                self.debug('Id filter returned the following volumes:{0}'.format(id_match))
            except:
                pass
            raise Exception("Filtering of volumes by id:'{0}' returned {1} volumes, expected 1"
                            .format(self.volume.id, len(id_match or [])))

        ### Test Deletion
        self.volume.delete_tags(tags)
        volumes = self.tester.ec2.get_all_volumes(filters=tag_filter)
        if len(volumes) != 0:
            raise Exception("Filter returned volumes when there shouldn't be any")
        if self.volume.tags != {}:
            raise Exception('Tags still returned after deleting them from volume')
        #self.test_restrictions(self.volume)
        #self.test_in_series(self.volume)

    def SnapshotTagging(self):
        """
        This case was developed to exercise tagging of a snapshot resource
        """
        if not self.volume:
            self.volume = self.tester.create_volume(zone=self.zone)
        self.snapshot = self.tester.create_snapshot_from_volume(self.volume)
        tags = { u'name': 'snapshot-tag-test', u'location' : 'over there'}
        self.snapshot.create_tags(tags)

        ### Test Filtering , u'tag:location' : 'over there'
        tag_filter = { u'tag:name': 'snapshot-tag-test'}
        snapshots = self.tester.ec2.get_all_snapshots(filters=tag_filter)
        if len(snapshots) != 1:
            raise Exception('Filter for snapshots returned too many results')
        if snapshots[0].id != self.snapshot.id:
            raise Exception('Wrong snapshot id returned after filtering')

        ### Test non-tag Filtering
        ### Filters can be found here, most will be tested manually, but a spot check should be added
        ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeSnapshots.html
        filter_description = "filtering" + str(int(time.time()))
        filter_test_snapshot = self.tester.create_snapshot_from_volume(self.volume, description=filter_description)

        description_filter = {u'description': filter_description }
        volume_filter = {u'volume-id': self.volume.id}

        description_match = self.tester.ec2.get_all_snapshots(filters=description_filter)
        volume_match = self.tester.ec2.get_all_snapshots(filters=volume_filter)

        self.tester.delete_snapshot(filter_test_snapshot)

        if len(description_match) != 1:
            raise Exception("Non-tag Filtering of snapshots by volume description: " + str(len(description_match))  + " expected: 1")

        if len(volume_match) != 2:
            raise Exception("Non-tag Filtering of snapshots by volume id returned: " + str(len(volume_match))  + " expected: 2")

        ### Test Deletion
        self.snapshot.delete_tags(tags)
        snapshots= self.tester.ec2.get_all_snapshots(filters=tag_filter)
        if len(snapshots) != 0:
            raise Exception("Filter returned snapshots when there shouldn't be any")
        if self.snapshot.tags != {}:
            raise Exception('Tags still returned after deleting them from snapshot')
        #self.test_restrictions(self.snapshot)
        #self.test_in_series(self.snapshot)
        self.tester.delete_snapshot(self.snapshot)
        self.snapshot = None

    def ImageTagging(self):
        """
        This case was developed to exercise tagging of an image resource
        """
        nametag = u'ImageTaggingName'
        locationtag =  u'ImageTaggingLocation'
        tags = { nametag: 'image-tag-test', locationtag : 'over there'}
        orig_image_tags = self.image.tags
        self.tester.create_tags([self.image.id], tags)

        ### Test Tag Filtering , u'tag:location' : 'over there'
        tag_filter = { u'tag:'+nametag: 'image-tag-test'}
        images = self.tester.ec2.get_all_images(filters=tag_filter)
        if len(images) != 1:
            raise Exception('Filter for images returned too many results')
        if images[0].id != self.image.id:
            raise Exception('Wrong image id returned after filtering')

        ### Test non-tag Filtering
        ### Filters can be found here, most will be tested manually, but a spot check should be added
        ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeImages.html
        image_description = "image-filtering"
        filter_image_id = self.tester.register_image(
            image_location=self.image.location,
            description=image_description,
            virtualization_type="hvm")

        description_filter = {u'description': image_description }
        location_filter = {u'manifest-location': self.image.location}

        description_match = self.tester.ec2.get_all_images(filters=description_filter)
        location_match = self.tester.ec2.get_all_images(filters=location_filter)
        filter_image = self.tester.get_emi(emi=filter_image_id)
        self.tester.deregister_image(filter_image)

        if len(description_match) != 1:
            raise Exception("Non-tag Filtering of images by description: " + str(len(description_match)) + " expected: 1")
        if len(location_match) != 2:
            raise Exception("Non-tag Filtering of images by manifest location: " + str(len(location_match)) + " expected: 2")

        ### Test Deletion
        self.tester.delete_tags([self.image.id], tags)
        images = self.tester.ec2.get_all_images(filters=tag_filter)
        if len(images) != 0:
            raise Exception("Filter returned images when there shouldn't be any")
        for tag in tags:
            if tag in self.image.tags:
                raise Exception('Tags still returned after deleting them from image: ' + str(self.image.tags))
        #self.test_restrictions(self.image)
        #self.test_in_series(self.image)

    def SecurityGroupTagging(self):
        """
        This case was developed to exercise tagging of an security group resource
        """
        tags = { u'name': 'security-tag-test', u'location' : 'over there'}
        self.debug("Security group ID: " + self.group.id)
        self.tester.create_tags([self.group.id], tags)

        ### Test Tag Filtering , u'tag:location' : 'over there'
        tag_filter = { u'tag:name': 'security-tag-test'}
        groups = self.tester.ec2.get_all_security_groups(filters=tag_filter)
        if len(groups) != 1:
            raise Exception('Filter for groups returned too many results')
        if groups[0].id != self.group.id:
            raise Exception('Wrong group id returned after filtering')

        ### Test non-tag Filtering
        ### Filters can be found here, most will be tested manually, but a spot check should be added
        ### http://docs.aws.amazon.com/AWSEC2/latest/CommandLineReference/ApiReference-cmd-DescribeSecurityGroups.html
        group_name = "filter-test"
        group_description = "group-filtering"
        filter_group = self.tester.add_group(group_name=group_name, description=group_description)
        filter_group_2 = self.tester.add_group(group_name=group_name + "2", description=group_description)

        description_filter = {u'description': group_description }
        group_id_filter = {u'group-id': filter_group.id}
        description_match = self.tester.ec2.get_all_security_groups(filters=description_filter)
        self.debug("Groups matching description:" + str(description_match))
        group_id_match = self.tester.ec2.get_all_security_groups(filters=group_id_filter)
        self.debug("Groups matching owner-id (" + group_id_filter[u'group-id']  + "):" + str(group_id_match))

        self.tester.delete_group(filter_group)
        self.tester.delete_group(filter_group_2)

        if len(description_match) != 2:
            raise Exception("Non-tag Filtering of security groups by description: " + str(len(description_match))  + " expected: 2")
        if len(group_id_match) != 1:
            raise Exception("Non-tag Filtering of security groups by id: " + str(len(group_id_match))  + " expected: 1")

        ### Test Deletion
        self.tester.delete_tags([self.group.id], tags)
        groups = self.tester.ec2.get_all_security_groups(filters=tag_filter)
        if len(groups) != 0:
            raise Exception("Filter returned security groups when there shouldn't be any")
        if self.group.tags != {}:
            raise Exception('Tags still returned after deleting them from '
                            'security group:' + str(self.group.tags) )
        #self.test_restrictions(self.group)
        #self.test_in_series(self.group)


    def test_restrictions(self, resource):
        max_tags_number = 10
        max_tags = {}

        for i in xrange(max_tags_number):
            max_tags[u'key' + str(i)] = 'value' + str(i)

        self.test_tag_creation(max_tags, resource=resource, fail_message="Failure when trying to add max allowable tags (" + str(max_tags_number) + ")", expected_outcome=True)
        self.test_tag_deletion(max_tags, resource=resource,fail_message="Failure when trying to delete max allowable tags (" + str(max_tags_number) + ")", expected_outcome=True)

        too_many_tags = {}
        for i in xrange(max_tags_number + 1):
            too_many_tags[u'key' + str(i)] = 'value' + str(i)

        self.test_tag_creation(too_many_tags, resource=resource,fail_message="Allowed too many tags to be created", expected_outcome=False)

        max_key = u'0' * 127

        maximum_key_length = { max_key : 'my value'}
        self.test_tag_creation(maximum_key_length, resource=resource, fail_message="Unable to use a key with " + str(len(max_key)) + " characters", expected_outcome=True)
        self.test_tag_deletion(maximum_key_length, resource=resource, fail_message="Unable to delete a key with " + str(len(max_key)) + " characters", expected_outcome=True)

        key_too_large = { max_key + u'0' : 'my value'}
        self.test_tag_creation(key_too_large, resource=resource, fail_message="Allowed key with more than " + str(len(max_key)) + " chars", expected_outcome=False)

        maximum_value = '0' * 255

        maximum_value_length = { u'my_key': maximum_value}
        self.test_tag_creation(maximum_value_length, resource=resource, fail_message="Unable to use a value with " + str(len(maximum_value)) + " characters", expected_outcome=True)
        self.test_tag_deletion(maximum_value_length, resource=resource, fail_message="Unable to delete a value with " + str(len(maximum_value)) + " characters", expected_outcome=True)

        value_too_large = { u'my_key': maximum_value + '0'}
        self.test_tag_creation(value_too_large, resource=resource, fail_message="Allowed value with more than " + str(len(maximum_value)) + " chars", expected_outcome=False)

        aws_key_prefix = { u'aws:something': 'asdfadsf'}
        self.test_tag_creation(aws_key_prefix, resource=resource, fail_message="Allowed key with 'aws:' prefix", expected_outcome=False)

        aws_value_prefix = { u'my_key': 'aws:somethingelse'}
        self.test_tag_creation(aws_value_prefix, resource=resource, fail_message="Did not allow creation of value with 'aws:' prefix", expected_outcome=True)
        self.test_tag_deletion(aws_value_prefix, resource=resource, fail_message="Did not allow deletion of value with 'aws:' prefix", expected_outcome=True)

        lower_case = {u'case': 'value'}
        upper_case = {u'CASE': 'value'}
        self.test_tag_creation(lower_case, resource=resource, fail_message="Unable to add key with all lower case", expected_outcome=True)
        self.test_tag_creation(upper_case, resource=resource, fail_message="Case sensitivity not enforced, unable to create tag with different capitalization", expected_outcome=True)
        self.test_tag_deletion(lower_case, resource=resource, fail_message="Unable to delete a tag, when testing case sensitivity", expected_outcome=True)
        self.test_tag_deletion(upper_case, resource=resource, fail_message="Unable to delete a tag, when testing case sensitivity", expected_outcome=True)

    def test_tag_creation(self, tags, resource, fail_message, expected_outcome=True, timeout=600):
        actual_outcome = None
        exception = None
        try:
            resource.create_tags(tags, timeout=timeout)
            actual_outcome =  True
        except Exception, e:
            exception = e
            actual_outcome =  False
        finally:
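            # NOTE: the original snippet is truncated at this point in the listing.
            # A plausible completion (an assumption, not the source code) would compare
            # the actual and expected outcomes and surface the failure message:
            if actual_outcome != expected_outcome:
                self.tester.debug("Tag creation raised: " + str(exception))
                raise Exception(fail_message)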
Beispiel #37
0
class NCAdmin(EutesterTestCase, InstanceBasics):

    def __init__(self, config_file="cloud.conf", password="******"):
        self.tester = Eucaops( config_file=config_file, password=password)
        self.servman = self.tester.service_manager
        self.nc_list = self.tester.get_component_machines("nc")
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None
        self.conf_file = '%s/etc/eucalyptus/eucalyptus.conf' % self.tester.eucapath
        self.nc_restart_time = 30

    def restart_nc(self):
        ## Use reboot testcase to ensure we are able to attach volumes and run instances
        testcase = self.create_testcase_from_method(self.Reboot)

        ## run at least as many instances as there are NCs
        for i in xrange(len(self.nc_list)):
            testcase.run()

        for nc in self.nc_list:
            nc.sys('service eucalyptus-nc restart')

        ## Wait for ncs to come back up
        self.tester.sleep(self.nc_restart_time)

        ## rerun testcase
        for i in xrange(len(self.nc_list)):
            testcase.run()

    def replace_conf_property(self, nc, property, replacement):
        self.default_conf_property(nc, property)
        nc.sys('echo "' + property + '=' + replacement + '" >> ' + self.conf_file)

    def default_conf_property(self,nc, property):
        nc.sys('sed -i \'s/^' +  property + '/#' + property  +'/g\' ' + self.conf_file)

    def disable_caching(self):
        ## Use basic instance testcase testcase to ensure we are able to attach volumes and run instances
        testcase = self.create_testcase_from_method(self.BasicInstanceChecks)
        property = 'NC_CACHE_SIZE'
        ## run at least as many instances as there are NCs
        for i in xrange(len(self.nc_list)):
            testcase.run()

        for nc in self.nc_list:
            self.replace_conf_property(nc,  property, "0")
            command_list = ['service eucalyptus-nc stop',
                            'rm -rf {0}/var/lib/eucalyptus/instances/cache/*'.format(self.tester.eucapath),
                            'service eucalyptus-nc start']
            for command in command_list:
                nc.sys(command)

        ## Wait for ncs to come back up
        self.tester.sleep(self.nc_restart_time)

        testcase = self.create_testcase_from_method(self.Churn)
        ## rerun testcase

        testcase.run()
        for nc in self.nc_list:
            self.default_conf_property(nc, property)

    def cleanup(self):
        self.tester.cleanup_artifacts()

    def run_suite(self):
        self.testlist = []
        testlist = self.testlist
        #testlist.append(self.create_testcase_from_method(self.restart_nc))
        testlist.append(self.create_testcase_from_method(self.disable_caching))
        self.run_test_case_list(testlist)
        self.cleanup()
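A minimal entry point sketch for NCAdmin, assuming the default config_file and password arguments shown in its __init__:

if __name__ == "__main__":
    # Instantiate with defaults and run the suite defined above
    testcase = NCAdmin()
    testcase.run_suite()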
Beispiel #38
0
class AutoScalingBasics(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = Eucaops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config, password=self.args.password)

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store", not_location="loadbalancer")
        self.address = None
        self.asg = None

    def clean_method(self):
        if self.asg:
            self.tester.wait_for_result(self.gracefully_delete, True)
            self.tester.delete_as_group(self.asg.name, force=True)
        self.tester.cleanup_artifacts()

    def AutoScalingBasics(self):
        ### create launch configuration
        self.launch_config_name = 'Test-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=self.launch_config_name,
                                         image_id=self.image.id,
                                         instance_type="m1.small",
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])

        ### create auto scale group
        self.auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=self.auto_scaling_group_name,
                                    availability_zones=self.tester.get_zones(),
                                    launch_config=self.launch_config_name,
                                    min_size=0,
                                    max_size=5)

        ### Test Create and describe Auto Scaling Policy
        self.up_policy_name = "Up-Policy-" + str(time.time())
        self.up_size = 4
        self.tester.create_as_policy(name=self.up_policy_name,
                                     adjustment_type="ChangeInCapacity",
                                     scaling_adjustment=4,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)
        if len(self.tester.autoscale.get_all_policies(policy_names=[self.up_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.up_policy_name +' not created')

        self.down_policy_name = "Down-Policy-" + str(time.time())
        self.down_size = -50
        self.tester.create_as_policy(name=self.down_policy_name,
                                     adjustment_type="PercentChangeInCapacity",
                                     scaling_adjustment=self.down_size,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)

        if len(self.tester.autoscale.get_all_policies(policy_names=[self.down_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.down_policy_name +' not created')

        self.exact_policy_name = "Exact-Policy-" + str(time.time())
        self.exact_size = 0
        self.tester.create_as_policy(name=self.exact_policy_name,
                                     adjustment_type="ExactCapacity",
                                     scaling_adjustment=self.exact_size,
                                     as_name=self.auto_scaling_group_name,
                                     cooldown=120)

        if len(self.tester.autoscale.get_all_policies(policy_names=[self.exact_policy_name])) != 1:
            raise Exception('Auto Scaling policies: ' + self.exact_policy_name +' not created')

        self.debug("**** Created Auto Scaling Policies: " + self.up_policy_name + " " + self.down_policy_name + " " +
                   self.exact_policy_name)

        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)
        ### Test Execute ChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.up_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.up_size:
            raise Exception("Auto Scale Up not executed")
        self.debug("Executed  ChangeInCapacity policy, increased desired capacity to: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))

        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute PercentChangeInCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.down_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != 0.5 * self.up_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed PercentChangeInCapacity policy, decreased desired capacity to: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))

        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Execute ExactCapacity Auto Scaling Policy
        self.tester.execute_as_policy(policy_name=self.exact_policy_name,
                                      as_group=self.auto_scaling_group_name,
                                      honor_cooldown=False)
        if self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity != self.exact_size:
            raise Exception("Auto Scale down percentage not executed")
        self.debug("Executed ExactCapacity policy, exact capacity is: " +
                   str(self.tester.describe_as_group(self.auto_scaling_group_name).desired_capacity))

        self.tester.wait_for_result(self.scaling_activities_complete, True, timeout=180)

        ### Test Delete all Auto Scaling Policies
        self.tester.delete_all_policies()

        ### Test Delete Auto Scaling Group
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None

        ### Test delete launch config
        self.tester.delete_launch_config(self.launch_config_name)

    def scaling_activities_complete(self):
        activities = self.asg.get_activities()
        for activity in activities:
            assert isinstance(activity,Activity)
            if activity.progress != 100:
                return False
        return True

    def AutoScalingInstanceBasics(self):
        """
        This case will test DescribeAutoScalingInstances, SetInstanceHealth and TerminateInstanceInAutoScalingGroup
        """
        pass

    def too_many_launch_configs_test(self):
        """
        AWS enforces a 100 LC per account limit this tests what happens if we create more
        """
        for i in range(101):
            self.launch_config_name = 'Test-Launch-Config-' + str(i + 1)
            self.tester.create_launch_config(name=self.launch_config_name,
                                             image_id=self.image.id)
            if len(self.tester.describe_launch_config()) > 100:
                raise Exception("More then 100 launch configs exist in 1 account")
        for lc in self.tester.describe_launch_config():
            self.tester.delete_launch_config(lc.name)

    def too_many_policies_test(self):
        """
        AWS enforces a 25 policy per account limit this tests what happens if we create more
        """
        launch_config_name = 'LC-' + str(time.time())
        self.tester.create_launch_config(name=launch_config_name,
                                         image_id=self.image.id,
                                         instance_type="m1.small",
                                         key_name=self.keypair.name,
                                         security_groups=[self.group.name])
        asg_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=asg_name,
                                    launch_config=launch_config_name,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=0,
                                    max_size=5)
        for i in range(26):
            policy_name = "Policy-" + str(i + 1)
            self.tester.create_as_policy(name=policy_name,
                                         adjustment_type="ExactCapacity",
                                         as_name=asg_name,
                                         scaling_adjustment=0,
                                         cooldown=120)
        if len(self.tester.autoscale.get_all_policies()) > 25:
            raise Exception("More than 25 policies exist for 1 auto scaling group")
        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None

    def too_many_as_groups(self):
        """
        AWS imposes a 20 ASG/acct limit
        """
        pass

    def clear_all(self):
        """

        remove ALL scaling policies, auto scaling groups and launch configs
        """
        self.tester.delete_all_policies()
        self.tester.delete_all_autoscaling_groups()
        self.tester.delete_all_launch_configs()

    def change_config(self):
        ### create initial launch configuration
        first_launch_config = 'First-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=first_launch_config, image_id=self.image.id, instance_type="m1.small")

        # create a replacement LC with different instance type
        second_launch_config = 'Second-Launch-Config-' + str(time.time())
        self.tester.create_launch_config(name=second_launch_config, image_id=self.image.id, instance_type="m1.large")

        ### create auto scale group
        auto_scaling_group_name = 'ASG-' + str(time.time())
        self.asg = self.tester.create_as_group(group_name=auto_scaling_group_name,
                                    launch_config=first_launch_config,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=1,
                                    max_size=4,
                                    desired_capacity=1)

        assert isinstance(self.asg, AutoScalingGroup)
        self.tester.update_as_group(group_name=self.asg.name,
                                    launch_config=second_launch_config,
                                    availability_zones=self.tester.get_zones(),
                                    min_size=1,
                                    max_size=4)
        def wait_for_instances(number=1):
            self.asg = self.tester.describe_as_group(self.asg.name)
            instances = self.asg.instances
            if not instances:
                self.tester.debug("No instances in ASG")
                return False
            if len(self.asg.instances) != number:
                self.tester.debug("Instances not yet allocated")
                return False
            for instance in instances:
                assert isinstance(instance, Instance)
                instance = self.tester.get_instances(idstring=instance.instance_id)[0]
                if instance.state != "running":
                    self.tester.debug("Instance: " + str(instance) + " still in " + instance.state + " state")
                    return False
                else:
                    self.tester.debug("Instance: " + str(instance) + " now running")
            return True

        self.tester.wait_for_result(wait_for_instances, True ,timeout=360)
        ### Set desired capacity
        new_desired = 2
        self.asg.set_capacity(new_desired)
        self.tester.wait_for_result(wait_for_instances, True, number=new_desired, timeout=360)
        #wait briefly before changing capacity
        # TODO  get new instance ID and get it's type verify correct type
        ### Delete Auto Scaling Group
        last_instance = self.tester.get_instances(idstring=self.tester.get_last_instance_id())[0]
        assert last_instance.instance_type == "m1.large"

        self.tester.wait_for_result(self.gracefully_delete, True)
        self.asg = None
        ### delete launch configs
        self.tester.delete_launch_config(first_launch_config)
        self.tester.delete_launch_config(second_launch_config)

    def gracefully_delete(self, asg=None):
        if not asg:
            asg = self.asg
        assert isinstance(asg, AutoScalingGroup)
        try:
            self.tester.delete_as_group(name=asg.name, force=True)
        except BotoServerError, e:
            if e.status == 400 and e.reason == "ScalingActivityInProgress":
                return False
        return True
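For reference, the desired-capacity arithmetic behind the three policy assertions in AutoScalingBasics works out as follows (a worked sketch assuming the group starts at its min_size of 0; not part of the original class):

capacity = 0                      # group created with min_size=0
capacity += 4                     # ChangeInCapacity policy (+4)          -> 4 == self.up_size
capacity = int(capacity * 0.5)    # PercentChangeInCapacity policy (-50%) -> 2 == 0.5 * self.up_size
capacity = 0                      # ExactCapacity policy (0)              -> 0 == self.exact_size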
Beispiel #39
0
class ClusterBasics(unittest.TestCase):
    def setUp(self):

        if options.config_file:
            self.tester = Eucaops(config_file=options.config_file, password=options.clc_password)
        else:
            print "\tNeed to pass --config_file option. Try --help for more information\n"
            exit(1)

        ## If specific image wants to be tested, use that; if not, use any instance-store backed image.
        if options.image:
            self.image = self.tester.get_emi(emi=options.image)
        else:
            self.image = self.tester.get_emi(root_device_type="instance-store")

        self.keypair = self.tester.add_keypair(options.prefix + "-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"

        ### Identify type of instance to run
        if options.type == "random":
            options.type = random.choice(["m1.small", "c1.medium", "m1.large", "m1.xlarge", "c1.xlarge"])

        ### Identify number of instances to run (i.e. number of security groups)
        self.num_vms = self.tester.get_available_vms(options.type)

        if self.num_vms >= options.number:
            self.available = options.number
        else:
            options.type = "m1.small"
            avail_vms = self.tester.get_available_vms(options.type)
            if avail_vms < options.number:
                self.tester.fail("Not enough m1.small vm types to run test with minimal of 3 security groups.")
            else:
                self.available = options.number

        if options.print_debug:
            self.tester.start_euca_logs()

        self.security_groups = []
        self.reservations = []

    def tearDown(self):
        ### Clean up after running test case
        for reservation in self.reservations:
            self.tester.terminate_instances(reservation)
        for security_group in self.security_groups:
            self.tester.delete_group(security_group.name)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.keypath = None
        self.keypair = None
        self.image = None
        self.num_vms = None
        self.available = None
        self.security_groups = None

        if options.print_debug:
            self.tester.stop_euca_logs()
            self.tester.save_euca_logs()

        self.tester = None

    ### Test Cases ###
    def iptables_Cruft(self):
        ### Launch one instance per security group to be tested.
        ### Take a snapshot of iptables before creating security groups and launching instances.
        ### Use the service manager to reach the enabled CC and get its iptables rules.
        partition = self.tester.service_manager.partitions.keys()
        part = list(partition)[0]
        main_part = self.tester.service_manager.partitions.get(part)
        cc_machine = main_part.get_enabled_cc()
        cc_shell = self.tester.create_ssh(hostname=cc_machine.hostname, password=options.cc_password)
        pre_stdin, pre_iptables, pre_stderr = cc_shell.exec_command(
            'iptables-save | grep -v "#" | grep -v "\:PRE" | grep -v "\:POST" | grep -v "\:INPUT" | grep -v "\:FORWARD" | grep -v "\:OUT"'
        )

        self.pre_iptables = list(pre_iptables)

        self.assertTrue(pre_stderr, "pre_Iptables_Snapshot failed.")

        ### Create security group for number of security groups we want to test.

        while self.available > 0:
            ### Create unique security group and authorize SSH and PING
            sec_group = self.tester.add_group(group_name=options.prefix + "-" + str(time.time()))
            self.assertNotEqual(len(sec_group.name), 0, "Could not create group.")
            self.assertTrue(
                self.tester.authorize_group_by_name(group_name=sec_group.name), "Could not authorize group for SSH"
            )
            self.assertTrue(
                self.tester.authorize_group_by_name(group_name=sec_group.name, port=-1, protocol="icmp"),
                "Could not authorize group for PING",
            )
            self.security_groups.append(sec_group)

            ### Launch instance for the unique security group
            try:
                reservation = self.tester.run_instance(
                    self.image, keypair=self.keypair.name, group=sec_group.name, type=options.type
                )
            except Exception, e:
                self.fail("Caught an exception when running the instance: " + str(e))

            self.reservations.append(reservation)

            ### Decrement count of security groups and instances left to create
            self.available -= 1

        ### Loop through and terminate instances
        ### Grab the total number of instances run by this test case
        ### Terminate each instance
        for reservation in self.reservations:
            self.assertTrue(self.tester.terminate_instances(reservation), "Failure when terminating instance.")

        ### Loop through and delete security groups
        for group in self.security_groups:
            self.assertTrue(self.tester.delete_group(group), "Failure when deleting group " + group.name)

        ### Take snapshot of iptables after deleting security groups and terminating instances.
        ### Use service manager to get to enabled CC to get iptables rules
        partition = self.tester.service_manager.partitions.keys()
        part = list(partition)[0]
        main_part = self.tester.service_manager.partitions.get(part)
        cc_machine = main_part.get_enabled_cc()
        cc_shell = self.tester.create_ssh(hostname=cc_machine.hostname, password=options.cc_password)
        post_stdin, post_iptables, post_stderr = cc_shell.exec_command(
            'iptables-save | grep -v "#" | grep -v "\:PRE" | grep -v "\:POST" | grep -v "\:INPUT" | grep -v "\:FORWARD" | grep -v "\:OUT"'
        )

        self.post_iptables = list(post_iptables)

        self.assertTrue(post_stderr, "post_Iptables_Snapshot failed.")

        ### Evaluate pre and post iptables outputs to see if there is a difference.
        if len(self.pre_iptables) != len(self.post_iptables):
            ## Get different lines and print them
            iptables_diff = set(self.post_iptables) - set(self.pre_iptables)
            pp = pprint.PrettyPrinter(indent=4)

            self.tester.critical("\n======================================\n")
            self.tester.critical("Diffences between iptables snapshots: ")
            self.tester.critical("PRE-IPTABLES SNAPSHOT LENGTH: %i", len(self.pre_iptables))
            self.tester.critical("POST-IPTABLES SNAPSHOT LENGTH: %i", len(self.post_iptables))
            self.tester.critical("\n---------------------------------------\n")
            pp.pprint(list(iptables_diff))
            self.tester.critical("\n======================================\n")
Example #40
0
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        if not boto.config.has_section('Boto'):
            boto.config.add_section('Boto')
            boto.config.set('Boto', 'num_retries', '1')
            boto.config.set('Boto', 'http_socket_timeout', '20')
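            # Keep boto retries low and socket timeouts short so hung requests fail fast during failover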
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.tester.ec2.connection.timeout = 30
        self.servman = self.tester.service_manager
        self.instance_timeout = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" +
                                               self.start_time)
            self.tester.authorize_group_by_name(group_name=self.group.name)
            self.tester.authorize_group_by_name(group_name=self.group.name,
                                                port=-1,
                                                protocol="icmp")
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair("keypair-" +
                                                   self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            if self.args.emi:
                self.image = self.tester.get_emi(self.args.emi)
            else:
                self.image = self.tester.get_emi(
                    root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name

            self.tester.clc = self.tester.service_manager.get_enabled_clc(
            ).machine
            self.version = self.tester.clc.sys(
                "cat " + self.tester.eucapath +
                "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            ###
            self.standing_reservation = self.tester.run_instance(
                image=self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(
                self.volume)
            for instance in self.standing_reservation.instances:
                instance.sys("echo " + instance.id + " > " + self.device)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(
                self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(
                self.standing_bucket_name, self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(
                self.standing_bucket_name, self.standing_key_name)
            self.run_instance_params = {
                'image': self.image,
                'keypair': self.keypair.name,
                'group': self.group.name,
                'zone': self.zone,
                'timeout': self.instance_timeout
            }
        except Exception, e:
            self.clean_method()
            raise Exception("Init for testcase failed. Reason: " + str(e))
Example #41
0
class MigrationTest(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.parser.add_argument('--imgurl',
                                 help="BFEBS Image to splat down",
                                 default=None)
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config,
                              password=self.args.password)

        self.clusters = self.tester.service_manager.get_all_cluster_controllers(
        )
        for cluster in self.clusters:
            self.nodes = self.tester.service_manager.get_all_node_controllers(
                part_name=cluster.partition)
            if len(self.nodes) < 2:
                self.tester.debug("Not enough NCs in partition '" +
                                  cluster.partition +
                                  "' to test instance migration.")
                exit(0)
        # TODO
        if len(self.clusters) > 1:
            self.tester.debug(
                "TBD: handle multiple clusters during instance migration tests"
            )
            exit(0)

        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")

        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.numberOfResources = 3
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        try:
            self.tester.get_emi(root_device_type="ebs")
        except:
            bfebs = self.do_with_args(BFEBSBasics)
            bfebs.RegisterImage()

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def MigrationBasic(self, volume=None):
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.reservation = self.tester.run_instance(
            self.image,
            username=self.args.instance_user,
            keypair=self.keypair.name,
            group=self.group.name,
            zone=self.zone)
        instance = self.reservation.instances[0]
        assert isinstance(instance, EuInstance)
        volume_device = None
        if volume is not None:
            volume_device = instance.attach_euvolume(volume)

        self.tester.service_manager.populate_nodes()
        source_nc = self.tester.service_manager.get_all_node_controllers(
            instance_id=instance.id)[0]
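        # Ask the cloud to migrate the instance off its current node controller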
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc &&" +
                        " euserv-migrate-instances -i " + instance.id,
                        code=0)

        def wait_for_new_nc():
            self.tester.service_manager.populate_nodes()
            destination_nc = self.tester.service_manager.get_all_node_controllers(
                instance_id=instance.id)[0]
            return source_nc.hostname == destination_nc.hostname

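        # Wait until the instance is reported on a node other than its original NC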
        self.tester.wait_for_result(wait_for_new_nc,
                                    False,
                                    timeout=600,
                                    poll_wait=60)
        self.assertTrue(self.tester.ping(instance.public_dns_name),
                        'Could not ping instance')

        if volume_device:
            instance.sys("ls " + volume_device, code=0)

        destination_nc = self.tester.service_manager.get_all_node_controllers(
            instance_id=instance.id)[0]
        if destination_nc.machine.distro.name != "vmware":
            destination_nc.machine.sys("virsh list | grep " + instance.id,
                                       code=0)
        else:
            destination_nc.machine.sys("esxcli vm process list | grep " +
                                       instance.id,
                                       code=0)

        self.tester.terminate_instances(reservation=self.reservation)
        if volume is not None:
            self.tester.delete_volume(volume)

    def MigrationInstanceStoreWithVol(self):
        volume = self.tester.create_volume(zone=self.zone)
        assert isinstance(volume, EuVolume)
        self.MigrationBasic(volume)

    def MigrationBasicEBSBacked(self, volume=None):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.MigrationBasic(volume)

    def MigrationBasicEBSBackedWithVol(self):
        volume = self.tester.create_volume(zone=self.zone)
        assert isinstance(volume, EuVolume)
        self.MigrationBasicEBSBacked(volume)

    def MigrateToDest(self):
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.reservation = self.tester.run_instance(
            self.image,
            username=self.args.instance_user,
            keypair=self.keypair.name,
            group=self.group.name,
            zone=self.zone)
        instance = self.reservation.instances[0]
        self.tester.service_manager.populate_nodes()
        self.source_nc = self.tester.service_manager.get_all_node_controllers(
            instance_id=instance.id)[0]

        all_nc = self.tester.service_manager.get_all_node_controllers()
        self.destination_nc = None

        for nc in all_nc:
            if nc.machine.hostname != self.source_nc.machine.hostname:
                self.destination_nc = nc
                enabled_clc.sys(
                    "source " + self.tester.credpath + "/eucarc && " +
                    " euserv-migrate-instances -i " + instance.id +
                    " --include-dest " + self.destination_nc.machine.hostname,
                    code=0)

                def wait_for_new_nc():
                    self.tester.service_manager.populate_nodes()
                    self.instance_node = self.tester.service_manager.get_all_node_controllers(
                        instance_id=instance.id)[0]
                    return self.instance_node.hostname == self.destination_nc.hostname

                self.tester.wait_for_result(wait_for_new_nc,
                                            True,
                                            timeout=600,
                                            poll_wait=60)
                self.assertTrue(self.tester.ping(instance.public_dns_name),
                                'Could not ping instance')

        # migrate the instance back to its original source node
        self.destination_nc = self.source_nc
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                        " euserv-migrate-instances -i " + instance.id +
                        " --include-dest " +
                        self.destination_nc.machine.hostname,
                        code=0)

        self.tester.wait_for_result(wait_for_new_nc,
                                    True,
                                    timeout=600,
                                    poll_wait=60)
        self.assertTrue(self.tester.ping(instance.public_dns_name),
                        'Could not ping instance')

        self.tester.terminate_instances(reservation=self.reservation)

    def MigrationToDestEBSBacked(self):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.MigrateToDest()

    def EvacuateNC(self, volume_list=None):
        # avoid a shared mutable default argument
        if volume_list is None:
            volume_list = []
        instance_list = []
        enabled_clc = self.tester.service_manager.get_enabled_clc().machine
        self.nodes = self.tester.service_manager.populate_nodes()
        # pop out one NC; it will be filled with instances and later evacuated
        self.source_nc = self.nodes.pop()

        def set_state(node, state):
            # retrying, see EUCA-6389
            while node.state != state:
                self.tester.debug(node.hostname + ": SET STATE TO " + state)
                enabled_clc.sys("euca-modify-service -s " + state + " " +
                                node.hostname,
                                code=0)
                self.tester.sleep(10)
                tmpnodes = self.tester.service_manager.populate_nodes()
                for tmpnode in tmpnodes:
                    if tmpnode.hostname == node.hostname:
                        node = tmpnode

        # stop all the NCs
        for node in self.nodes:
            set_state(node, "STOPPED")

        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = self.tester.run_instance(
            self.image,
            min=3,
            max=3,
            username=self.args.instance_user,
            keypair=self.keypair.name,
            group=self.group.name,
            zone=self.zone)

        for i in xrange(3):
            instance = self.reservation.instances[i]
            instance_list.append(instance)
            assert isinstance(instance, EuInstance)
            volume_device = None
            if volume_list:
                volume_device = instance.attach_euvolume(volume_list[i])

        self.nodes = self.tester.service_manager.populate_nodes()
        # start all the NCs
        for node in self.nodes:
            if node.hostname != self.source_nc.hostname:
                set_state(node, "ENABLED")

        self.nodes = self.tester.service_manager.populate_nodes()
        # evacuate source NC
        enabled_clc.sys("source " + self.tester.credpath + "/eucarc && " +
                        " euserv-migrate-instances -s " +
                        self.source_nc.machine.hostname,
                        code=0)

        def wait_for_evacuation():
            self.tester.service_manager.populate_nodes()
            if self.source_nc.machine.distro.name == "vmware":
                emptyNC = self.source_nc.sys(
                    "esxcli vm process list | grep 'Display Name' | awk '{print $3}'"
                )
            else:
                emptyNC = self.source_nc.get_virsh_list()
            return len(emptyNC) == 0

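        # The source NC is fully evacuated once it reports no running instances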
        self.tester.wait_for_result(wait_for_evacuation,
                                    True,
                                    timeout=600,
                                    poll_wait=60)

        for inst in instance_list:
            self.assertTrue(self.tester.ping(inst.public_dns_name),
                            'Could not ping instance')

        self.tester.terminate_instances(reservation=self.reservation)
        if volume_list:
            self.tester.delete_volumes(volume_list)

    def EvacuateNCWithVol(self):
        volume_list = []
        for i in xrange(self.numberOfResources):
            volume = self.tester.create_volume(zone=self.zone)
            assert isinstance(volume, EuVolume)
            volume_list.append(volume)
        self.EvacuateNC(volume_list)

    def EvacuateNCAllEBS(self):
        self.image = self.tester.get_emi(root_device_type="ebs")
        self.EvacuateNC()
Example #42
0
    #Number of ping attempts used to test instance state, before giving up on a running instance
    ping_retry = 100
    #The eutester cloud tester object
    tester = Eucaops(password=password, config_file=config, credpath=credpath)

    #sets tester to throw exception upon failure
    if (options.eof):
        tester.exit_on_fail = 1
    else:
        tester.exit_on_fail = 0

    try:
        ### Create security group if it does not exist. Add ssh authorization to it.
        try:
            group = tester.add_group(group_name)
            tester.authorize_group_by_name(group.name)
            tester.authorize_group_by_name(group.name,
                                           protocol="icmp",
                                           port=-1)
        except Exception, e:
            raise Exception("Error when setting up group:" + str(group_name) +
                            ", Error:" + str(e))

        #Get the remote file size from the http header of the url given
        try:
            url = url.replace('http://', '')
            host = url.split('/')[0]
            path = url.replace(host, '')
            pmsg("get_remote_file, host(" + host + ") path(" + path + ")")
            conn = httplib.HTTPConnection(host)
            conn.request("HEAD", path)
Example #43
0
class InstanceRestore(EutesterTestCase):
    def __init__(self, config_file=None, password=None):
        self.setuptestcase()
        # Setup basic eutester object
        self.tester = Eucaops( config_file=config_file, password=password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        clcs = self.tester.get_component_machines("clc")
        if len(clcs) == 0:
            raise Exception("No CLC found")
        else:
            self.clc = clcs[0]
        self.cur_time = str(int(time.time()))
        self.ncs = self.tester.get_component_machines("nc")

    def clean_method(self):
        ncs = self.tester.get_component_machines("nc")
        for nc in ncs:
            nc.sys("service eucalyptus-nc start")

        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

        ### RESET vmstate properties
        self.modify_property("cloud.vmstate.instance_timeout","60")
        self.modify_property("cloud.vmstate.terminated_time","60")
        for nc in self.ncs:
            nc.sys("service eucalyptus-nc start")

    def restore_logic(self):
        self.modify_property("cloud.vmstate.instance_timeout","1")
        self.modify_property("cloud.vmstate.terminated_time","1")

        self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=self.zone)

        for nc in self.ncs:
            nc.sys("service eucalyptus-nc stop")

        ### Wait for instance to show up as terminating
        self.tester.wait_for_reservation(self.reservation, state="terminated", timeout=600)

        ### Wait for reservation to disappear
        while len(self.tester.get_instances(reservation=self.reservation)) > 0:
            self.tester.sleep(30)

        self.tester.deregister_image(self.image, clear=True)

        for nc in self.ncs:
            nc.sys("service eucalyptus-nc start")

        self.tester.wait_for_reservation(self.reservation, state="running", timeout=600)
        for instance in self.reservation.instances:
            instance.sys("uname -r", code=0)

    def modify_property(self, property, value):
        """
        Modify a eucalyptus property through the command line euca-modify-property tool
        property        Property to modify
        value           Value to set it to
        """
        command = "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-modify-property -p " + str(property) + "=" + str(value)
        if self.clc.found(command, property):
            self.debug("Properly modified property " + property)
        else:
            raise Exception("Setting property " + property + " failed")
Example #44
0
class InstanceBasics(EutesterTestCase):
    def __init__(self):
        #### Pre-conditions
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        # Setup basic eutester object
        self.tester = Eucaops(credpath=self.args.credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")

        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = "%s/%s.pem" % (os.curdir, self.keypair.name)

        ### Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None

    def clean_method(self):
        ### Terminate the reservation if it is still up
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")

        ### DELETE group
        self.tester.delete_group(self.group)

        ### Delete keypair in cloud and from filesystem
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def MyTest(self):
        """
        This case was developed to run through a series of basic instance tests.
             The tests are as follows:
                   - execute run_instances command
                   - make sure that public DNS name and private IP aren't the same
                       (This is for Managed/Managed-NOVLAN networking modes)
                   - test to see if instance is ping-able
                   - test to make sure that instance is accessible via ssh
                       (ssh into instance and run basic ls command)
             If any of these tests fail, the test case will error out, logging the results.
        """
        if not self.reservation:
            self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name)
        for instance in self.reservation.instances:
            self.assertTrue(self.tester.wait_for_reservation(self.reservation), "Instance did not go to running")
            self.assertNotEqual(
                instance.public_dns_name, instance.private_ip_address, "Public and private IP are the same"
            )
            self.assertTrue(self.tester.ping(instance.public_dns_name), "Could not ping instance")
            self.assertFalse(
                instance.found("ls -1 /dev/" + instance.rootfs_device + "2", "No such file or directory"),
                "Did not find ephemeral storage at " + instance.rootfs_device + "2",
            )
Example #45
0
class InstanceBasics(unittest.TestCase):
    def setUp(self, credpath=None):
        # Setup basic eutester object
        if credpath is None:
            credpath = arg_credpath
        self.tester = Eucaops(credpath=credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None

    def BasicInstanceChecks(self, zone=None):
        """Instance checks including reachability and ephemeral storage"""
        if zone is None:
            zone = self.zone
        if self.reservation is None:
            self.reservation = self.tester.run_instance(
                self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=zone)
        for instance in self.reservation.instances:
            self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                            'Instance did not go to running')
            self.assertNotEqual(instance.public_dns_name,
                                instance.private_ip_address,
                                'Public and private IP are the same')
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            'Could not ping instance')
            self.assertFalse(
                instance.found("ls -1 /dev/" + instance.rootfs_device + "2",
                               "No such file or directory"),
                'Did not find ephemeral storage at ' + instance.rootfs_device +
                "2")
        return self.reservation

    def ElasticIps(self, zone=None):
        """ Basic test for elastic IPs
            Allocate an IP, associate it with an instance, ping the instance
            Disassociate the IP, ping the instance
            Release the address"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=zone)
        for instance in self.reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address, 'Unable to allocate address')
            self.tester.associate_address(instance, address)
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            "Could not ping instance with new IP")
            self.tester.disassociate_address_from_instance(instance)
            self.tester.release_address(address)
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            "Could not ping after dissassociate")
        return self.reservation

    def MaxSmallInstances(self, available_small=None, zone=None):
        """Run the maximum m1.smalls available"""
        if available_small is None:
            available_small = self.tester.get_available_vms()
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    min=available_small,
                                                    max=available_small,
                                                    zone=zone)
        self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                        'Not all instances  went to running')
        return self.reservation

    def LargestInstance(self, zone=None):
        """Run 1 of the largest instance c1.xlarge"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    type="c1.xlarge",
                                                    zone=zone)
        self.assertTrue(self.tester.wait_for_reservation(self.reservation),
                        'Not all instances  went to running')
        return self.reservation

    def MetaData(self, zone=None):
        """Check metadata for consistency"""
        # Missing nodes
        # ['block-device-mapping/',  'ami-manifest-path']
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=zone)
        for instance in self.reservation.instances:
            ## Need to verify  the public key (could just be checking for a string of a certain length)
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-keys/0/openssh-key")
                    [0].split('eucalyptus.')[-1], self.keypair.name),
                'Incorrect public key in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("security-groups")[0],
                    self.group.name), 'Incorrect security group in metadata')
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], ""))
            self.assertTrue(
                re.match(instance.get_metadata("instance-id")[0], instance.id),
                'Incorrect instance id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("local-ipv4")[0],
                    instance.private_ip_address),
                'Incorrect private ip in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-ipv4")[0],
                    instance.ip_address), 'Incorrect public ip in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ami-id")[0], instance.image_id),
                'Incorrect ami id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ami-launch-index")[0],
                    instance.ami_launch_index),
                'Incorrect launch index in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("reservation-id")[0],
                    self.reservation.id), 'Incorrect reservation in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("placement/availability-zone")[0],
                    instance.placement),
                'Incorrect availability-zone in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("kernel-id")[0], instance.kernel),
                'Incorrect kernel id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-hostname")[0],
                    instance.public_dns_name),
                'Incorrect public host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("local-hostname")[0],
                    instance.private_dns_name),
                'Incorrect private host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("hostname")[0], instance.dns_name),
                'Incorrect host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ramdisk-id")[0], instance.ramdisk),
                'Incorrect ramdisk in metadata')  #instance-type
            self.assertTrue(
                re.match(
                    instance.get_metadata("instance-type")[0],
                    instance.instance_type),
                'Incorrect instance type in metadata')
            BAD_META_DATA_KEYS = ['foobar']
            for key in BAD_META_DATA_KEYS:
                self.assertTrue(
                    re.search("Not Found",
                              "".join(instance.get_metadata(key))),
                    'No fail message on invalid meta-data node')
        return self.reservation

    def DNSResolveCheck(self, zone=None):
        """Check DNS resolution information for public/private DNS names and IP addresses.  The DNS resolution behavior follows AWS EC2."""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=zone)
        for instance in self.reservation.instances:

            # Test to see if Dynamic DNS has been configured #
            if re.match("internal",
                        instance.private_dns_name.split('eucalyptus.')[-1]):
                # Per AWS standard, resolution should have private hostname or private IP as a valid response
                # Perform DNS resolution against private IP and private DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys("nslookup " +
                                     instance.get_metadata("hostname")[0])[3]),
                    "DNS lookup failed for hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-ipv4")[0],
                        instance.sys("nslookup " +
                                     instance.get_metadata("hostname")[0])[5]),
                    "Incorrect DNS resolution for hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("local-hostname")[0])[3]),
                    "DNS lookup failed for private hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-ipv4")[0],
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("local-hostname")[0])[5]),
                    "Incorrect DNS resolution for private hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys("nslookup " +
                                     instance.get_metadata("local-ipv4")[0])
                        [3]), "DNS lookup failed for private IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-hostname")[0],
                        instance.sys("nslookup " +
                                     instance.get_metadata("local-ipv4")[0])
                        [4]),
                    "Incorrect DNS resolution for private IP address")
                # Perform DNS resolution against public IP and public DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("public-hostname")[0])[3]),
                    "DNS lookup failed for public-hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address
                self.assertTrue(
                    re.search(
                        instance.get_metadata("local-ipv4")[0],
                        instance.sys(
                            "nslookup " +
                            instance.get_metadata("public-hostname")[0])[5]),
                    "Incorrect DNS resolution for public-hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(
                    re.search(
                        'answer\:',
                        instance.sys("nslookup " +
                                     instance.get_metadata("public-ipv4")[0])
                        [3]), "DNS lookup failed for public IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname
                self.assertTrue(
                    re.search(
                        instance.get_metadata("public-hostname")[0],
                        instance.sys("nslookup " +
                                     instance.get_metadata("public-ipv4")[0])
                        [4]), "Incorrect DNS resolution for public IP address")

        return self.reservation

    def DNSCheck(self, zone=None):
        """Check to make sure Dynamic DNS reports correct information for public/private IP address and DNS names"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=zone)
        for instance in self.reservation.instances:

            # Test to see if Dynamic DNS has been configured #
            if re.match("internal",
                        instance.private_dns_name.split('eucalyptus.')[-1]):
                # Make sure that private_ip_address is not the same as local-hostname
                self.assertFalse(
                    re.match(instance.private_ip_address,
                             instance.private_dns_name),
                    'local-ipv4 and local-hostname are the same with DNS on')
                # Make sure that ip_address is not the same as public-hostname
                self.assertFalse(
                    re.match(instance.ip_address, instance.public_dns_name),
                    'public-ipv4 and public-hostname are the same with DNS on')

        return self.reservation

    def Reboot(self, zone=None):
        """Reboot instance ensure IP connectivity and volumes stay attached"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=zone)
        for instance in self.reservation.instances:
            ### Create 1GB volume in first AZ
            self.volume = self.tester.create_volume(instance.placement, 1)
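            ### Wrap the volume in an EuVolume so it can be attached and detached with the euvolume helpers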
            euvolume = EuVolume.make_euvol_from_vol(self.volume)
            self.volume_device = instance.attach_euvolume(euvolume)
            ### Reboot instance
            instance.reboot_instance_and_verify(waitconnect=20)
            instance.detach_euvolume(euvolume)
        return self.reservation

    def Churn(self, testcase="BasicInstanceChecks"):
        """Start instances and stop them before they are running, increase time to terminate on each iteration"""
        from multiprocessing import Process
        from multiprocessing import Queue
        ### Increase time to terminate by step seconds on each iteration
        step = 10

        ## Run through count iterations of test
        count = self.tester.get_available_vms("m1.small") / 2
        thread_pool = []
        queue_pool = []

        ## Start asynchronous activity
        ## Run 5 basic instance check instances 10s apart
        for i in xrange(count):
            q = Queue()
            queue_pool.append(q)
            p = Process(target=self.run_testcase_thread,
                        args=(q, step * i, testcase))
            thread_pool.append(p)
            self.tester.debug("Starting Thread " + str(i) + " in " +
                              str(step * i))
            p.start()

        ### While the other tests are running, run and terminate count instances with a 10s sleep in between
        for i in xrange(count):
            self.reservation = self.image.run()
            self.tester.debug("Sleeping for " + str(step) +
                              " seconds before terminating instances")
            self.tester.sleep(step)
            for instance in self.reservation.instances:
                instance.terminate()
                self.assertTrue(
                    self.tester.wait_for_instance(instance, "terminated"),
                    "Instance did not go to terminated")

        ### Once the previous test is complete rerun the BasicInstanceChecks test case
        ### Wait for an instance to become available
        count = self.tester.get_available_vms("m1.small")
        poll_count = 30
        while poll_count > 0:
            self.tester.sleep(5)
            count = self.tester.get_available_vms("m1.small")
            if count > 0:
                self.tester.debug(
                    "There is an available VM to use for final test")
                break
            poll_count -= 1

        fail_count = 0
        ### Block until the script returns a result
        for queue in queue_pool:
            test_result = queue.get(True)
            self.tester.debug("Got Result: " + str(test_result))
            fail_count += test_result

        for thread in thread_pool:
            thread.join()

        if fail_count > 0:
            raise Exception("Failure detected in one of the " + str(count) +
                            " Basic Instance tests")

        self.tester.debug("Successfully completed churn test")

    def PrivateIPAddressing(self, zone=None):
        """Basic test to run an instance with Private only IP
           and later allocate/associate/diassociate/release 
           an Elastic IP. In the process check after diassociate
           the instance has only got private IP or new Public IP
           gets associated to it"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    private_addressing=True,
                                                    zone=zone)
        for instance in self.reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address, 'Unable to allocate address')
            self.assertTrue(self.tester.associate_address(instance, address))
            self.tester.sleep(30)
            instance.update()
            self.assertTrue(self.tester.ping(instance.public_dns_name),
                            "Could not ping instance with new IP")
            address.disassociate()
            self.tester.sleep(30)
            instance.update()
            self.assertFalse(
                self.tester.ping(instance.public_dns_name),
                "Was able to ping instance that should have only had a private IP"
            )
            address.release()
            if instance.public_dns_name != instance.private_dns_name:
                self.fail("Instance received a new public IP: " +
                          instance.public_dns_name)
        return self.reservation

    def ReuseAddresses(self, zone=None):
        """ Run instances in series and ensure they get the same address"""
        prev_address = None
        if zone is None:
            zone = self.zone
        ### Run the test 5 times in a row
        for i in xrange(5):
            self.reservation = self.tester.run_instance(
                keypair=self.keypair.name, group=self.group.name, zone=zone)
            for instance in self.reservation.instances:
                if prev_address is not None:
                    self.assertTrue(
                        re.search(str(prev_address),
                                  str(instance.public_dns_name)),
                        str(prev_address) +
                        " Address did not get reused but rather  " +
                        str(instance.public_dns_name))
                prev_address = instance.public_dns_name
            self.tester.terminate_instances(self.reservation)

    def run_testcase_thread(self, queue, delay=20, name="MetaData"):
        ### Thread that runs a testcase (function) and returns its pass or fail result
        self.tester.sleep(delay)
        try:
            result = unittest.TextTestRunner(verbosity=2).run(
                InstanceBasics(name))
        except Exception, e:
            queue.put(1)
            raise e
        if result.wasSuccessful():
            self.tester.debug("Passed test: " + name)
            queue.put(0)
            return False
        else:
            self.tester.debug("Failed test: " + name)
            queue.put(1)
            return True
Example #46
0
class Instances(unittest.TestCase):
    def setUp(self):
        # Setup basic eutester object
        
        self.tester = Eucaops( config_file="../input/2b_tested.lst", password="******", credpath="../credentials")
        self.tester.poll_count = 240
        self.tester.start_euca_logs()
        
        ### Determine whether virtio drivers are being used
        self.device_prefix = "sd"
        if self.tester.hypervisor == "kvm":
            self.device_prefix = "vd"
        self.ephemeral = "/dev/" + self.device_prefix + "a2"
        
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name)
        self.tester.sleep(10)
    
    def tearDown(self):
        """Stop Euca logs""" 
        self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.tester.stop_euca_logs()
        self.tester.save_euca_logs()
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None
    
    def test1_Instance(self):
        """Instance checks including reachability and ephemeral storage"""
        for instance in self.reservation.instances:
            self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Instance did not go to running')
            self.assertNotEqual( instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same')
            self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance')
            instance_ssh = Eucaops( hostname=instance.public_dns_name,  keypath= self.keypath)
            self.assertTrue( instance_ssh.found("ls -1 " + self.ephemeral,  self.ephemeral),  'Did not find ephemeral storage at ' + self.ephemeral)
            self.assertTrue( self.tester.terminate_instances(self.reservation), 'Failure when terminating instance')
    
    def test2_ElasticIps(self):
        """ Basic test for elastic IPs"""
        for instance in self.reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address,'Unable to allocate address')
            self.assertTrue(self.tester.associate_address(instance, address))
            self.tester.sleep(30)
            instance.update()
            self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP")
            address.disassociate()
            self.tester.sleep(30)
            instance.update()
            self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP")
            self.tester.release_address()
    
    def test3_MaxInstances(self):
        """Run the maximum m1.smalls available"""
        self.assertTrue(self.tester.terminate_instances(self.reservation), "Was not able to terminate original instance")
        available_small = self.tester.get_available_vms()
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,min=available_small, max=available_small)
        self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances  went to running')
    
    def test4_LargeInstance(self):
        """Run 1 of the largest instance c1.xlarge"""
        self.assertTrue(self.tester.terminate_instances(self.reservation), "Was not able to terminate original instance")
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,type="c1.xlarge")
        self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances  went to running')
    
    def test5_MetaData(self):
        """Check metadata for consistency"""
        # Missing nodes
        # ['block-device-mapping/',  'ami-manifest-path' , 'hostname',  'placement/']   
        for instance in self.reservation.instances:
            instance_ssh = Eucaops( hostname=instance.public_dns_name,  keypath= self.keypath)
            ### Check metadata service
            self.assertTrue(re.search(instance_ssh.get_metadata("public-keys/0/")[0], self.keypair.name))
            self.assertTrue(re.search(instance_ssh.get_metadata("security-groups")[0], self.group)) 
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) 
            self.assertTrue(re.search(instance_ssh.get_metadata("instance-id")[0], instance.id))  
            self.assertTrue(re.search(instance_ssh.get_metadata("local-ipv4")[0] , instance.private_ip_address))
            self.assertTrue(re.search(instance_ssh.get_metadata("public-ipv4")[0] , instance.ip_address))          
            self.assertTrue(re.search(instance_ssh.get_metadata("ami-id")[0], instance.image_id))
            self.assertTrue(re.search(instance_ssh.get_metadata("ami-launch-index")[0], instance.ami_launch_index))
            self.assertTrue(re.search(instance_ssh.get_metadata("reservation-id")[0], self.reservation.id))
            self.assertTrue(re.search(instance_ssh.get_metadata("kernel-id")[0], instance.kernel))
            self.assertTrue(re.search(instance_ssh.get_metadata("public-hostname")[0], instance.public_dns_name))
            self.assertTrue(re.search(instance_ssh.get_metadata("ramdisk-id")[0], instance.ramdisk )) #instance-type
            self.assertTrue(re.search(instance_ssh.get_metadata("instance-type")[0], instance.instance_type ))
           
    def test6_Reboot(self):
        """Reboot instance ensure IP connectivity and volumes stay attached"""
        for instance in self.reservation.instances:
            ### Create 1GB volume in first AZ
            volume = self.tester.create_volume(self.tester.ec2.get_all_zones()[0].name)
            
            ### Pass in check the devices on the instance before the attachment
            device_path = "/dev/" + self.device_prefix  +"j"
            instance_ssh = Eucaops( hostname=instance.public_dns_name,  keypath= self.keypath)
            before_attach = instance_ssh.sys("ls -1 /dev/ | grep " + self.device_prefix)
            
            ### Attach the volume to the instance
            self.assertTrue(self.tester.attach_volume(instance, volume, device_path), "Failure attaching volume")
            
            ### Check devices after attachment
            after_attach = instance_ssh.sys("ls -1 /dev/ | grep " + self.device_prefix)
            new_devices = self.tester.diff(after_attach, before_attach)
            
            ### Check for device in instance
            self.assertTrue(instance_ssh.check_device("/dev/" + new_devices[0]), "Did not find device on instance before reboot")
            
            ### Reboot instance
            instance.reboot()
            self.tester.sleep(30)
            
            ### Check for device in instance
            instance_ssh = Eucaops( hostname=instance.public_dns_name,  keypath= self.keypath)
            self.assertTrue(instance_ssh.check_device("/dev/" + new_devices[0]), "Did not find device on instance after reboot")
            self.assertTrue(self.tester.detach_volume(volume), "Unable to detach volume")
            self.assertTrue(self.tester.delete_volume(volume), "Unable to delete volume")
        
    def suite():
        tests = ['test1_Instance', 'test2_ElasticIps', 'test3_MaxInstances', 'test4_LargeInstance','test5_MetaData', 'test6_Reboot']
        return unittest.TestSuite(map(Instances, tests))
class SSLTermination(EutesterTestCase):
    def __init__(self, extra_args= None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops(credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)

        # test resource hash
        self.test_hash = str(int(time.time()))

        # test resources dir
        self.resource_dir = "./testcases/cloud_user/elb/test_data"

        # User data file
        self.user_data = self.resource_dir+"/webserver_user_data.sh"

        # Add and authorize a group for the instances
        self.group = self.tester.add_group(group_name="group-" + self.test_hash)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp")
        self.tester.authorize_group_by_name(group_name=self.group.name, port=80, protocol="tcp")
        self.tester.authorize_group_by_name(group_name=self.group.name, port=443, protocol="tcp")

        # Generate a keypair for the instances
        self.keypair = self.tester.add_keypair("keypair-" + self.test_hash)
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        # Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi()

        # Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        # create base load balancer
        self.load_balancer_port = 80
        self.load_balancer = self.tester.create_load_balancer(zones=[self.zone],
                                                              name="elb-" + self.test_hash,
                                                              load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)

        # create autoscaling group of webservers that register to the load balancer
        self.count = 1
        (self.web_servers) = self.tester.create_as_webservers(name=self.test_hash,
                                                              keypair=self.keypair.name,
                                                              group=self.group.name,
                                                              zone=self.zone,
                                                              image=self.image.id,
                                                              count=self.count,
                                                              user_data=self.user_data,
                                                              load_balancer=self.load_balancer.name)

        # web servers scaling group
        self.asg = self.tester.describe_as_group(name="asg-"+self.test_hash)

        # wait until scaling instances are InService with the load balancer before continuing - 5 min timeout
        assert self.tester.wait_for_result(self.tester.wait_for_lb_instances, True, timeout=300,
                                           lb=self.load_balancer.name, number=self.count)

    def ssl_termination(self):
        """
        This will test ELB with HTTPS listener for the front end elb connection.

        @raise Exception:
        """
        self.debug("ELB SSl test")

        # get ELB ip info and setup url (also verifies DNS lookup)
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "https://{0}/instance-name".format(lb_ip)

        # upload server certificate
        frontend_cert_name = "elb-ssl-test-"+str(int(time.time()))
        self.tester.add_server_cert(cert_name=frontend_cert_name)

        # remove any existing listeners
        self.remove_all_listeners(lb_name=self.load_balancer.name)

        """create a new listener on HTTPS port 443 and remove listener on port 80"""
        cert_arn = self.tester.get_server_cert(self.cert_name).arn
        listener = (443, 80, "HTTPS", "HTTP", cert_arn)
        self.tester.add_lb_listener(lb_name=self.load_balancer.name, listener=listener)
        self.tester.remove_lb_listener(lb_name=self.load_balancer.name, port=self.load_balancer_port)
        self.tester.update_listener(lb=self.load_balancer,
                                    lb_port=443,
                                    lb_protocol="HTTPS",
                                    instance_port=80,
                                    instance_protocol="HTTP",
                                    cert_arn=frontend_cert_arn)

        # perform https requests to LB
        self.tester.sleep(10)
        self.tester.generate_http_requests(url=lb_url, count=10)

    def end_to_end_ssl_termination(self):
        """
        Test for
        https://eucalyptus.atlassian.net/browse/EUCA-10477

        This will test ELB end to end encryption.

        @raise Exception:
        """
        self.debug("ELB End To End SSl test")

        # get ELB url
        lb_url = "https://"+self.load_balancer.dns_name+"/instance-name"

        # upload server certificate
        frontend_cert_name = "elb-e2e-ssl-test-"+str(int(time.time()))
        self.tester.add_server_cert(cert_name=frontend_cert_name)

        # get the arn of the certificate
        frontend_cert_arn = self.tester.get_server_cert(frontend_cert_name).arn

        # remove any existing listeners
        self.remove_all_listeners(lb_name=self.load_balancer.name)

        # update listener
        self.tester.update_listener(lb=self.load_balancer,
                                    lb_port=443,
                                    lb_protocol="HTTPS",
                                    instance_port=443,
                                    instance_protocol="HTTPS",
                                    cert_arn=frontend_cert_arn)

        # ssl certfile
        cert_file = "ssl_server_certs_basics.crt"

        # PublicKeyPolicy
        cert_body = open(join(self.resource_dir, cert_file)).read()
        publickey_policy_attributes = {'PublicKey': cert_body}
        publickey_policy_name = "snakecert"
        self.tester.create_lb_policy(lb_name=self.load_balancer.name,
                                     policy_name=publickey_policy_name,
                                     policy_type="PublicKeyPolicyType",
                                     policy_attributes=publickey_policy_attributes)

        # BackendServerAuthenticationPolicy
        backend_policy_attributes = {'PublicKeyPolicyName': publickey_policy_name}
        backend_policy_name = "snakeauth"
        self.tester.create_lb_policy(lb_name=self.load_balancer.name,
                                     policy_name=backend_policy_name,
                                     policy_type="BackendServerAuthenticationPolicyType",
                                     policy_attributes=backend_policy_attributes)

        self.tester.set_lb_policy_for_back_end_server(lb_name=self.load_balancer.name,
                                                      instance_port=443,
                                                      policy_name=backend_policy_name)

        # perform https requests to LB
        self.tester.sleep(15)
        self.tester.generate_http_requests(url=lb_url, count=10)

    def only_back_end_authentication(self):
        """
        Test for
        https://eucalyptus.atlassian.net/browse/EUCA-10477

        This will test HTTP connection to ELB front end with back end encryption of traffic to the registered instances.

        @raise Exception:
        """
        self.debug("ELB Back End SSl test")

        # get ELB url
        lb_url = "http://"+self.load_balancer.dns_name+"/instance-name"

        # remove any existing listeners
        self.remove_all_listeners(lb_name=self.load_balancer.name)

        # update listener
        self.tester.update_listener(lb=self.load_balancer,
                                    lb_port=80,
                                    lb_protocol="HTTP",
                                    instance_port=443,
                                    instance_protocol="HTTPS")

        # ssl certfile
        cert_file = "ssl_server_certs_basics.crt"

        # PublicKeyPolicy
        cert_body = open(join(self.resource_dir, cert_file)).read()
        publickey_policy_attributes = {'PublicKey': cert_body}
        publickey_policy_name = "snakecert-backend-only"
        self.tester.create_lb_policy(lb_name=self.load_balancer.name,
                                     policy_name=publickey_policy_name,
                                     policy_type="PublicKeyPolicyType",
                                     policy_attributes=publickey_policy_attributes)

        # BackendServerAuthenticationPolicy
        backend_policy_attributes = {'PublicKeyPolicyName': publickey_policy_name}
        backend_policy_name = "snakeauth-back-end-only"
        self.tester.create_lb_policy(lb_name=self.load_balancer.name,
                                     policy_name=backend_policy_name,
                                     policy_type="BackendServerAuthenticationPolicyType",
                                     policy_attributes=backend_policy_attributes)

        self.tester.set_lb_policy_for_back_end_server(lb_name=self.load_balancer.name,
                                                      instance_port=443,
                                                      policy_name=backend_policy_name)

        # perform https requests to LB
        self.tester.sleep(15)
        self.tester.generate_http_requests(url=lb_url, count=10)

    def invalid_backend_authentication(self):
        """
        Test for
        https://eucalyptus.atlassian.net/browse/EUCA-10477

        This is a negative test for ELB back-end authentication. The policy uses an invalid certificate for the
        back end, so an HTTP 503 error response is expected.

        @raise Exception:
        """
        self.debug("ELB Back End SSl Negative test")

        # get ELB url
        lb_url = "http://"+self.load_balancer.dns_name+"/instance-name"

        # remove any existing listeners
        self.remove_all_listeners(lb_name=self.load_balancer.name)

        # update listener
        self.tester.update_listener(lb=self.load_balancer,
                                    lb_port=80,
                                    lb_protocol="HTTP",
                                    instance_port=443,
                                    instance_protocol="HTTPS")

        # ssl certfile
        cert_file = "bad_cert.crt"

        # PublicKeyPolicy
        cert_body = open(join(self.resource_dir, cert_file)).read()
        publickey_policy_attributes = {'PublicKey': cert_body}
        publickey_policy_name = "snakecertBAD"
        self.tester.create_lb_policy(lb_name=self.load_balancer.name,
                                     policy_name=publickey_policy_name,
                                     policy_type="PublicKeyPolicyType",
                                     policy_attributes=publickey_policy_attributes)

        # BackendServerAuthenticationPolicy
        backend_policy_attributes = {'PublicKeyPolicyName': publickey_policy_name}
        backend_policy_name = "snakeauthBAD"
        self.tester.create_lb_policy(lb_name=self.load_balancer.name,
                                     policy_name=backend_policy_name,
                                     policy_type="BackendServerAuthenticationPolicyType",
                                     policy_attributes=backend_policy_attributes)

        self.tester.set_lb_policy_for_back_end_server(lb_name=self.load_balancer.name,
                                                      instance_port=443,
                                                      policy_name=backend_policy_name)

        # perform https requests to LB
        self.tester.sleep(15)
        got_expected_error = False
        try:
            self.tester.generate_http_requests(url=lb_url, count=10)
        except HTTPError as e:
            self.debug("PASSED, received expected error: " + str(e.getcode()) + " " + e.msg)
            got_expected_error = True

        assert got_expected_error, "Did not get expected HTTP 503 ERROR response"

    def remove_all_listeners(self, lb_name):
        for listener in self.tester.describe_lb_listeners(lb_name):
            self.debug("Found existing listener: " + str(listener))
            self.tester.remove_lb_listener(lb_name=lb_name, port=listener.load_balancer_port)

    def clean_method(self):
        try:
            self.tester.delete_all_server_certs()
        except:
            self.debug("Delete certificates went awry")
        finally:
            self.tester.cleanup_artifacts()
            if self.tester.test_resources["security-groups"]:
                for group in self.tester.test_resources["security-groups"]:
                    self.tester.wait_for_result(self.tester.gracefully_delete_group, True, timeout=60, group=group)
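
# A minimal runner sketch, not part of the original example: it assumes the
# standard EutesterTestCase helpers (create_testunit_by_name, run_test_case_list)
# and simply executes the four SSL termination tests defined above.
if __name__ == "__main__":
    testcase = SSLTermination()
    test_names = ["ssl_termination", "end_to_end_ssl_termination",
                  "only_back_end_authentication", "invalid_backend_authentication"]
    unit_list = [testcase.create_testunit_by_name(name) for name in test_names]
    result = testcase.run_test_case_list(unit_list, clean_on_exit=True)
    exit(result)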
Example #48
class ImageCreator(EutesterTestCase):
    def __init__(self):
        extra_args = ['--size', '--repo-url', '--packages', '--user-data']
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config,
                                  password=self.args.password,
                                  credpath=self.args.credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        if not self.args.emi:
            raise Exception(
                "Must pass base image id to use as parameter --emi")
        self.image = self.tester.get_emi(self.args.emi)
        self.address = None
        self.volume = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.reservation = None
        if self.volume:
            self.tester.delete_volume(self.volume)

    def CreateImage(self, zone=None):
        '''Register a BFEBS snapshot'''
        if zone is None:
            zone = self.zone
        user_data = open(self.args.user_data, mode="r").read()
        self.reservation = self.tester.run_instance(image=self.args.emi,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=zone,
                                                    user_data=user_data)
        for instance in self.reservation.instances:
            if instance.root_device_type == "ebs":
                self.CreateEBS(instance)
            else:
                self.CreateInstanceStore(instance)
        self.tester.terminate_instances(self.reservation)
        self.reservation = None

    def get_machine(self, instance):
        assert isinstance(instance, EuInstance)
        distro = instance.sys('head -1 /etc/issue')[0]
        if re.search("CentOS", distro):
            return Machine(instance.ip_address,
                           keypath=self.keypath,
                           distro="CENTOS",
                           distro_ver="6")
        elif re.search("Ubuntu", distro):
            return Machine(instance.ip_address,
                           keypath=self.keypath,
                           distro="UBUNTU",
                           distro_ver="PRECISE")
        raise Exception("Unable to find supported distro on image")

    def CreateEBS(self, instance):
        machine = self.get_machine(instance)
        if getattr(self.args, 'repo_url', None):
            machine.package_manager.add_repo(self.args.repo_url)
        if getattr(self.args, 'packages', None):
            machine.package_manager.install(self.args.packages)
        volume = self.tester.get_volumes(attached_instance=instance.id)[0]
        snapshot = self.tester.create_snapshot(volume.id)
        self.tester.register_snapshot(snapshot)

    def CreateInstanceStore(self, instance):
        machine = self.get_machine(instance)
        if getattr(self.args, 'repo_url', None):
            machine.package_manager.add_repo(self.args.repo_url)
        if getattr(self.args, 'packages', None):
            machine.package_manager.install(self.args.packages)
        mount_point = "/mnt"
        instance.sys("mfks.ext3 -F /dev/" + instance.rootfs_device + "2")
        instance.sys("mount " + "/dev/" + instance.rootfs_device + "2 " +
                     mount_point)

        image_file_name = "server.img"
        remote_image_file = mount_point + "/" + image_file_name
        instance.sys("dd bs=1M if=/dev/" + instance.rootfs_device + "1 of=" +
                     remote_image_file,
                     timeout=600)
        machine.sftp.get(remote_image_file, image_file_name)

    def find_filesystem(self, machine, block_device):
        for device in machine.sys('ls -1 ' + block_device + "*"):
            if machine.found('file -s ' + device, "filesystem"):
                return device
        raise Exception("Unable to find a filesystem on block device:" +
                        block_device)
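
# Hypothetical invocation sketch (script name and argument values are
# placeholders, not from the source): ImageCreator expects --emi plus the
# extra arguments registered in its __init__ above, e.g.
#
#   python imagecreator.py --emi emi-12345678 --credpath ~/.euca \
#       --repo-url http://mirror.example.com/repo --packages "httpd ntp" \
#       --user-data userdata.sh --size 2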
Example #49
class EbsTestSuite(EutesterTestCase):
    
    tester = None
    zonelist = []
    snaps = []
    keypair = None
    group = None
    multicluster=False
    image = None
    
    def __init__(self, 
                 name=None,
                 args=None,
                 tester=None, 
                 zone=None, 
                 config_file='../input/2b_tested.lst', 
                 password="******", 
                 inst_pass=None,
                 credpath=None, 
                 volumes=None, 
                 keypair=None, 
                 group=None, 
                 emi=None,
                 root_device_type='instance-store',
                 vmtype='c1.medium',
                 eof=1):
        
        self.args = args
        self.setuptestcase(name)
        if tester is None:
            self.tester = Eucaops( config_file=config_file,password=password,credpath=credpath)
        else:
            self.tester = tester
        self.tester.exit_on_fail = eof
    
        self.testlist =[]
        self.inst_pass=inst_pass
        if emi:
            self.image = self.tester.get_emi(emi=emi)
        else:
            self.image = self.tester.get_emi(root_device_type=root_device_type, not_location='windows')
        
        self.vmtype = vmtype
        self.zone = zone
        self.zonelist = []
            
        #create some zone objects and append them to the zonelist
        if self.zone:
            self.zone = TestZone(zone)
            self.zonelist.append(self.zone)
        else: 
            self.setup_testzones()
    
        #If the list of volumes passed in looks good, sort them into the zones
        if self.volumes_list_check(volumes):
            self.sort_volumes(volumes)
            
        #Setup our security group for later use
        if (group is not None):
            self.group = group
        else:
            group_name='EbsTestGroup'
            
            try:
                self.group = self.tester.add_group(group_name,fail_if_exists=False)
                self.tester.authorize_group_by_name(self.group.name)
                self.tester.authorize_group_by_name(self.group.name,protocol="icmp",port=-1)
            except Exception, e:  
                self.debug(self.tester.get_traceback())  
                raise Exception("Error when setting up group:"+str(group_name)+", Error:"+str(e))   
        
    
        #Setup the keypairs for later use
        if not self.inst_pass:
            try:
                if (keypair is not None):
                    self.keypair = keypair
                else:     
                    keys = self.tester.get_all_current_local_keys() 
                    if keys != []:
                        self.keypair = keys[0]
                    else:
                        self.keypair = keypair = self.tester.add_keypair('ebs_test_key-' + str(time.time()))
            except Exception, ke:
                raise Exception("Failed to find/create a keypair, error:" + str(ke))
Example #50
class LoadBalancing(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)

        # test resource hash
        self.test_hash = str(int(time.time()))

        # Add and authorize a group for the instances
        self.group = self.tester.add_group(group_name="group-" +
                                           self.test_hash)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=80,
                                            protocol="tcp")

        # Generate a keypair for the instances
        self.keypair = self.tester.add_keypair("keypair-" + self.test_hash)
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        # User data file
        self.user_data = "./testcases/cloud_user/elb/test_data/webserver_user_data.sh"

        # Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi()

        # Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        # create base load balancer
        self.load_balancer_port = 80
        self.load_balancer = self.tester.create_load_balancer(
            zones=[self.zone],
            name="elb-" + self.test_hash,
            load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)

        # create autoscaling group of webservers that register to the load balancer
        self.count = 2
        (self.web_servers) = self.tester.create_as_webservers(
            name=self.test_hash,
            keypair=self.keypair.name,
            group=self.group.name,
            zone=self.zone,
            image=self.image.id,
            count=self.count,
            user_data=self.user_data,
            load_balancer=self.load_balancer.name)

        # web servers scaling group
        self.asg = self.tester.describe_as_group(name="asg-" + self.test_hash)

        # wait until scaling instances are InService with the load balancer before continuing - 5 min timeout
        assert self.tester.wait_for_result(self.tester.wait_for_lb_instances,
                                           True,
                                           timeout=300,
                                           lb=self.load_balancer.name,
                                           number=self.count)

    def clean_method(self):
        self.tester.cleanup_artifacts()
        if self.tester.test_resources["security-groups"]:
            for group in self.tester.test_resources["security-groups"]:
                self.tester.wait_for_result(
                    self.tester.gracefully_delete_group,
                    True,
                    timeout=60,
                    group=group)

    def GenerateRequests(self):
        """
        This will test the most basic use case for a load balancer.
        Uses two backend instances with httpd servers.
        """
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "http://{0}:{1}/instance-name".format(lb_ip,
                                                       self.load_balancer_port)
        self.tester.generate_http_requests(url=lb_url, count=1000)
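
# A self-contained verification sketch (not part of eutester): fetch the ELB
# URL repeatedly and tally the instance names the backends return, to check by
# hand that requests are spread across both web servers. Assumes the backends
# serve their instance name at /instance-name, as set up by create_as_webservers.
import urllib2
from collections import Counter

def tally_backends(lb_url, count=100):
    hits = Counter()
    for _ in xrange(count):
        hits[urllib2.urlopen(lb_url).read().strip()] += 1
    return hits

# Example: print tally_backends("http://<elb-dns-name>/instance-name")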
Example #51
class EbsTestSuite(EutesterTestCase):

    tester = None
    zonelist = []
    snaps = []
    keypair = None
    group = None
    multicluster = False
    image = None

    def __init__(self,
                 tester=None,
                 zone=None,
                 config_file='../input/2b_tested.lst',
                 password="******",
                 credpath=None,
                 volumes=None,
                 keypair=None,
                 group=None,
                 image=None,
                 eof=1):

        if tester is None:
            self.tester = Eucaops(config_file=config_file,
                                  password=password,
                                  credpath=credpath)
        else:
            self.tester = tester
        self.tester.exit_on_fail = eof

        self.testlist = []

        self.image = image

        #create some zone objects and append them to the zonelist
        if zone is not None:
            self.zone = TestZone(zone)
            self.zonelist.append(self.zone)
        else:
            for zone in self.tester.service_manager.partitions.keys():
                partition = self.tester.service_manager.partitions.get(zone)
                tzone = TestZone(partition)
                self.zonelist.append(tzone)
                self.multicluster = True

        #If the list of volumes passed in looks good, sort them into the zones
        if self.volumes_list_check(volumes):
            self.sort_volumes(volumes)

        #Setup our security group for later use
        if (group is not None):
            self.group = group
        else:
            group_name = 'EbsTestGroup'

            try:
                self.group = self.tester.add_group(group_name)
                self.tester.authorize_group_by_name(self.group.name)
                self.tester.authorize_group_by_name(self.group.name,
                                                    protocol="icmp",
                                                    port=-1)
            except Exception, e:
                raise Exception("Error when setting up group:" +
                                str(group_name) + ", Error:" + str(e))

        #Setup the keypairs for later use
        try:
            if (keypair is not None):
                self.keypair = keypair
            else:
                keys = self.tester.get_all_current_local_keys()
                if keys != []:
                    self.keypair = keys[0]
                else:
                    self.keypair = keypair = self.tester.add_keypair(
                        'ebs_test_key-' + str(time.time()))
        except Exception, ke:
            raise Exception("Failed to find/create a keypair, error:" +
                            str(ke))
Example #52
class ImageCreator(EutesterTestCase):
    def __init__(self):
        extra_args = ['--size', '--repo-url', '--packages', '--user-data']
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = EC2ops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(config_file=self.args.config, password=self.args.password, credpath=self.args.credpath)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        if not self.args.emi:
            raise Exception("Must pass base image id to use as parameter --emi")
        self.image =  self.tester.get_emi(self.args.emi)
        self.address = None
        self.volume = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.reservation = None

    def clean_method(self):
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.reservation = None
        if self.volume:
            self.tester.delete_volume(self.volume)

    def CreateImage(self, zone= None):
        '''Register a BFEBS snapshot'''
        if zone is None:
            zone = self.zone
        user_data = open(self.args.user_data, mode="r").read()
        self.reservation = self.tester.run_instance(image=self.args.emi,keypair=self.keypair.name, group=self.group.name,
                                                    zone=zone, user_data=user_data)
        for instance in self.reservation.instances:
            if instance.root_device_type == "ebs":
                self.CreateEBS(instance)
            else:
                self.CreateInstanceStore(instance)
        self.tester.terminate_instances(self.reservation)
        self.reservation = None

    def get_machine(self, instance):
        assert isinstance(instance, EuInstance)
        distro = instance.sys('head -1 /etc/issue')[0]
        if re.search("CentOS", distro):
            return Machine(instance.ip_address, keypath=self.keypath, distro="CENTOS", distro_ver="6")
        elif re.search("Ubuntu", distro):
            return Machine(instance.ip_address, keypath=self.keypath, distro="UBUNTU", distro_ver="PRECISE")
        raise Exception("Unable to find supported distro on image")

    def CreateEBS(self, instance):
        machine = self.get_machine(instance)
        if getattr(self.args, 'repo_url', None):
            machine.package_manager.add_repo(self.args.repo_url)
        if getattr(self.args, 'packages', None):
            machine.package_manager.install(self.args.packages)
        volume = self.tester.get_volumes(attached_instance=instance.id)[0]
        snapshot = self.tester.create_snapshot(volume.id)
        self.tester.register_snapshot(snapshot)

    def CreateInstanceStore(self, instance):
        machine = self.get_machine(instance)
        if getattr(self.args, 'repo_url', None):
            machine.package_manager.add_repo(self.args.repo_url)
        if getattr(self.args, 'packages', None):
            machine.package_manager.install(self.args.packages)
        mount_point = "/mnt"
        instance.sys("mfks.ext3 -F /dev/" + instance.rootfs_device + "2" )
        instance.sys("mount " + "/dev/" + instance.rootfs_device + "2 " + mount_point )

        image_file_name = "server.img"
        remote_image_file = mount_point + "/" + image_file_name
        instance.sys("dd bs=1M if=/dev/" + instance.rootfs_device + "1 of=" + remote_image_file, timeout=600)
        machine.sftp.get(remote_image_file, image_file_name)

    def find_filesystem(self, machine, block_device):
        for device in machine.sys('ls -1 ' + block_device + "*"):
            if machine.found('file -s ' + device, "filesystem"):
                return device
        raise Exception("Unable to find a filesystem on block device:" + block_device)
Example #53
class InstanceBasics(EutesterTestCase):
    def __init__(self,
                 name="InstanceBasics",
                 credpath=None,
                 region=None,
                 config_file=None,
                 password=None,
                 emi=None,
                 zone=None,
                 user_data=None,
                 instance_user=None,
                 **kwargs):
        """
        EC2 API tests focused on instance store instances

        :param credpath: Path to directory containing eucarc file
        :param region: EC2 Region to run testcase in
        :param config_file: Configuration file path
        :param password: SSH password for bare metal machines if config is passed and keys arent synced
        :param emi: Image id to use for test
        :param zone: Availability Zone to run test in
        :param user_data: User Data to pass to instance
        :param instance_user: User to login to instance as
        :param kwargs: Additional arguments
        """
        super(InstanceBasics, self).__init__(name=name)
        if region:
            self.tester = EC2ops(credpath=credpath, region=region)
        else:
            self.tester = Eucaops(config_file=config_file,
                                  password=password,
                                  credpath=credpath)
        self.instance_timeout = 600

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        if emi:
            self.image = emi
        else:
            self.image = self.tester.get_emi(root_device_type="instance-store",
                                             not_platform="windows")
        self.address = None
        self.volume = None
        self.private_addressing = False
        if not zone:
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name
        else:
            self.zone = zone
        self.reservation = None
        self.reservation_lock = threading.Lock()
        self.run_instance_params = {
            'image': self.image,
            'user_data': user_data,
            'username': instance_user,
            'keypair': self.keypair.name,
            'group': self.group.name,
            'zone': self.zone,
            'timeout': self.instance_timeout
        }
        self.managed_network = True

        ### If I have access to the underlying infrastructure I can look
        ### at the network mode and only run certain tests where it makes sense
        if hasattr(self.tester, "service_manager"):
            cc = self.tester.get_component_machines("cc")[0]
            network_mode = cc.sys(
                "cat " + self.tester.eucapath +
                "/etc/eucalyptus/eucalyptus.conf | grep MODE")[0]
            if re.search("(SYSTEM|STATIC)", network_mode):
                self.managed_network = False

    def set_reservation(self, reservation):
        self.reservation_lock.acquire()
        self.reservation = reservation
        self.reservation_lock.release()

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def BasicInstanceChecks(self):
        """
        This case was developed to run through a series of basic instance tests.
             The tests are as follows:
                   - execute run_instances command
                   - make sure that public DNS name and private IP aren't the same
                       (This is for Managed/Managed-NOVLAN networking modes)
                   - test to see if instance is ping-able
                   - test to make sure that instance is accessible via ssh
                       (ssh into instance and run basic ls command)
             If any of these tests fail, the test case will error out, logging the results.
        """
        reservation = self.tester.run_instance(**self.run_instance_params)
        for instance in reservation.instances:
            self.assertTrue(self.tester.wait_for_reservation(reservation),
                            'Instance did not go to running')
            self.assertTrue(self.tester.ping(instance.ip_address),
                            'Could not ping instance')
            if self.image.virtualization_type == "paravirtual":
                paravirtual_ephemeral = "/dev/" + instance.rootfs_device + "2"
                self.assertFalse(
                    instance.found("ls -1 " + paravirtual_ephemeral,
                                   "No such file or directory"),
                    "Did not find ephemeral storage at " +
                    paravirtual_ephemeral)
            elif self.image.virtualization_type == "hvm":
                hvm_ephemeral = "/dev/" + instance.block_device_prefix + "b"
                self.assertFalse(
                    instance.found("ls -1 " + hvm_ephemeral,
                                   "No such file or directory"),
                    "Did not find ephemeral storage at " + hvm_ephemeral)
        self.set_reservation(reservation)
        return reservation

    def ElasticIps(self):
        """
       This case was developed to test elastic IPs in Eucalyptus. This test case does
       not test instances that are launched using private-addressing option.
       The test case executes the following tests:
           - allocates an IP, associates the IP to the instance, then pings the instance.
           - disassociates the allocated IP, then pings the instance.
           - releases the allocated IP address
       If any of the tests fail, the test case will error out, logging the results.
        """
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation

        for instance in reservation.instances:
            if instance.ip_address == instance.private_ip_address:
                self.tester.debug(
                    "WARNING: System or Static mode detected, skipping ElasticIps"
                )
                return reservation
            self.address = self.tester.allocate_address()
            self.assertTrue(self.address, 'Unable to allocate address')
            self.tester.associate_address(instance, self.address)
            instance.update()
            self.assertTrue(self.tester.ping(instance.ip_address),
                            "Could not ping instance with new IP")
            self.tester.disassociate_address_from_instance(instance)
            self.tester.release_address(self.address)
            self.address = None
            assert isinstance(instance, EuInstance)
            self.tester.sleep(5)
            instance.update()
            self.assertTrue(self.tester.ping(instance.ip_address),
                            "Could not ping after dissassociate")
        self.set_reservation(reservation)
        return reservation

    def MultipleInstances(self):
        """
        This case was developed to test running multiple instances at once. The test launches
        two instances of the default vm type in a single reservation, then checks that all of
        the instances reach a running state. If there is a failure, the test case errors out,
        logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)

        reservation = self.tester.run_instance(min=2,
                                               max=2,
                                               **self.run_instance_params)
        self.assertTrue(self.tester.wait_for_reservation(reservation),
                        'Not all instances  went to running')
        self.set_reservation(reservation)
        return reservation

    def LargestInstance(self):
        """
        This case was developed to test the largest vm type, c1.xlarge. The test launches a
        single c1.xlarge instance, then checks that it reaches a running state. If there is
        a failure, the test case errors out, logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)
        reservation = self.tester.run_instance(type="c1.xlarge",
                                               **self.run_instance_params)
        self.assertTrue(self.tester.wait_for_reservation(reservation),
                        'Not all instances  went to running')
        self.set_reservation(reservation)
        return reservation

    def MetaData(self):
        """
        This case was developed to test the metadata service of an instance for consistency.
        The following meta-data attributes are tested:
           - public-keys/0/openssh-key
           - security-groups
           - instance-id
           - local-ipv4
           - public-ipv4
           - ami-id
           - ami-launch-index
           - reservation-id
           - placement/availability-zone
           - kernel-id
           - public-hostname
           - local-hostname
           - hostname
           - ramdisk-id
           - instance-type
           - any bad metadata that shouldn't be present.
        Missing nodes
         ['block-device-mapping/',  'ami-manifest-path']
        If any of these tests fail, the test case will error out; logging the results.
        """
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            ## Need to verify  the public key (could just be checking for a string of a certain length)
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-keys/0/openssh-key")
                    [0].split('eucalyptus.')[-1], self.keypair.name),
                'Incorrect public key in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("security-groups")[0],
                    self.group.name), 'Incorrect security group in metadata')
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], ""))
            self.assertTrue(
                re.match(instance.get_metadata("instance-id")[0], instance.id),
                'Incorrect instance id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("local-ipv4")[0],
                    instance.private_ip_address),
                'Incorrect private ip in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-ipv4")[0],
                    instance.ip_address), 'Incorrect public ip in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ami-id")[0], instance.image_id),
                'Incorrect ami id in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("ami-launch-index")[0],
                    instance.ami_launch_index),
                'Incorrect launch index in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("reservation-id")[0],
                    reservation.id), 'Incorrect reservation in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("placement/availability-zone")[0],
                    instance.placement),
                'Incorrect availability-zone in metadata')
            if self.image.virtualization_type == "paravirtual":
                self.assertTrue(
                    re.match(
                        instance.get_metadata("kernel-id")[0],
                        instance.kernel), 'Incorrect kernel id in metadata')
                self.assertTrue(
                    re.match(
                        instance.get_metadata("ramdisk-id")[0],
                        instance.ramdisk), 'Incorrect ramdisk in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("public-hostname")[0],
                    instance.public_dns_name),
                'Incorrect public host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("local-hostname")[0],
                    instance.private_dns_name),
                'Incorrect private host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("hostname")[0],
                    instance.private_dns_name),
                'Incorrect host name in metadata')
            self.assertTrue(
                re.match(
                    instance.get_metadata("instance-type")[0],
                    instance.instance_type),
                'Incorrect instance type in metadata')
            bad_meta_data_keys = ['foobar']
            for key in bad_meta_data_keys:
                self.assertTrue(
                    re.search("Not Found",
                              "".join(instance.get_metadata(key))),
                    'No fail message on invalid meta-data node')
        self.set_reservation(reservation)
        return reservation

    def DNSResolveCheck(self):
        """
        This case was developed to test DNS resolution information for public/private DNS
        names and IP addresses.  The tested DNS resolution behavior is expected to follow
        AWS EC2.  The following tests are run using the associated meta-data attributes:
           - check to see if Eucalyptus Dynamic DNS is configured
           - nslookup on hostname; checks to see if it matches local-ipv4
           - nslookup on local-hostname; check to see if it matches local-ipv4
           - nslookup on local-ipv4; check to see if it matches local-hostname
           - nslookup on public-hostname; check to see if it matches local-ipv4
           - nslookup on public-ipv4; check to see if it matches public-hostname
        If any of these tests fail, the test case will error out; logging the results.
        """
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation

        for instance in reservation.instances:
            if not re.search("internal", instance.private_dns_name):
                self.tester.debug(
                    "Did not find instance DNS enabled, skipping test")
                self.set_reservation(reservation)
                return reservation
            # Test to see if Dynamic DNS has been configured #
            # Per AWS standard, resolution should have private hostname or private IP as a valid response
            # Perform DNS resolution against public IP and public DNS name
            # Perform DNS resolution against private IP and private DNS name
            # Check to see if nslookup was able to resolve
            assert isinstance(instance, EuInstance)
            # Check nslookup to resolve public DNS Name to local-ipv4 address
            self.assertTrue(
                instance.found("nslookup " + instance.public_dns_name,
                               instance.private_ip_address),
                "Incorrect DNS resolution for hostname.")
            # Check nslookup to resolve public-ipv4 address to public DNS name
            if self.managed_network:
                self.assertTrue(
                    instance.found("nslookup " + instance.ip_address,
                                   instance.public_dns_name),
                    "Incorrect DNS resolution for public IP address")
            # Check nslookup to resolve private DNS Name to local-ipv4 address
            if self.managed_network:
                self.assertTrue(
                    instance.found("nslookup " + instance.private_dns_name,
                                   instance.private_ip_address),
                    "Incorrect DNS resolution for private hostname.")
            # Check nslookup to resolve local-ipv4 address to private DNS name
            self.assertTrue(
                instance.found("nslookup " + instance.private_ip_address,
                               instance.private_dns_name),
                "Incorrect DNS resolution for private IP address")
            self.assertTrue(self.tester.ping(instance.public_dns_name))
        self.set_reservation(reservation)
        return reservation

    def Reboot(self):
        """
        This case was developed to test IP connectivity and volume attachment after
        instance reboot.  The following tests are done for this test case:
                   - creates a 1 gig EBS volume, then attach volume
                   - reboot instance
                   - attempts to connect to instance via ssh
                   - checks to see if EBS volume is attached
                   - detaches volume
                   - deletes volume
        If any of these tests fail, the test case will error out; logging the results.
        """
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            ### Create 1GB volume in first AZ
            volume = self.tester.create_volume(instance.placement,
                                               size=1,
                                               timepergig=180)
            instance.attach_volume(volume)
            ### Reboot instance
            instance.reboot_instance_and_verify(waitconnect=20)
            instance.detach_euvolume(volume)
            self.tester.delete_volume(volume)
        self.set_reservation(reservation)
        return reservation

    def Churn(self):
        """
        This case was developed to test robustness of Eucalyptus by starting instances,
        stopping them before they are running, and increase the time to terminate on each
        iteration.  This test case leverages the BasicInstanceChecks test case. The
        following steps are ran:
            - runs BasicInstanceChecks test case 5 times, 10 second apart.
            - While each test is running, run and terminate instances with a 10sec sleep in between.
            - When a test finishes, rerun BasicInstanceChecks test case.
        If any of these tests fail, the test case will error out; logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)
        try:
            available_instances_before = self.tester.get_available_vms(
                zone=self.zone)
            if available_instances_before > 4:
                count = 4
            else:
                count = available_instances_before
        except IndexError, e:
            self.debug("Running as non-admin, defaulting to 4 VMs")
            available_instances_before = count = 4

        future_instances = []

        with ThreadPoolExecutor(max_workers=count) as executor:
            ## Start asynchronous activity
            ## Run 5 basic instance check instances 10s apart
            for i in xrange(count):
                future_instances.append(
                    executor.submit(self.BasicInstanceChecks))
                self.tester.sleep(10)

        with ThreadPoolExecutor(max_workers=count) as executor:
            ## Start asynchronous activity
            ## Terminate all instances
            for future in future_instances:
                executor.submit(self.tester.terminate_instances,
                                future.result())

        def available_after_greater():
            return self.tester.get_available_vms(
                zone=self.zone) >= available_instances_before

        self.tester.wait_for_result(available_after_greater,
                                    result=True,
                                    timeout=360)
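
# A minimal driver sketch (the credpath value is a placeholder, not from the
# source): instantiate the test case directly, run two of the checks above,
# and clean up afterwards.
if __name__ == "__main__":
    basics = InstanceBasics(credpath="~/.euca")
    try:
        basics.BasicInstanceChecks()
        basics.MetaData()
    finally:
        basics.clean_method()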
Example #54
class InstanceBasics(unittest.TestCase):
    def setUp(self, credpath=None):
        # Setup basic eutester object
        if credpath is None:
            credpath = arg_credpath
        self.tester = Eucaops( credpath=credpath)
        self.tester.poll_count = 120
        
        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        ### Use a random instance-store backed EMI if no cli option set 
        if arg_emi is False:
            self.image = self.tester.get_emi(root_device_type="instance-store")
        else: 
            self.image  = self.tester.get_emi(arg_emi)
        self.reservation = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

    
    def tearDown(self):
        if self.reservation is not None:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)
        self.reservation = None
        self.group = None
        self.keypair = None
        self.tester = None
        self.ephemeral = None


    def BasicInstanceChecks(self, zone = None):
        """Instance checks including reachability and ephemeral storage"""
        if zone is None:
            zone = self.zone
        if self.reservation is None:
            self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Instance did not go to running')
            self.assertNotEqual( instance.public_dns_name, instance.private_ip_address, 'Public and private IP are the same')
            self.assertTrue( self.tester.ping(instance.public_dns_name), 'Could not ping instance')
            self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2",  "No such file or directory"),  'Did not find ephemeral storage at ' + instance.rootfs_device + "2")
        return self.reservation
    
    def ElasticIps(self, zone = None):
        """ Basic test for elastic IPs
            Allocate an IP, associate it with an instance, ping the instance
            Disassociate the IP, ping the instance
            Release the address"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name,zone=zone)
        for instance in self.reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address,'Unable to allocate address')
            self.tester.associate_address(instance, address)
            instance.update()
            self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP")
            self.tester.disassociate_address_from_instance(instance)
            self.tester.release_address(address)
            instance.update()
            self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping after dissassociate")
        return self.reservation
    
    def MaxSmallInstances(self, available_small=None,zone = None):
        """Run the maximum m1.smalls available"""
        if available_small is None:
            available_small = self.tester.get_available_vms()
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,min=available_small, max=available_small, zone=zone)
        self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances  went to running')
        return self.reservation
    
    def LargestInstance(self, zone = None): 
        """Run 1 of the largest instance c1.xlarge"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name,type="c1.xlarge",zone=zone)
        self.assertTrue( self.tester.wait_for_reservation(self.reservation) ,'Not all instances  went to running')
        return self.reservation
    
    def MetaData(self, zone=None):
        """Check metadata for consistency"""
        # Missing nodes
        # ['block-device-mapping/',  'ami-manifest-path']
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            ## Need to verify  the public key (could just be checking for a string of a certain length)
            self.assertTrue(re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata')
            self.assertTrue(re.match(instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata') 
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) 
            self.assertTrue(re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata')  
            self.assertTrue(re.match(instance.get_metadata("local-ipv4")[0] , instance.private_ip_address), 'Incorrect private ip in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-ipv4")[0] , instance.ip_address), 'Incorrect public ip in metadata')          
            self.assertTrue(re.match(instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata')
            self.assertTrue(re.match(instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata')
            self.assertTrue(re.match(instance.get_metadata("reservation-id")[0], self.reservation.id), 'Incorrect reservation in metadata')
            self.assertTrue(re.match(instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata')
            self.assertTrue(re.match(instance.get_metadata("kernel-id")[0], instance.kernel),  'Incorrect kernel id in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("hostname")[0], instance.dns_name), 'Incorrect host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("ramdisk-id")[0], instance.ramdisk ), 'Incorrect ramdisk in metadata') #instance-type
            self.assertTrue(re.match(instance.get_metadata("instance-type")[0], instance.instance_type ), 'Incorrect instance type in metadata')
            BAD_META_DATA_KEYS = ['foobar']
            for key in BAD_META_DATA_KEYS:
                self.assertTrue(re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node')
        return self.reservation
           
    def DNSResolveCheck(self, zone=None):
        """Check DNS resolution information for public/private DNS names and IP addresses.  The DNS resolution behavior follows AWS EC2."""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
           
            # Test to see if Dynamic DNS has been configured # 
            if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]):
                # Per AWS standard, resolution should have private hostname or private IP as a valid response
                # Perform DNS resolution against private IP and private DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("hostname")[0])[3]), "DNS lookup failed for hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("hostname")[0])[5]), "Incorrect DNS resolution for hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("local-hostname")[0])[3]), "DNS lookup failed for private hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-hostname returns local-ipv4 address
                self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("local-hostname")[0])[5]), "Incorrect DNS resolution for private hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("local-ipv4")[0])[3]), "DNS lookup failed for private IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on local-ipv4 address returns local-hostname
                self.assertTrue(re.search(instance.get_metadata("local-hostname")[0], instance.sys("nslookup " +  instance.get_metadata("local-ipv4")[0])[4]), "Incorrect DNS resolution for private IP address")       
                # Perform DNS resolution against public IP and public DNS name
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("public-hostname")[0])[3]), "DNS lookup failed for public-hostname.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-hostname returns local-ipv4 address
                self.assertTrue(re.search(instance.get_metadata("local-ipv4")[0], instance.sys("nslookup " + instance.get_metadata("public-hostname")[0])[5]), "Incorrect DNS resolution for public-hostname.")
                # Check to see if nslookup was able to resolve
                self.assertTrue(re.search('answer\:', instance.sys("nslookup " +  instance.get_metadata("public-ipv4")[0])[3]), "DNS lookup failed for public IP address.")
                # Since nslookup was able to resolve, now check to see if nslookup on public-ipv4 address returns public-hostname
                self.assertTrue(re.search(instance.get_metadata("public-hostname")[0], instance.sys("nslookup " +  instance.get_metadata("public-ipv4")[0])[4]), "Incorrect DNS resolution for public IP address")

        return self.reservation
           
    def DNSCheck(self, zone=None):
        """Check to make sure Dynamic DNS reports correct information for public/private IP address and DNS names"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image,keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
           
            # Test to see if Dynamic DNS has been configured # 
            if re.match("internal", instance.private_dns_name.split('eucalyptus.')[-1]):
                # Make sure that private_ip_address is not the same as local-hostname
                self.assertFalse(re.match(instance.private_ip_address, instance.private_dns_name), 'local-ipv4 and local-hostname are the same with DNS on') 
                # Make sure that ip_address is not the same as public-hostname
                self.assertFalse(re.match(instance.ip_address, instance.public_dns_name), 'public-ipv4 and public-hostname are the same with DNS on')

        return self.reservation

    def Reboot(self, zone=None):
        """Reboot instance ensure IP connectivity and volumes stay attached"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=zone)
        for instance in self.reservation.instances:
            ### Create 1GB volume in first AZ
            self.volume = self.tester.create_volume(instance.placement, 1)
            euvolume = EuVolume.make_euvol_from_vol(self.volume)
            self.volume_device = instance.attach_euvolume(euvolume)
            ### Reboot instance
            instance.reboot_instance_and_verify(waitconnect=20)
            instance.detach_euvolume(euvolume)
        return self.reservation
    
    def Churn(self, testcase="BasicInstanceChecks"):
        """Start instances and stop them before they are running, increase time to terminate on each iteration"""
        from multiprocessing import Process
        from multiprocessing import Queue
        ### Increase time to terminate by step seconds on each iteration
        step = 10
        
        ## Run through count iterations of test
        count = self.tester.get_available_vms("m1.small") / 2
        thread_pool = []
        queue_pool = []
        
        ## Start asynchronous activity
        ## Run 5 basic instance check instances 10s apart
        for i in xrange(count):
            q = Queue()
            queue_pool.append(q)
            p = Process(target=self.run_testcase_thread, args=(q, step * i,testcase))
            thread_pool.append(p)
            self.tester.debug("Starting Thread " + str(i) +" in " + str(step * i))
            p.start()

        ### While the other tests are running, run and terminate count instances with a 10s sleep in between
        for i in xrange(count):
            self.reservation = self.image.run()
            self.tester.debug("Sleeping for " + str(step) + " seconds before terminating instances")
            self.tester.sleep(step )
            for instance in self.reservation.instances:
                instance.terminate()
                self.assertTrue(self.tester.wait_for_instance(instance, "terminated"), "Instance did not go to terminated")
        
        ### Once the previous test is complete rerun the BasicInstanceChecks test case
        ### Wait for an instance to become available
        count = self.tester.get_available_vms("m1.small")
        poll_count = 30
        while poll_count > 0:
            self.tester.sleep(5)
            count = self.tester.get_available_vms("m1.small")
            if count > 0:
                self.tester.debug("There is an available VM to use for final test")
                break
            poll_count -= 1
        
        fail_count = 0
        ### Block until the script returns a result
        for queue in queue_pool:
            test_result = queue.get(True)
            self.tester.debug("Got Result: " + str(test_result) )
            fail_count += test_result

        for thread in thread_pool:
            thread.join()
        
        if fail_count > 0:
            raise Exception("Failure detected in one of the " + str(count)  + " Basic Instance tests")

        self.tester.debug("Successfully completed churn test")

    def PrivateIPAddressing(self, zone = None):
        """Basic test to run an instance with Private only IP
           and later allocate/associate/diassociate/release 
           an Elastic IP. In the process check after diassociate
           the instance has only got private IP or new Public IP
           gets associated to it"""
        if zone is None:
            zone = self.zone
        self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, private_addressing=True, zone=zone)
        for instance in self.reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address,'Unable to allocate address')
            self.assertTrue(self.tester.associate_address(instance, address))
            self.tester.sleep(30)
            instance.update()
            self.assertTrue( self.tester.ping(instance.public_dns_name), "Could not ping instance with new IP")
            address.disassociate()
            self.tester.sleep(30)
            instance.update()
            self.assertFalse( self.tester.ping(instance.public_dns_name), "Was able to ping instance that should have only had a private IP")
            address.release()
            if instance.public_dns_name != instance.private_dns_name:
                self.fail("Instance received a new public IP: " + instance.public_dns_name)
        return self.reservation
    
    def ReuseAddresses(self, zone = None):
        """ Run instances in series and ensure they get the same address"""
        prev_address = None
        if zone is None:
            zone = self.zone
        ### Run the test 5 times in a row
        for i in xrange(5):
            self.reservation = self.tester.run_instance(keypair=self.keypair.name, group=self.group.name, zone=zone)
            for instance in self.reservation.instances:
                if prev_address is not None:
                    self.assertTrue(re.search(str(prev_address), str(instance.public_dns_name)), "Address " + str(prev_address) + " was not reused; instance got " + str(instance.public_dns_name) + " instead")
                prev_address = instance.public_dns_name
            self.tester.terminate_instances(self.reservation)
            
    def run_testcase_thread(self, queue,delay = 20, name="MetaData"):
        ### Thread that runs a testcase (function) and returns its pass or fail result
        self.tester.sleep(delay)
        try:
            result = unittest.TextTestRunner(verbosity=2).run(InstanceBasics(name))
        except Exception, e:
            queue.put(1)
            raise e
        if result.wasSuccessful():
            self.tester.debug("Passed test: " + name)
            queue.put(0)
            return False
        else:
            self.tester.debug("Failed test: " + name)
            queue.put(1)
            return True
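Since this variant is built on unittest.TestCase and reads module-level arg_credpath / arg_emi globals in setUp(), a plausible way to drive it is a small argparse block that fills those globals and builds a suite from the selected method names. This is a hypothetical runner sketch, not part of the original example.

if __name__ == "__main__":
    import argparse
    import unittest

    parser = argparse.ArgumentParser(description="Run InstanceBasics tests (hypothetical runner)")
    parser.add_argument("--credpath", default=None, help="Path to directory containing eucarc")
    parser.add_argument("--emi", default=False, help="Image id to use; omit to pick one automatically")
    parser.add_argument("--tests", nargs="+",
                        default=["BasicInstanceChecks", "ElasticIps", "MetaData"])
    args = parser.parse_args()

    # setUp() above reads these module-level globals
    arg_credpath = args.credpath
    arg_emi = args.emi

    # Each selected method name becomes one test case instance in the suite
    suite = unittest.TestSuite([InstanceBasics(name) for name in args.tests])
    unittest.TextTestRunner(verbosity=2).run(suite)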
Example #55
0
class SSLTermination(EutesterTestCase):
    def __init__(self, extra_args=None):
        self.cert_name = "elb-ssl-test-" + str(time.time())
        self.setuptestcase()
        self.setup_parser()
        if extra_args:
            for arg in extra_args:
                self.parser.add_argument(arg)
        self.get_args()

        # Setup basic eutester object
        if self.args.region:
            self.tester = ELBops(credpath=self.args.credpath,
                                 region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)

        ### Get an image
        self.image = self.args.emi
        if not self.image:
            self.image = self.tester.get_emi(root_device_type="instance-store")

        ### Populate available zones
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.load_balancer_port = 80

        (self.web_servers, self.filename) = self.tester.create_web_servers(
            keypair=self.keypair,
            group=self.group,
            zone=self.zone,
            port=self.load_balancer_port,
            filename='instance-name',
            image=self.image,
            count=1)

        self.load_balancer = self.tester.create_load_balancer(
            zones=[self.zone],
            name="test-" + str(time.time()),
            load_balancer_port=self.load_balancer_port)
        assert isinstance(self.load_balancer, LoadBalancer)
        self.tester.register_lb_instances(self.load_balancer.name,
                                          self.web_servers.instances)

    def ssl_termination(self):
        """
        This will test ELB with an HTTPS listener.
        """
        self.debug("ELB SSL test")
        # Get ELB IP info and set up the URL
        dns = self.tester.service_manager.get_enabled_dns()
        lb_ip = dns.resolve(self.load_balancer.dns_name)
        lb_url = "https://{0}/instance-name".format(lb_ip)
        # Upload the server certificate
        self.tester.add_server_cert(cert_name=self.cert_name)
        # Create a new listener on HTTPS port 443 and remove the listener on port 80
        cert_arn = self.tester.get_server_cert(self.cert_name).arn
        listener = (443, 80, "HTTPS", cert_arn)
        self.tester.add_lb_listener(lb_name=self.load_balancer.name,
                                    listener=listener)
        self.tester.remove_lb_listener(lb_name=self.load_balancer.name,
                                       port=self.load_balancer_port)
        # Perform HTTPS requests against the load balancer
        self.tester.generate_http_requests(url=lb_url, count=10)

    def clean_method(self):
        self.tester.delete_server_cert(self.cert_name)
        self.tester.cleanup_artifacts()
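A hedged sketch of driving the SSLTermination case with the same EutesterTestCase runner pattern used elsewhere in these examples; the helper names (create_testunit_by_name, run_test_case_list) and the --tests flag are assumptions, not part of the original listing.

if __name__ == "__main__":
    testcase = SSLTermination()
    # Default to the single HTTPS listener test defined above
    test_names = testcase.args.tests or ["ssl_termination"]
    unit_list = [testcase.create_testunit_by_name(name) for name in test_names]
    # clean_on_exit calls clean_method(), deleting the uploaded cert and ELB artifacts
    result = testcase.run_test_case_list(unit_list, clean_on_exit=True)
    exit(result)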
Example #56
0
class InstanceBasics(EutesterTestCase):
    def __init__( self, name="InstanceBasics", credpath=None, region=None, config_file=None, password=None, emi=None, zone=None,
                  user_data=None, instance_user=None, **kwargs):
        """
        EC2 API tests focused on instance store instances

        :param credpath: Path to directory containing eucarc file
        :param region: EC2 Region to run testcase in
        :param config_file: Configuration file path
        :param password: SSH password for bare metal machines if config is passed and keys arent synced
        :param emi: Image id to use for test
        :param zone: Availability Zone to run test in
        :param user_data: User Data to pass to instance
        :param instance_user: User to login to instance as
        :param kwargs: Additional arguments
        """
        super(InstanceBasics, self).__init__(name=name)
        if region:
            self.tester = EC2ops(credpath=credpath, region=region)
        else:
            self.tester = Eucaops(config_file=config_file, password=password, credpath=credpath)
        self.instance_timeout = 480

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        if emi:
            self.image = emi
        else:
            self.image = self.tester.get_emi(root_device_type="instance-store",not_location="loadbalancer")
        self.address = None
        self.volume = None
        self.private_addressing = False
        if not zone:
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name
        else:
            self.zone = zone
        self.reservation = None
        self.reservation_lock = threading.Lock()
        self.run_instance_params = {'image': self.image, 'user_data': user_data, 'username': instance_user,
                                'keypair': self.keypair.name, 'group': self.group.name,'zone': self.zone,
                                'timeout': self.instance_timeout}
        self.managed_network = True

        ### If I have access to the underlying infrastructure I can look
        ### at the network mode and only run certain tests where it makes sense
        if hasattr(self.tester,"service_manager"):
            cc = self.tester.get_component_machines("cc")[0]
            network_mode = cc.sys("cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus.conf | grep MODE")[0]
            if re.search("(SYSTEM|STATIC)", network_mode):
                self.managed_network = False

    def set_reservation(self, reservation):
        self.reservation_lock.acquire()
        self.reservation = reservation
        self.reservation_lock.release()

    def clean_method(self):
        self.tester.cleanup_artifacts()

    def BasicInstanceChecks(self):
        """
        This case was developed to run through a series of basic instance tests.
        The tests are as follows:
            - execute run_instances command
            - make sure that public DNS name and private IP aren't the same
              (This is for Managed/Managed-NOVLAN networking modes)
            - test to see if instance is ping-able
            - test to make sure that instance is accessible via ssh
              (ssh into instance and run basic ls command)
        If any of these tests fail, the test case will error out, logging the results.
        """
        reservation = self.tester.run_instance(**self.run_instance_params)
        for instance in reservation.instances:
            self.assertTrue( self.tester.wait_for_reservation(reservation) ,'Instance did not go to running')
            self.assertTrue( self.tester.ping(instance.ip_address), 'Could not ping instance')
            self.assertFalse( instance.found("ls -1 /dev/" + instance.rootfs_device + "2",  "No such file or directory"),  'Did not find ephemeral storage at ' + instance.rootfs_device + "2")
        self.set_reservation(reservation)
        return reservation

    def ElasticIps(self):
        """
        This case was developed to test elastic IPs in Eucalyptus. This test case does
        not test instances that are launched using the private-addressing option.
        The test case executes the following tests:
            - allocates an IP, associates the IP to the instance, then pings the instance.
            - disassociates the allocated IP, then pings the instance.
            - releases the allocated IP address
        If any of the tests fail, the test case will error out, logging the results.
        """
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation

        for instance in reservation.instances:
            if instance.ip_address == instance.private_ip_address:
                self.tester.debug("WARNING: System or Static mode detected, skipping ElasticIps")
                return reservation
            self.address = self.tester.allocate_address()
            self.assertTrue(self.address,'Unable to allocate address')
            self.tester.associate_address(instance, self.address)
            instance.update()
            self.assertTrue( self.tester.ping(instance.ip_address), "Could not ping instance with new IP")
            self.tester.disassociate_address_from_instance(instance)
            self.tester.release_address(self.address)
            self.address = None
            assert isinstance(instance, EuInstance)
            self.tester.sleep(5)
            instance.update()
            self.assertTrue( self.tester.ping(instance.ip_address), "Could not ping after dissassociate")
        self.set_reservation(reservation)
        return reservation

    def MultipleInstances(self):
        """
        This case was developed to test running multiple instances in a single request.
        The test launches two instances in one reservation, then checks that all of the
        instances reach a running state.  If there is a failure, the test case errors
        out, logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)

        reservation = self.tester.run_instance(min=2, max=2, **self.run_instance_params)
        self.assertTrue(self.tester.wait_for_reservation(reservation) ,'Not all instances  went to running')
        self.set_reservation(reservation)
        return reservation

    def LargestInstance(self):
        """
        This case was developed to test the largest instance type, c1.xlarge.  The test
        runs a single c1.xlarge instance, then checks that it reaches a running state.
        If there is a failure, the test case errors out, logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)
        reservation = self.tester.run_instance(type="c1.xlarge", **self.run_instance_params)
        self.assertTrue( self.tester.wait_for_reservation(reservation) ,'Not all instances  went to running')
        self.set_reservation(reservation)
        return reservation

    def MetaData(self):
        """
        This case was developed to test the metadata service of an instance for consistency.
        The following meta-data attributes are tested:
           - public-keys/0/openssh-key
           - security-groups
           - instance-id
           - local-ipv4
           - public-ipv4
           - ami-id
           - ami-launch-index
           - reservation-id
           - placement/availability-zone
           - kernel-id
           - public-hostname
           - local-hostname
           - hostname
           - ramdisk-id
           - instance-type
           - any bad metadata that shouldn't be present.
        Metadata nodes known to be missing (and therefore not tested):
         ['block-device-mapping/',  'ami-manifest-path']
        If any of these tests fail, the test case will error out, logging the results.
        """
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            ## Need to verify  the public key (could just be checking for a string of a certain length)
            self.assertTrue(re.match(instance.get_metadata("public-keys/0/openssh-key")[0].split('eucalyptus.')[-1], self.keypair.name), 'Incorrect public key in metadata')
            self.assertTrue(re.match(instance.get_metadata("security-groups")[0], self.group.name), 'Incorrect security group in metadata')
            # Need to validate block device mapping
            #self.assertTrue(re.search(instance_ssh.get_metadata("block-device-mapping/")[0], "")) 
            self.assertTrue(re.match(instance.get_metadata("instance-id")[0], instance.id), 'Incorrect instance id in metadata')
            self.assertTrue(re.match(instance.get_metadata("local-ipv4")[0] , instance.private_ip_address), 'Incorrect private ip in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-ipv4")[0] , instance.ip_address), 'Incorrect public ip in metadata')
            self.assertTrue(re.match(instance.get_metadata("ami-id")[0], instance.image_id), 'Incorrect ami id in metadata')
            self.assertTrue(re.match(instance.get_metadata("ami-launch-index")[0], instance.ami_launch_index), 'Incorrect launch index in metadata')
            self.assertTrue(re.match(instance.get_metadata("reservation-id")[0], reservation.id), 'Incorrect reservation in metadata')
            self.assertTrue(re.match(instance.get_metadata("placement/availability-zone")[0], instance.placement), 'Incorrect availability-zone in metadata')
            self.assertTrue(re.match(instance.get_metadata("kernel-id")[0], instance.kernel),  'Incorrect kernel id in metadata')
            self.assertTrue(re.match(instance.get_metadata("public-hostname")[0], instance.public_dns_name), 'Incorrect public host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("local-hostname")[0], instance.private_dns_name), 'Incorrect private host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("hostname")[0], instance.private_dns_name), 'Incorrect host name in metadata')
            self.assertTrue(re.match(instance.get_metadata("ramdisk-id")[0], instance.ramdisk ), 'Incorrect ramdisk in metadata') #instance-type
            self.assertTrue(re.match(instance.get_metadata("instance-type")[0], instance.instance_type ), 'Incorrect instance type in metadata')
            BAD_META_DATA_KEYS = ['foobar']
            for key in BAD_META_DATA_KEYS:
                self.assertTrue(re.search("Not Found", "".join(instance.get_metadata(key))), 'No fail message on invalid meta-data node')
        self.set_reservation(reservation)
        return reservation

    def DNSResolveCheck(self, zone=None):
        """
        This case was developed to test DNS resolution information for public/private DNS
        names and IP addresses.  The tested DNS resolution behavior is expected to follow
        AWS EC2.  The following tests are run using the associated meta-data attributes:
           - check to see if Eucalyptus Dynamic DNS is configured
           - nslookup on hostname; check to see if it matches local-ipv4
           - nslookup on local-hostname; check to see if it matches local-ipv4
           - nslookup on local-ipv4; check to see if it matches local-hostname
           - nslookup on public-hostname; check to see if it matches local-ipv4
           - nslookup on public-ipv4; check to see if it matches public-hostname
        If any of these tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation

        for instance in reservation.instances:
            if not re.search("internal", instance.private_dns_name):
                self.tester.debug("Did not find instance DNS enabled, skipping test")
                self.set_reservation(reservation)
                return reservation
            # Test to see if Dynamic DNS has been configured #
            # Per AWS standard, resolution should have private hostname or private IP as a valid response
            # Perform DNS resolution against public IP and public DNS name
            # Perform DNS resolution against private IP and private DNS name
            # Check to see if nslookup was able to resolve
            assert isinstance(instance, EuInstance)
            # Check nslookup to resolve public DNS Name to local-ipv4 address
            self.assertTrue(instance.found("nslookup " + instance.public_dns_name + " " + self.tester.ec2.host, instance.private_ip_address), "Incorrect DNS resolution for hostname.")
            # Check nslookup to resolve public-ipv4 address to public DNS name
            if self.managed_network:
                self.assertTrue( instance.found("nslookup " +  instance.ip_address + " " + self.tester.ec2.host, instance.public_dns_name), "Incorrect DNS resolution for public IP address")
            # Check nslookup to resolve private DNS Name to local-ipv4 address
            if self.managed_network:
                self.assertTrue(instance.found("nslookup " + instance.private_dns_name + " " + self.tester.ec2.host, instance.private_ip_address), "Incorrect DNS resolution for private hostname.")
            # Check nslookup to resolve local-ipv4 address to private DNS name
            self.assertTrue(instance.found("nslookup " +  instance.private_ip_address + " " + self.tester.ec2.host, instance.private_dns_name), "Incorrect DNS resolution for private IP address")
        self.assertTrue(self.tester.ping(instance.public_dns_name))
        self.set_reservation(reservation)
        return reservation

    def Reboot(self, zone=None):
        """
        This case was developed to test IP connectivity and volume attachment after
        instance reboot.  The following tests are done for this test case:
                   - creates a 1 GB EBS volume, then attaches the volume
                   - reboots the instance
                   - attempts to connect to the instance via ssh
                   - checks to see if the EBS volume is still attached
                   - detaches the volume
                   - deletes the volume
        If any of these tests fail, the test case will error out, logging the results.
        """
        if zone is None:
            zone = self.zone
        if not self.reservation:
            reservation = self.tester.run_instance(**self.run_instance_params)
        else:
            reservation = self.reservation
        for instance in reservation.instances:
            ### Create 1GB volume in first AZ
            volume = self.tester.create_volume(instance.placement, 1)
            volume_device = instance.attach_volume(volume)
            ### Reboot instance
            instance.reboot_instance_and_verify(waitconnect=20)
            instance.detach_euvolume(volume)
            self.tester.delete_volume(volume)
        self.set_reservation(reservation)
        return reservation

    def Churn(self):
        """
        This case was developed to test the robustness of Eucalyptus by running several
        BasicInstanceChecks iterations concurrently and terminating their instances while
        other iterations are still in flight.  The following steps are run:
            - submit the BasicInstanceChecks test case up to 4 times, 10 seconds apart.
            - once the submitted runs complete, terminate the instances each run launched.
            - wait for the zone's available VM count to return to its starting value.
        If any of these tests fail, the test case will error out, logging the results.
        """
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)

        available_instances_before = self.tester.get_available_vms(zone=self.zone)

        ## Run through count iterations of test
        count = 4
        future_instances = []

        with ThreadPoolExecutor(max_workers=count) as executor:
            ## Start asynchronous activity
            ## Submit `count` BasicInstanceChecks runs, 10s apart
            for i in xrange(count):
                future_instances.append(executor.submit(self.BasicInstanceChecks))
                self.tester.sleep(10)

        with ThreadPoolExecutor(max_workers=count) as executor:
            ## Start asynchronous activity
            ## Terminate all instances
            for future in future_instances:
                executor.submit(self.tester.terminate_instances, future.result())

        def available_after_greater():
            return self.tester.get_available_vms(zone=self.zone) >= available_instances_before
        self.tester.wait_for_result(available_after_greater, result=True, timeout=360)

    def PrivateIPAddressing(self):
        """
        This case was developed to test instances that are launched with private-addressing
        set to True.  The tests executed are as follows:
            - run an instance with private-addressing set to True
            - allocate/associate/disassociate/release an Elastic IP to that instance
            - check to see if the instance went back to private addressing
        If any of these tests fail, the test case will error out, logging the results.
        """
        if self.reservation:
            for instance in self.reservation.instances:
                if instance.ip_address == instance.private_ip_address:
                    self.tester.debug("WARNING: System or Static mode detected, skipping PrivateIPAddressing")
                    return self.reservation
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)
        reservation = self.tester.run_instance(private_addressing=True, **self.run_instance_params)
        for instance in reservation.instances:
            address = self.tester.allocate_address()
            self.assertTrue(address,'Unable to allocate address')
            self.tester.associate_address(instance, address)
            self.tester.sleep(30)
            instance.update()
            self.assertTrue( self.tester.ping(instance.ip_address), "Could not ping instance with new IP")
            address.disassociate()
            self.tester.sleep(30)
            instance.update()
            self.assertFalse(self.tester.ping(instance.ip_address), "Was able to ping instance that should have only had a private IP")
            address.release()
            if instance.ip_address != "0.0.0.0" and instance.ip_address != instance.private_ip_address:
                self.fail("Instance received a new public IP: " + instance.ip_address)
        self.tester.terminate_instances(reservation)
        self.set_reservation(None)
        return reservation

    def ReuseAddresses(self):
        """
        This case was developed to test that when instances are run in series, they get
        the same address.  The test launches an instance, checks the IP information, then
        terminates the instance.  This sequence is repeated 5 times in a row.  If there
        is an error, the test case will error out, logging the results.
        """
        prev_address = None
        if self.reservation:
            self.tester.terminate_instances(self.reservation)
            self.set_reservation(None)
        for i in xrange(5):
            reservation = self.tester.run_instance(**self.run_instance_params)
            for instance in reservation.instances:
                if prev_address is not None:
                    self.assertTrue(re.search(str(prev_address), str(instance.ip_address)), "Address " + str(prev_address) + " was not reused; instance got " + str(instance.ip_address) + " instead")
                prev_address = instance.ip_address
            self.tester.terminate_instances(reservation)