Example #1
class WalrusConcurrent(EutesterTestCase):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("-n", "--number", type=int, default=100)
        self.parser.add_argument("-c", "--concurrent", type=int, default=10)
        self.parser.add_argument("-s", "--size", type=int, default=1024)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = S3ops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
        self.start = time.time()
        self.bucket_name = "concurrency-" + str(int(self.start))
        self.tester.create_bucket(self.bucket_name)

    def clean_method(self):
        self.tester.clear_bucket(self.bucket_name)

    def Concurrent(self):
        key_payload = self.tester.id_generator(self.args.size)
        object_count = self.args.number
        thread_pool = []
        # Cap the pool at the requested concurrency level and submit one
        # upload per object.
        with ThreadPoolExecutor(max_workers=self.args.concurrent) as executor:
            for i in xrange(object_count):
                thread_pool.append(executor.submit(self.tester.upload_object, bucket_name=self.bucket_name, key_name="test" + str(i), contents=key_payload))
        end = time.time()
        total = end - self.start
        self.tester.debug("\nExecution time: {0}\n# of Objects: {1}\nObject Size: {2}B\nConcurrency Level of {3}".format(
                            total, self.args.number, self.args.size, self.args.concurrent))
        # Collect the delete futures in a separate list; appending to
        # thread_pool while iterating over it would never terminate.
        delete_pool = []
        with ThreadPoolExecutor(max_workers=self.args.concurrent) as executor:
            for upload in thread_pool:
                delete_pool.append(executor.submit(self.tester.delete_object, upload.result()))
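
A note on running this suite: eutester tests like WalrusConcurrent are normally driven from a __main__ block. A minimal sketch, assuming the create_testcase_from_method and run_test_case_list helpers shown in Example #2 (the exit-code handling is an assumption):

if __name__ == "__main__":
    testcase = WalrusConcurrent()
    # Wrap the Concurrent method as a test case; clean_method() handles
    # bucket cleanup after the run.
    test_list = [testcase.create_testcase_from_method(testcase.Concurrent)]
    exit(testcase.run_test_case_list(test_list))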
Example #2
class ResourceGeneration(EutesterTestCase):
    
    def __init__(self, credpath):
        self.tester = Eucaops(credpath=credpath)


    def CreateResources(self):
        users = self.tester.get_all_users() 
        testers = []
        for user in users:
            keys = self.tester.create_access_key(user_name=user['user_name'], delegate_account=user['account_name'])
            testers.append(Eucaops(aws_access_key_id=keys['access_key_id'], aws_secret_access_key=keys['secret_access_key'], ec2_ip=self.tester.ec2.host, s3_ip=self.tester.s3.host))
            
        import random
        for tester in testers:
            zone = random.choice(tester.get_zones())
            volume = self.tester.create_volume(size=1, azone=zone)
            snapshot = self.tester.create_snapshot(volume_id=volume.id)
            volume_from_snap = self.tester.create_volume(snapshot=snapshot, azone=zone)
            bucket = self.tester.create_bucket(self.tester.id_generator(12, string.ascii_lowercase  + string.digits))
            key = self.tester.upload_object(bucket_name= bucket.name, key_name= self.tester.id_generator(12, string.ascii_lowercase  + string.digits), contents= self.tester.id_generator(200))
            keypair = self.tester.add_keypair(self.tester.id_generator())
            group = self.tester.add_group(self.tester.id_generator())
    
    def run_suite(self):  
        self.testlist = [] 
        testlist = self.testlist
        testlist.append(self.create_testcase_from_method(self.CreateResources))
        self.run_test_case_list(testlist)  
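
A usage sketch for the class above; the credentials path is a placeholder:

if __name__ == "__main__":
    # Resources get created for every user visible to the admin credentials.
    suite = ResourceGeneration(credpath="~/.euca")
    suite.run_suite()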
Example #3
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self, config_file="cloud.conf", password="******"):
        self.tester = Eucaops(config_file=config_file, password=password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" +
                                           self.start_time)
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        ###
        self.standing_reservation = self.tester.run_instance(
            keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(
            self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(
            self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(
            self.standing_bucket_name, self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(
            self.standing_bucket_name, self.standing_key_name)

    def run_testcase(self, testcase_callback, **kwargs):
        poll_count = 20
        poll_interval = 20
        attempts = poll_count
        while (poll_count > 0):
            try:
                testcase_callback(**kwargs)
                break
            except Exception, e:
                self.tester.debug("Attempt failed due to: " + str(e) +
                                  "\nRetrying testcase in " +
                                  str(poll_interval))
            self.tester.sleep(poll_interval)
            poll_count = poll_count - 1
        if poll_count == 0:
            self.fail("Could not run an instance after " + str(attempts) +
                      " tries with " + str(poll_interval) +
                      "s sleep in between")
Example #4
class WalrusConcurrent(EutesterTestCase):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("-n", "--number", type=int, default=100)
        self.parser.add_argument("-c", "--concurrent", type=int, default=10)
        self.parser.add_argument("-s", "--size", type=int, default=1024)
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = S3ops(credpath=self.args.credpath,
                                region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)
        self.start = time.time()
        self.bucket_name = "concurrency-" + str(int(self.start))
        self.tester.create_bucket(self.bucket_name)

    def clean_method(self):
        self.tester.clear_bucket(self.bucket_name)

    def Concurrent(self):
        key_payload = self.tester.id_generator(self.args.size)
        object_count = self.args.number
        thread_pool = []
        # Cap the pool at the requested concurrency level.
        with ThreadPoolExecutor(max_workers=self.args.concurrent) as executor:
            for i in xrange(object_count):
                thread_pool.append(
                    executor.submit(self.tester.upload_object,
                                    bucket_name=self.bucket_name,
                                    key_name="test" + str(i),
                                    contents=key_payload))
        end = time.time()
        total = end - self.start
        self.tester.debug(
            "\nExecution time: {0}\n# of Objects: {1}\nObject Size: {2}B\nConcurrency Level of {3}"
            .format(total, self.args.number, self.args.size,
                    self.args.concurrent))
        # Collect the delete futures in a separate list; appending to
        # thread_pool while iterating over it would never terminate.
        delete_pool = []
        with ThreadPoolExecutor(max_workers=self.args.concurrent) as executor:
            for upload in thread_pool:
                delete_pool.append(
                    executor.submit(self.tester.delete_object,
                                    upload.result()))
Example #5
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" +
                                               self.start_time)
            self.tester.authorize_group_by_name(group_name=self.group.name)
            self.tester.authorize_group_by_name(group_name=self.group.name,
                                                port=-1,
                                                protocol="icmp")
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair("keypair-" +
                                                   self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            self.image = self.tester.get_emi(root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name

            self.tester.clc = self.tester.service_manager.get_enabled_clc(
            ).machine
            self.version = self.tester.clc.sys(
                "cat " + self.tester.eucapath +
                "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            ###
            self.standing_reservation = self.tester.run_instance(
                keypair=self.keypair.name,
                group=self.group.name,
                zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(
                self.volume)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(
                self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(
                self.standing_bucket_name, self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(
                self.standing_bucket_name, self.standing_key_name)
        except Exception, e:
            self.clean_method()
            raise Exception("Init for testcase failed. Reason: " + str(e))
Example #6
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
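        # Applied only when no Boto config section exists yet: keep retries
        # and the socket timeout low so calls against a downed endpoint fail
        # fast during failover testing.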
        if not boto.config.has_section('Boto'):
            boto.config.add_section('Boto')
            boto.config.set('Boto', 'num_retries', '1')
            boto.config.set('Boto', 'http_socket_timeout', '20')
        self.tester = Eucaops( config_file=self.args.config_file, password=self.args.password)
        self.tester.ec2.connection.timeout = 30
        self.servman = self.tester.service_manager
        self.instance_timeout = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" + self.start_time )
            self.tester.authorize_group_by_name(group_name=self.group.name )
            self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair( "keypair-" + self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            if self.args.emi:
                self.image = self.tester.get_emi(self.args.emi)
            else:
                self.image = self.tester.get_emi(root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name

            self.tester.clc = self.tester.service_manager.get_enabled_clc().machine
            self.version = self.tester.clc.sys("cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            ###
            self.standing_reservation = self.tester.run_instance(image=self.image ,keypair=self.keypair.name,group=self.group.name, zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
            for instance in self.standing_reservation.instances:
                instance.sys("echo " + instance.id  + " > " + self.device)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(self.standing_bucket_name, self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name, self.standing_key_name)
            self.run_instance_params = {'image': self.image, 'keypair': self.keypair.name, 'group': self.group.name,
                                        'zone': self.zone, 'timeout': self.instance_timeout}
        except Exception, e:
            self.clean_method()
            raise Exception("Init for testcase failed. Reason: " + str(e))
Example #7
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self, config_file="cloud.conf", password="******"):
        self.tester = Eucaops( config_file=config_file, password=password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" + self.start_time )
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        
        
        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        ### 
        self.standing_reservation = self.tester.run_instance(keypair=self.keypair.name,group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(self.standing_bucket_name, self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name, self.standing_key_name)
        

        
            
    def run_testcase(self, testcase_callback, **kwargs):
        poll_count = 20
        poll_interval = 20
        attempts = poll_count
        while (poll_count > 0):
            try:
                testcase_callback(**kwargs)
                break
            except Exception, e:
                self.tester.debug("Attempt failed due to: " + str(e) + "\nRetrying testcase in " + str(poll_interval))
            self.tester.sleep(poll_interval)
            poll_count = poll_count - 1
        if poll_count == 0:
            self.fail("Could not run an instance after " + str(attempts) + " tries with " + str(poll_interval) + "s sleep in between")
Example #8
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        self.tester = Eucaops( config_file=self.args.config_file, password=self.args.password)
        self.servman = self.tester.service_manager
        self.tester.poll_count = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        self.group = self.tester.add_group(group_name="group-" + self.start_time )
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + self.start_time)
        self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.private_addressing = False
        self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
        self.test_user_id = self.tester.s3.get_canonical_user_id()
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name

        self.tester.clc = self.tester.service_manager.get_enabled_clc().machine
        self.old_version = self.tester.clc.sys("cat " + self.tester.eucapath + "/etc/eucalyptus/eucalyptus-version")[0]
        ### Create standing resources that will be checked after all failures
        ### Instance, volume, buckets
        ### 
        self.standing_reservation = self.tester.run_instance(keypair=self.keypair.name,group=self.group.name, zone=self.zone)
        self.volume = self.tester.create_volume(self.zone)
        self.device = self.standing_reservation.instances[0].attach_volume(self.volume)
        self.standing_bucket_name = "failover-bucket-" + self.start_time
        self.standing_bucket = self.tester.create_bucket(self.standing_bucket_name)
        self.standing_key_name = "failover-key-" + self.start_time
        self.standing_key = self.tester.upload_object(self.standing_bucket_name, self.standing_key_name)
        self.standing_key = self.tester.get_objects_by_prefix(self.standing_bucket_name, self.standing_key_name)

    def clean_method(self):
        try:
            self.tester.terminate_instances()
        except Exception, e:
            self.tester.critical("Unable to terminate all instances")
        self.servman.start_all()
Example #9
class ResourceGeneration(EutesterTestCase):
    def __init__(self, credpath):
        self.tester = Eucaops(credpath=credpath)

    def CreateResources(self):
        users = self.tester.get_all_users()
        testers = []
        for user in users:
            keys = self.tester.create_access_key(
                user_name=user['user_name'],
                delegate_account=user['account_name'])
            testers.append(
                Eucaops(aws_access_key_id=keys['access_key_id'],
                        aws_secret_access_key=keys['secret_access_key'],
                        ec2_ip=self.tester.ec2.host,
                        s3_ip=self.tester.s3.host))

        import random
        for tester in testers:
            zone = random.choice(tester.get_zones())
            volume = self.tester.create_volume(size=1, azone=zone)
            snapshot = self.tester.create_snapshot(volume_id=volume.id)
            volume_from_snap = self.tester.create_volume(snapshot=snapshot,
                                                         azone=zone)
            bucket = self.tester.create_bucket(
                self.tester.id_generator(
                    12, string.ascii_lowercase + string.digits))
            key = self.tester.upload_object(
                bucket_name=bucket.name,
                key_name=self.tester.id_generator(
                    12, string.ascii_lowercase + string.digits),
                contents=self.tester.id_generator(200))
            keypair = self.tester.add_keypair(self.tester.id_generator())
            group = self.tester.add_group(self.tester.id_generator())

    def run_suite(self):
        self.testlist = []
        testlist = self.testlist
        testlist.append(self.create_testcase_from_method(self.CreateResources))
        self.run_test_case_list(testlist)
Example #10
class BucketTestSuite(EutesterTestCase):
    
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--endpoint", default=None)
        self.get_args()
        # Setup basic eutester object
        if self.args.endpoint:
            self.tester = S3ops(credpath=self.args.credpath, endpoint=self.args.endpoint)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config, password=self.args.password)

        self.bucket_prefix = "eutester-bucket-test-suite-" + str(int(time.time()))
        self.buckets_used = set()
        
    def test_bucket_get_put_delete(self):
        '''
        Method: Tests creating and deleting buckets as well as getting the bucket listing
        '''
        test_bucket=self.bucket_prefix + "-simple-test-bucket"
        self.buckets_used.add(test_bucket)
        self.tester.debug("Starting get/put/delete bucket test using bucket name: " + test_bucket)
 
        if self.args.endpoint:
            # If testing against any region besides us-east-1, the location
            # parameter of a create bucket request cannot be blank (us-east-1).
            self.tester.info('WARNING: The following AWS tests will correctly fail if we are testing against any region')
            self.tester.info('besides us-east-1 (endpoint s3.amazonaws.com), because the expected results are for us-east-1.')
        try :
            bucket = self.tester.s3.create_bucket(test_bucket)                
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not created correctly")
        except (S3ResponseError, S3CreateError) as e:
            self.fail(test_bucket + " create caused exception: " + str(e))
        
        try :    
            bucket = self.tester.s3.get_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket +" was not fetched by get_bucket call")
        except S3ResponseError as e:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Exception getting bucket" + str(e))
            
        
        self.tester.s3.delete_bucket(test_bucket)        
        try :
            if self.tester.s3.get_bucket(test_bucket) != None:
                self.tester.s3.delete_bucket(test_bucket)            
                self.fail("Delete of " + test_bucket + " failed, still exists")
        except S3ResponseError as e:
            self.tester.debug( "Correctly got exception trying to get a deleted bucket! " )
            
        self.tester.debug( "Testing an invalid bucket names, calls should fail." )
        def test_creating_bucket_invalid_names(bad_bucket):
            should_fail = False
            try:
                bucket = self.tester.create_bucket(bad_bucket)
                should_fail = True            
                try:
                    self.tester.delete_bucket(bucket)
                except:
                    self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
            except Exception as e:
                self.tester.debug("Correctly caught the exception for bucket name '" + bad_bucket + "' Reason: " + e.reason)
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)

        # with the EUCA-8864 fix, a new property 'objectstorage.bucket_naming_restrictions'
        # has been introduced, now 'bucket..123', 'bucket.' are actually valid bucket names
        # when using 'extended' naming convention.
        # http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
        # when DNS is not being used, for now buckets can be created with bucket
        # names like '/bucket123', 'bucket123/', see EUCA-8863
        # TODO check what bucket naming convention is being used for the test
        for bad_bucket in ["bucket&123", "bucket*123"]:
            test_creating_bucket_invalid_names(self.bucket_prefix + bad_bucket)

        """
        Test creating bucket with null name
        """
        try:
            null_bucket_name = ""
            bucket_obj = self.tester.create_bucket(null_bucket_name)
            self.tester.sleep(10)
            if bucket_obj:
                self.fail("Should have caught exception for creating bucket with empty-string name.")
        except S3ResponseError as e:
            assert (e.status == 405), 'Expected response status code to be 405, actual status code is ' + str(e.status)
            assert (re.search("MethodNotAllowed", e.code)), "Incorrect exception returned when creating bucket with null name."
        except Exception, e:
            self.tester.debug("Failed due to EUCA-7059 " + str(e))
Example #11
class BucketTestSuite(EutesterTestCase):
    
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--s3endpoint", default=None)
        self.get_args()
        # Setup basic eutester object
        if self.args.s3endpoint:
            self.tester = S3ops( credpath=self.args.credpath, endpoint=self.args.s3endpoint)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config, password=self.args.password)

        self.bucket_prefix = "eutester-bucket-test-suite-" + str(int(time.time())) + "-"
        self.buckets_used = set()
        
    def test_bucket_get_put_delete(self):
        '''
        Method: Tests creating and deleting buckets as well as getting the bucket listing
        '''
        test_bucket=self.bucket_prefix + "simple_test_bucket"
        self.buckets_used.add(test_bucket)
        self.tester.debug("Starting get/put/delete bucket test using bucket name: " + test_bucket)
 
        try :
            bucket = self.tester.s3.create_bucket(test_bucket)                
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not created correctly")
        except (S3ResponseError, S3CreateError) as e:
            self.fail(test_bucket + " create caused exception: " + str(e))
        
        try :    
            bucket = self.tester.s3.get_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket +" was not fetched by get_bucket call")
        except S3ResponseError as e:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Exception getting bucket" + e)
            
        
        self.tester.s3.delete_bucket(test_bucket)        
        try :
            if self.tester.s3.get_bucket(test_bucket) != None:
                self.tester.s3.delete_bucket(test_bucket)            
                self.fail("Delete of " + test_bucket + " failed, still exists")
        except S3ResponseError as e:
            self.tester.debug( "Correctly got exception trying to get a deleted bucket! " )
            
        self.tester.debug( "Testing an invalid bucket names, calls should fail." )
        try:
            bad_bucket = self.bucket_prefix + "bucket123/"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "bucket.123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "bucket&123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "bucket*123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "/bucket123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )

        """
        Test creating bucket with null name
        """
        try:
            null_bucket_name = ""
            bucket_obj = self.tester.create_bucket(null_bucket_name)
            self.tester.sleep(10)
            if bucket_obj:
                self.fail("Should have caught exception for creating bucket with empty-string name.")
        except S3ResponseError as e:
            self.assertEqual(e.status, 405, 'Expected response status code to be 405, actual status code is ' + str(e.status))
            self.assertTrue(re.search("MethodNotAllowed", e.code), "Incorrect exception returned when creating bucket with null name.")
        except Exception, e:
            self.tester.debug("Failed due to EUCA-7059 " + str(e))
Example #12
class ObjectTestSuite(EutesterTestCase):
    data_size = 1000

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--endpoint", default=None)
        self.get_args()
        # Setup basic eutester object
        if self.args.endpoint:
            self.tester = S3ops(credpath=self.args.credpath, endpoint=self.args.endpoint)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config, password=self.args.password)

        self.bucket_prefix = "eutester-" + str(int(time.time())) + "-"
        self.buckets_used = set()
        random.seed(time.time())
        self.test_bucket_name = self.bucket_prefix + str(random.randint(0,100))
        self.test_bucket = self.tester.create_bucket(self.test_bucket_name)
        self.buckets_used.add(self.test_bucket_name)
        #Create some test data for the objects
        def ensure_bucket_exists():
            try:
                self.tester.s3.get_bucket(self.test_bucket_name)
                return True
            except Exception:
                return False
        self.tester.wait_for_result(ensure_bucket_exists, True)
        self.test_object_data = ""
        for i in range(0, self.data_size):
            self.test_object_data += chr(random.randint(32,126))
        print "Generated data for objects: " + self.test_object_data


    def print_key_info(self, keys=None):
        for key in keys:
            self.tester.info("Key=" + str(key.key) + " -- version= " + str(key.version_id) + " -- eTag= " + str(key.etag)
                             + " -- ACL= " + str(key.get_xml_acl()))

    def put_object(self, bucket=None, object_key=None, object_data=None):
        """Puts an object with the specified name and data in the specified bucket"""
        if bucket == None:
            raise Exception("Cannot put object without proper bucket reference")

        try :
            key = Key(bucket=bucket,name=object_key)
            key.set_contents_from_string(object_data)
            return key.etag
        except Exception as e:
            self.tester.debug("Exception occured during 'PUT' of object " + object_key + " into bucket " + bucket.name + ": " + e.message)
            raise e

    def post_object(self, bucket_name=None, object_key=None, object_data=None, policy=None, acl=None):
        """Uploads an object using POST + form upload"""
        fields = {
            'key' : object_key,
            'acl' : acl,
            'AWSAccessKeyId': self.tester.get_access_key(),
            'Policy' : policy,
            'Signature': self.sign_policy(sak=self.tester.get_secret_key(), b64_policy_json=policy)
        }

        self.tester.info('Fields: ' + str(fields))
        url = 'http://' + self.tester.s3.host + ':' + str(self.tester.s3.port) \
              + '/' + self.tester.s3.path + '/' + bucket_name

        self.tester.debug('Sending POST request to: ' + url)
        response = requests.post(url, data=fields, files={'file': BytesIO(object_data)})
        return response

    def generate_default_policy_b64(self, bucket, key, acl):
        delta = timedelta(hours=1)
        expire_time = (datetime.utcnow() + delta).replace(microsecond=0)

        policy = {'conditions': [{'acl': acl},
                                 {'bucket': bucket},
                                 {'key': key},
                                ],
                  'expiration': time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                              expire_time.timetuple())}
        policy_json = json.dumps(policy)
        self.tester.info('generated default policy: %s' % policy_json)
        return base64.b64encode(policy_json)

    def sign_policy(self, sak=None, b64_policy_json=None):
        my_hmac = hmac.new(sak, digestmod=hashlib.sha1)
        my_hmac.update(b64_policy_json)
        return base64.b64encode(my_hmac.digest())

     
    def enable_versioning(self, bucket):
        """Enable versioning on the bucket, checking that it is not already enabled and that the operation succeeds."""
        vstatus = bucket.get_versioning_status()
        if vstatus != None and len(vstatus.keys()) > 0 and vstatus['Versioning'] != None and vstatus['Versioning'] != 'Disabled':
            self.tester.info("Versioning status should be null/Disabled, found: " + vstatus['Versioning'])
            return False
        else:
            self.tester.info("Bucket versioning is Disabled")

        #Enable versioning
        bucket.configure_versioning(True)
        if bucket.get_versioning_status()['Versioning'] == 'Enabled':
            self.tester.info("Versioning status correctly set to enabled")
            return True
        else:
            self.tester.info("Versioning status not enabled, should be.")
            return False

    def suspend_versioning(self, bucket):
        """Suspend versioning on the bucket, checking that it is previously enabled and that the operation succeeds."""
        if bucket.get_versioning_status()['Versioning'] == 'Enabled':
            self.tester.info("Versioning status correctly set to enabled")
        else:
            self.tester.info("Versioning status not enabled, should be. Can't suspend if not enabled....")
            return False

        #Enable versioning
        bucket.configure_versioning(False)
        if bucket.get_versioning_status()['Versioning'] == 'Suspended':
            self.tester.info("Versioning status correctly set to suspended")
            return True
        else:
            self.tester.info("Versioning status not suspended.")
            return False

    def check_version_listing(self, version_list, total_expected_length):
        """Checks a version listing for both completeness and ordering as well as pagination if required"""
        self.tester.info("Checking bucket version listing. Listing is " + str(len(version_list)) + " entries long")
        if total_expected_length >= 1000:
            assert(len(version_list) == 999)
        else:
            assert(len(version_list) == total_expected_length)

        prev_obj = None
        should_fail = None
        for obj in version_list:
            if isinstance(obj,Key):
                self.tester.info("Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified)
                if prev_obj != None:
                    if self.compare_versions(prev_obj, obj) > 0:
                        should_fail = obj
                prev_obj = obj
            else:
                self.tester.info("Not a key, skipping: " + str(obj))
        return should_fail

    def compare_versions(self, key1, key2):
        """
        Returns -1 if key1 < key2, 0 if equal, and 1 if key1 > key2. 
        Compares names lexicographically, if equal, compares date_modified if versions are different. 
        If version_id and name are equal then key1 = key2
        If an error occurs or something is wrong, returns None
        """
        if key1.name < key2.name:
            #self.debug("Key1: " + key1.name + " is less than " + key2.name)
            return -1
        elif key1.name > key2.name:
            #self.debug("Key1: " + key1.name + " is greater than " + key2.name)
            return 1
        else:
            if key1.version_id == key2.version_id:
                #self.debug("Key1: " + key1.name + " is the same version as " + key2.name)
                return 0
            else:
                if dateutil.parser.parse(key1.last_modified) > dateutil.parser.parse(key2.last_modified):
                    #self.debug("Key1: " + key1.last_modified + " last modified is greater than " + key2.last_modified)
                    return 1
                elif dateutil.parser.parse(key1.last_modified) < dateutil.parser.parse(key2.last_modified):
                    #self.debug("Key1: " + key1.last_modified + " last modified is less than " + key2.last_modified)
                    return -1
        return None

    def test_object_basic_ops(self):
        """
        Tests basic operations on objects: simple GET,PUT,HEAD,DELETE.
        
        """
        self.tester.info("Basic Object Operations Test (GET/PUT/HEAD)")
        if self.test_bucket == None:
            self.fail("Error: test_bucket not set, cannot run test")

        #Test PUT & GET
        testkey="testkey1-" + str(int(time.time()))
        self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data)

        ret_key = self.test_bucket.get_key(testkey)
        ret_content = ret_key.get_contents_as_string()

        if ret_content == self.test_object_data:
            self.tester.info("Set content = get content, put passed")
        else:
            if ret_content != None:
                self.tester.info("Got content: " + ret_content)
            else:
                self.tester.info("No content returned")
            self.tester.info("Expected content: " + self.test_object_data)
            self.fail("Put content not the same as what was returned")

        #Test HEAD
        key_meta = self.test_bucket.get_key(testkey)
        if key_meta.key != ret_key.key or key_meta.etag != ret_key.etag or key_meta.size != ret_key.size:
            self.tester.info("Something is wrong, the HEAD operation returned different metadata than the GET operation")
            self.tester.info("Expected key " + ret_key.key + " etag: " + ret_key.etag + " Got: " + key_meta.key + " etag: " + key_meta.etag)
        else:
            self.tester.info("HEAD meta = GET meta, all is good")

        #Test copy operation (GET w/source headers)
        self.tester.info("Testing COPY object")

        new_key = "testkey2"
        self.test_bucket.copy_key(new_key_name=new_key, src_bucket_name=self.test_bucket_name, src_key_name=testkey)
        keylist = self.test_bucket.list()
        counter = 0
        for k in keylist:
            if isinstance(k, Prefix):
                self.tester.info("Prefix: " + "NULL" if k == None else k.name)
            else:
                self.tester.info("Key: " + k.name + " Etag: " + k.etag)
                counter += 1
        if counter != 2:
            self.fail("Expected 2 keys after copy operation, found only: " + len(keylist))
        try:
            ret_key = self.test_bucket.get_key(new_key)
        except:
            self.fail("Could not get object copy")
        if ret_key == None:
            self.fail("Could not get object copy")

        if self.test_bucket.get_key(testkey).get_contents_as_string() != ret_key.get_contents_as_string():
            self.fail("Contents of original key and copy don't match")
        else:
            self.tester.info("Copy key contents match original!")

        #Test DELETE
        self.test_bucket.delete_key(testkey)
        ret_key = None
        ret_key = self.test_bucket.get_key(testkey)
        if ret_key:
            self.tester.info("Erroneously got: " + ret_key.name)
            raise S3ResponseError(404, "Should have thrown exception for getting a non-existent object")
        self.tester.info("Finishing basic ops test")

    def test_object_byte_offset_read(self):
        """Tests fetching specific byte offsets of the object"""
        self.tester.info("Byte-range Offset GET Test")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        testkey = "rangetestkey-" + str(int(time.time()))
        source_bytes = bytearray(self.test_object_data)

        #Put the object initially
        self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data)

        #Test range for first 100 bytes of object
        print "Trying start-range object get"
        try:
            data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=0-99"})
        except:
            self.fail("Failed range object get first 100 bytes")

        startrangedata = bytearray(data_str)
        print "Got: " + startrangedata
        print "Expected: " + str(source_bytes[:100])
        start = 0
        for i in range(0,100):
            if startrangedata[i-start] != source_bytes[i]:
                print "Byte: " + startrangedata[i] + " differs!"
                self.fail("Start-range Ranged-get failed")

        print "Trying mid-object range"
        try:
            data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=500-599"})
        except:
            self.fail("Failed range object get for middle 100 bytes")
        midrangedata = bytearray(data_str)
        start = 500
        for i in range(start,start+100):
            if midrangedata[i-start] != source_bytes[i]:
                print "Byte: " + midrangedata[i] + "differs!"
                self.fail("Mid-range Ranged-get failed")

        print "Trying end-range object get"
        #Test range for last 100 bytes of object
        try:
            data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=800-899"})
        except:
            self.fail("Failed range object get for last 100 bytes")

        endrangedata = bytearray(data_str)
        print "Got: " + str(endrangedata)
        start = 800
        try:
            for i in range(start,start+100):
                if endrangedata[i-start] != source_bytes[i]:
                    print "Byte: " + endrangedata[i] + "differs!"
                    self.fail("End-range Ranged-get failed")
        except Exception as e:
            print "Exception! Received: " + e

        print "Range test complete"

    def test_object_post(self):
        """Test the POST method for putting objects, requires a pre-signed upload policy and url"""
        self.tester.info("Testing POST form upload on bucket" + self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        itr = 1
        self.tester.info('Doing ' + str(itr) + ' POST upload iterations')
        acl = 'ec2-bundle-read'
        for k in xrange(0, itr):
            key = 'postkey1' + str(k)
            data = os.urandom(512)
            computed_md5 = '"' + hashlib.md5(data).hexdigest() + '"'
            self.tester.info('Data md5: ' + computed_md5 + ' data length: ' + str(len(data)))
            self.tester.info('Uploading object ' + self.test_bucket_name + '/' + key + ' via POST with acl : ' + acl)
            response = self.post_object(bucket_name=self.test_bucket_name,
                                        object_key=key,
                                        object_data=data,
                                        acl=acl,
                                        policy=self.generate_default_policy_b64(self.test_bucket_name, key, acl=acl))

            self.tester.info('Got response for POST: ' + str(response.status_code) + ': ' + str(response.text))
            assert(response.status_code == 204)
            fetched_key = self.test_bucket.get_key(key)
            fetched_content = fetched_key.get_contents_as_string()
            self.tester.info('Got fetched md5: ' + fetched_key.etag)
            self.tester.info('Calculated md5: ' + computed_md5 + ' received md5 ' + fetched_key.etag)
            assert(fetched_key.etag == computed_md5)
            assert(fetched_content == data)

        self.tester.info("Done with upload test")

    def test_object_post_large(self):
        """Test the POST method for putting objects, requires a pre-signed upload policy and url"""
        self.tester.info("Testing POST form upload on bucket" + self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        self.tester.info("Testing POST form upload of 10MB data on bucket" + self.test_bucket_name)
        itr = 1
        large_obj_size_bytes = 10 * 1024 * 1024 #10MB content
        self.tester.info('Doing ' + str(itr) + ' iterations of large object of size ' + str(large_obj_size_bytes) + ' with POST')
        acl = 'ec2-bundle-read'
        for i in xrange(0, itr):
            key = 'postkey_10mb_' + str(i)
            self.tester.info('Generating ' + str(large_obj_size_bytes) + ' bytes for upload')
            #Create some test data
            data = str(os.urandom(large_obj_size_bytes))

            self.tester.info("Data length: " + str(len(data)))
            computed_md5 = '"' + hashlib.md5(data).hexdigest() + '"'
            self.tester.info('Data md5 is: ' + computed_md5)

            self.tester.info('Uploading object via POST using acl: ' + acl)
            response = self.post_object(bucket_name=self.test_bucket.name,
                                        object_key=key,
                                        object_data=data,
                                        policy=self.generate_default_policy_b64(self.test_bucket.name, key, acl=acl),
                                        acl=acl)

            self.tester.info('Got response for POST: ' + str(response.status_code) + ': ' + str(response.text))
            assert(response.status_code == 204)

            self.tester.info('Fetching the content for verification')
            fetched_key = self.test_bucket.get_key(key_name=key)
            self.tester.info('Got fetched content length : ' + str(fetched_key.size) + ' Expected ' + str(len(data)))
            assert(fetched_key.size == len(data))
            self.tester.info('Got fetched md5: ' + fetched_key.etag)
            self.tester.info('Calculated md5: ' + computed_md5 + ' received md5 ' + fetched_key.etag)
            assert(fetched_key.etag == computed_md5)
            fetched_content = fetched_key.get_contents_as_string()
            assert(fetched_content == data)


    def test_object_large_objects(self):
        """Test operations on large objects (>1MB), but not so large that we must use the multi-part upload interface"""
        self.tester.info("Testing large-ish objects over 1MB in size on bucket" + self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        test_data = None
        large_obj_size_bytes = 5 * 1024 * 1024 #5MB
        self.tester.info("Generating " + str(large_obj_size_bytes) + " bytes of data")

        #Create some test data
        test_data = bytearray(os.urandom(large_obj_size_bytes))

        self.tester.info("Uploading object content of size: " + str(large_obj_size_bytes) + " bytes")
        keyname = "largeobj-" + str(int(time.time()))
        self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=test_data)
        self.tester.info("Done uploading object")

        ret_key = self.test_bucket.get_key(keyname)
        ret_data = ret_key.get_contents_as_string()

        if ret_data != test_data:
            self.fail("Fetched data and generated data don't match")
        else:
            self.tester.info("Data matches!")

        self.tester.info("Removing large object")
        self.test_bucket.delete_key(ret_key)
        self.tester.info("Complete large object test")

    def test_object_multipart(self):
        """Tests basic multipart upload functionality"""
        self.tester.info("Testing Multipart")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        self.test_multipart_upload()
        self.test_abort_multipart_upload()

    def test_multipart_upload(self):
        '''Basic multipart upload'''
        self.tester.info("Testing multipart upload")
        self.tester.info("Creating random file representing part...")
        temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
        temp_file.write(os.urandom(5 * 1024 * 1024))
        keyname="multi-" + str(int(time.time()))
        self.tester.info("Initiating multipart upload...much upload")
        reply = self.initiate_multipart_upload(keyname)
        self.tester.info("Uploading parts...Such Parts")
        for partnum in range(1, 11):
            temp_file.seek(0, os.SEEK_SET)
            reply.upload_part_from_file(temp_file, partnum)
        self.tester.info("Listing parts...")
        self.test_bucket.get_all_multipart_uploads()
        self.tester.info("Completing upload...So OSG")
        reply.complete_upload()
        temp_file.close()
        self.tester.info("HEAD request...");
        returned_key = self.test_bucket.get_key(keyname, validate=True);
        download_temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="mpu-download")
        self.tester.info("Downloading object...very mpu");
        returned_key.get_contents_to_file(download_temp_file);
        self.tester.info("Deleting object...WOW")
        self.test_bucket.delete_key(keyname)

    def test_abort_multipart_upload(self):
        '''Basic multipart upload'''
        self.tester.info("Testing abort multipart upload")
        temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
        temp_file.write(os.urandom(5 * 1024 * 1024))
        keyname="multi-" + str(int(time.time()))
        reply = self.initiate_multipart_upload(keyname)
        for partnum in range(1, 11):
            temp_file.seek(0, os.SEEK_SET)
            reply.upload_part_from_file(temp_file, partnum)
        self.test_bucket.get_all_multipart_uploads()
        self.tester.info("Canceling upload")
        reply.cancel_upload()
        temp_file.close()

    def initiate_multipart_upload(self, keyname):
        self.tester.info("Initiating multipart upload " + keyname)
        return self.test_bucket.initiate_multipart_upload(keyname)

    def test_object_versioning_enabled(self):
        """Tests object versioning for get/put/delete on a versioned bucket"""
        self.tester.info("Testing bucket Versioning-Enabled")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not properly enable versioning")

        #Create some keys
        keyname = "versionkey-" + str(int(time.time()))

        #Multiple versions of the data
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"

        #Test sequence: put v1, get v1, put v2, put v3, get v3, delete v3, restore with v1 (copy), put v3 again, delete v2 explicitly
        self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v1data)

        #Get v1
        obj_v1 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v1.etag,data=v1data)
        
        self.tester.info("Initial bucket state after object uploads with versioning enabled:")
        self.print_key_info(keys=[obj_v1])

        self.tester.info("Adding another version")
        #Put v2 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v2data)
        obj_v2 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v2.etag,data=v2data)
        self.print_key_info(keys=[obj_v1, obj_v2])

        self.tester.info("Adding another version")
        #Put v3 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data)
        obj_v3 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v3.etag,data=v3data)
        self.print_key_info(keys=[obj_v1, obj_v2, obj_v3])

        self.tester.info("Getting specific version")
        #Get a specific version, v1
        v1_return = self.test_bucket.get_key(key_name=keyname,version_id=obj_v1.version_id)
        self.print_key_info(keys=[v1_return])

        #Delete current latest version (v3)
        self.test_bucket.delete_key(keyname)

        del_obj = self.test_bucket.get_key(keyname)
        if del_obj:
            self.tester.info("Erroneously got: " + del_obj.name)
            raise S3ResponseError(404, "Should have thrown this exception for getting a non-existent object")

        #Restore v1 using copy
        self.tester.info("Restoring version")
        try:
            self.test_bucket.copy_key(new_key_name=obj_v1.key,src_bucket_name=self.test_bucket_name,src_key_name=keyname,src_version_id=obj_v1.version_id)
        except S3ResponseError as e:
            self.fail("Failed to restore key from previous version using copy got error: " + str(e.status))

        restored_obj = self.test_bucket.get_key(keyname)
        assert(restored_obj != None)
        self.tester.check_md5(eTag=restored_obj.etag,data=v1data)
        self.print_key_info(keys=[restored_obj])

        #Put v3 again
        self.tester.info("Adding another version")
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data)
        self.tester.check_md5(eTag=obj_v3.etag,data=v3data)
        self.print_key_info([self.test_bucket.get_key(keyname)])

        #Delete v2 explicitly
        self.test_bucket.delete_key(key_name=obj_v2.key,version_id=obj_v2.version_id)
        del_obj = self.test_bucket.get_key(keyname,version_id=obj_v2.version_id)
        if del_obj:
            raise S3ResponseError("Should have gotten 404 not-found error, but got: " + del_obj.key + " instead",404)

        #Show what's on top
        top_obj = self.test_bucket.get_key(keyname)
        self.print_key_info([top_obj])
        self.tester.check_md5(eTag=top_obj.etag,data=v3data)

        self.tester.info("Finished the versioning enabled test. Success!!")

    def clear_and_rebuild_bucket(self, bucket_name):
        self.tester.clear_bucket(bucket_name)
        return self.tester.create_bucket(bucket_name)
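
    # enable_versioning is used by the versioning tests above and below but is
    # defined elsewhere in the suite. A minimal sketch of its assumed behavior
    # with boto: switch the bucket's versioning configuration on and confirm
    # the status reads back as Enabled. enable_versioning_sketch is a
    # hypothetical stand-in, not the original helper.
    def enable_versioning_sketch(self, bucket):
        bucket.configure_versioning(True)
        status = bucket.get_versioning_status()
        return status.get('Versioning') == 'Enabled'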

    def test_object_versionlisting(self):
        """
        Tests object version listing from a bucket
        """
        version_max = 3
        keyrange = 20
        self.tester.info("Testing listing versions in a bucket and pagination using " + str(keyrange) + " keys with " + str(version_max) + " versions per key")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not enable versioning properly. Failing")

        key = "testkey-" + str(int(time.time()))
        keys = [ key + str(k) for k in range(0,keyrange)]
        contents = [ self.test_object_data + "--v" + str(v) for v in range(0,version_max)]

        try:
            for keyname in keys:
                #Put version_max versions of each key
                for v in range(0,version_max):
                    self.tester.info("Putting: " + keyname + " version " + str(v))
                    self.test_bucket.new_key(keyname).set_contents_from_string(contents[v])
        except S3ResponseError as e:
            self.fail("Failed putting object versions for test: " + str(e.status))
        listing = self.test_bucket.get_all_versions()
        self.tester.info("Bucket version listing is " + str(len(listing)) + " entries long")
        if keyrange * version_max >= 1000:
            if not len(listing) == 999:
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception("Bucket version listing did not limit the response to 999. Instead: " + str(len(listing)))
        else:
            if not len(listing) == keyrange * version_max:
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception("Bucket version listing did not equal the number uploaded. Instead: " + str(len(listing)))

        prev_obj = None
        for obj in listing:
            if isinstance(obj,Key):
                self.tester.info("Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified)
                if prev_obj != None:
                    if self.compare_versions(prev_obj, obj) <= 0:
                        raise Exception("Version listing not sorted correctly, offending key: " + obj.name + " version: " + obj.version_id + " date: " + obj.last_modified)
                prev_obj = obj
            else:
                self.tester.info("Not a key, skipping: " + str(obj))

    def test_object_versioning_suspended(self):
        """Tests object versioning on a suspended bucket, a more complicated test than the Enabled test"""
        self.tester.info("Testing bucket Versioning-Suspended")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        #Create some keys
        keyname1 = "versionkey1-" + str(int(time.time()))
        keyname2 = "versionkey2-" + str(int(time.time()))
        keyname3 = "versionkey3-" + str(int(time.time()))
        keyname4 = "versionkey4-" + str(int(time.time()))
        keyname5 = "versionkey5-" + str(int(time.time()))
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"

        vstatus = self.test_bucket.get_versioning_status()
        if vstatus:
            self.fail("Versioning status should be null/Disabled but was: " + str(vstatus))
        else:
            self.tester.info("Bucket versioning is Disabled")

        self.put_object(bucket=self.test_bucket, object_key=keyname1, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname2, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname3, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname5, object_data=v1data)

        key1 = self.test_bucket.get_key(keyname1)
        key2 = self.test_bucket.get_key(keyname2)
        key3 = self.test_bucket.get_key(keyname3)
        key4 = self.test_bucket.get_key(keyname4)
        key5 = self.test_bucket.get_key(keyname5)

        self.tester.info("Initial bucket state after object uploads without versioning:")
        self.print_key_info(keys=[key1,key2,key3,key4,key5])



        #Enable versioning
        self.test_bucket.configure_versioning(True)
        if self.test_bucket.get_versioning_status():
            self.tester.info("Versioning status correctly set to enabled")
        else:
            self.tester.info("Versionign status not enabled, should be.")
        
        #Update a subset of the keys
        key1_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname1,object_data=v2data)
        key2_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname2,object_data=v2data)

        key3_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v2data)
        key3_etag3=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v3data)

        #Delete a key
        self.test_bucket.delete_key(keyname5)

        #Suspend versioning
        self.test_bucket.configure_versioning(False)

        #Get latest of each key
        key1=self.test_bucket.get_key(keyname1)
        key2=self.test_bucket.get_key(keyname2)
        key3=self.test_bucket.get_key(keyname3)
        key4=self.test_bucket.get_key(keyname4)
        key5=self.test_bucket.get_key(keyname5)

        #Delete a key

        #Add a key

        #Add same key again

        #Fetch each key
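
        # The steps outlined above are sketched here under standard S3
        # suspended-versioning semantics (writes and deletes target the "null"
        # version id). This is an assumption for illustration, not the
        # original test logic.
        self.test_bucket.delete_key(keyname4)
        self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v2data)
        self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v3data)
        key4 = self.test_bucket.get_key(keyname4)
        self.tester.check_md5(eTag=key4.etag, data=v3data)
        self.print_key_info(keys=[key1, key2, key3, key4])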

    def test_object_acl(self):
        """Tests object acl get/set and manipulation"""
        self.fail("Test not implemented")

        #TODO: test custom and canned acls that are both valid an invalid

    def test_object_torrent(self):
        """Tests object torrents"""
        self.fail("Feature not implemented yet")

    def clean_method(self):
        '''This is the teardown method'''
        #Delete the testing bucket if it is left-over
        self.tester.info('Deleting the buckets used for testing')
        for bucket in self.buckets_used:
            try:
                self.tester.info('Checking bucket ' + bucket + ' for possible cleaning/delete')
                if self.tester.s3.head_bucket(bucket) != None:
                    self.tester.info('Found bucket exists, cleaning it')
                    self.tester.clear_bucket(bucket)
                else:
                    self.tester.info('Bucket ' + bucket + ' not found, skipping')
            except Exception as e:
                self.tester.info('Exception checking bucket ' + bucket + ' Exception msg: ' + str(e))
        return

Example #13
0
class BucketTestSuite(EutesterTestCase):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--s3endpoint", default=None)
        self.get_args()
        # Setup basic eutester object
        if self.args.s3endpoint:
            self.tester = S3ops(credpath=self.args.credpath,
                                endpoint=self.args.s3endpoint)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)

        self.bucket_prefix = "eutester-bucket-test-suite-" + str(
            int(time.time())) + "-"
        self.buckets_used = set()

    def test_bucket_get_put_delete(self):
        '''
        Method: Tests creating and deleting buckets as well as getting the bucket listing
        '''
        test_bucket = self.bucket_prefix + "simple_test_bucket"
        self.buckets_used.add(test_bucket)
        self.tester.debug(
            "Starting get/put/delete bucket test using bucket name: " +
            test_bucket)

        try:
            bucket = self.tester.s3.create_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not created correctly")
        except (S3ResponseError, S3CreateError) as e:
            self.fail(test_bucket + " create caused exception: " + e)

        try:
            bucket = self.tester.s3.get_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not fetched by get_bucket call")
        except S3ResponseError as e:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Exception getting bucket" + e)

        self.tester.s3.delete_bucket(test_bucket)
        try:
            if self.tester.s3.get_bucket(test_bucket) != None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Delete of " + test_bucket + " failed, still exists")
        except S3ResponseError as e:
            self.tester.debug(
                "Correctly got exception trying to get a deleted bucket! ")

        self.tester.debug(
            "Testing invalid bucket names; these calls should fail.")
        try:
            bad_bucket = self.bucket_prefix + "bucket123/"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "bucket.123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "bucket&123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "bucket*123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "/bucket123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")
        """
        Test creating bucket with null name
        """
        try:
            null_bucket_name = ""
            bucket_obj = self.tester.create_bucket(null_bucket_name)
            self.tester.sleep(10)
            if bucket_obj:
                self.fail(
                    "Should have caught exception for creating bucket with empty-string name."
                )
        except S3ResponseError as e:
            self.assertEqual(
                e.status, 405,
                'Expected response status code to be 405, actual status code is '
                + str(e.status))
            self.assertTrue(
                re.search("MethodNotAllowed", e.code),
                "Incorrect exception returned when creating bucket with null name."
            )
        except Exception as e:
            self.tester.debug("Failed due to EUCA-7059 " + str(e))
Example #14
0
class BucketTestSuite(EutesterTestCase):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--endpoint", default=None)
        self.get_args()
        # Setup basic eutester object
        if self.args.endpoint:
            self.tester = S3ops(credpath=self.args.credpath,
                                endpoint=self.args.endpoint)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)

        self.bucket_prefix = "eutester-bucket-test-suite-" + str(
            int(time.time()))
        self.buckets_used = set()

    def test_bucket_get_put_delete(self):
        '''
        Method: Tests creating and deleting buckets as well as getting the bucket listing
        '''
        test_bucket = self.bucket_prefix + "-simple-test-bucket"
        self.buckets_used.add(test_bucket)
        self.tester.debug(
            "Starting get/put/delete bucket test using bucket name: " +
            test_bucket)

        try:
            bucket = self.tester.s3.create_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not created correctly")
        except (S3ResponseError, S3CreateError) as e:
            self.fail(test_bucket + " create caused exception: " + e)

        try:
            bucket = self.tester.s3.get_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not fetched by get_bucket call")
        except S3ResponseError as e:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Exception getting bucket" + e)

        self.tester.s3.delete_bucket(test_bucket)
        try:
            if self.tester.s3.get_bucket(test_bucket) != None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Delete of " + test_bucket + " failed, still exists")
        except S3ResponseError as e:
            self.tester.debug(
                "Correctly got exception trying to get a deleted bucket! ")

        self.tester.debug(
            "Testing invalid bucket names; these calls should fail.")

        def test_creating_bucket_invalid_names(bad_bucket):
            should_fail = False
            try:
                bucket = self.tester.create_bucket(bad_bucket)
                should_fail = True
                try:
                    self.tester.delete_bucket(bucket)
                except:
                    self.tester.debug(
                        "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                    )
            except Exception as e:
                self.tester.debug(
                    "Correctly caught the exception for bucket name '" +
                    bad_bucket + "' Reason: " + e.reason)
            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)

        # with the EUCA-8864 fix, a new property 'objectstorage.bucket_naming_restrictions'
        # has been introduced, now 'bucket..123', 'bucket.' are actually valid bucket names
        # when using 'extended' naming convention.
        # http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
        # when DNS is not being used, for now buckets can be created with bucket
        # names like '/bucket123', 'bucket123/', see EUCA-8863
        # TODO check what bucket naming convention is being used for the test
        for bad_bucket in ["bucket&123", "bucket*123"]:
            test_creating_bucket_invalid_names(self.bucket_prefix + bad_bucket)
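
        # Illustration only: a rough regex check of the DNS-style ("strict")
        # naming rules referenced above. looks_dns_compliant is an assumption
        # for the reader's benefit, not how the
        # objectstorage.bucket_naming_restrictions property is enforced.
        def looks_dns_compliant(name):
            return re.match(r'^[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$', name) is not None

        for bad in ["bucket&123", "bucket*123"]:
            self.tester.debug(bad + " DNS-compliant? " + str(looks_dns_compliant(bad)))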
        """
        Test creating bucket with null name
        """
        try:
            null_bucket_name = ""
            bucket_obj = self.tester.create_bucket(null_bucket_name)
            self.tester.sleep(10)
            if bucket_obj:
                self.fail(
                    "Should have caught exception for creating bucket with empty-string name."
                )
        except S3ResponseError as e:
            assert (
                e.status == 405
            ), 'Expected response status code to be 405, actual status code is ' + str(
                e.status)
            assert (
                re.search("MethodNotAllowed", e.code)
            ), "Incorrect exception returned when creating bucket with null name."
        except Exception as e:
            self.tester.debug("Failed due to EUCA-7059 " + str(e))
Example #15
0
class BucketTestSuite(EutesterTestCase):
    def __init__(self, credpath):
        self.bucket_prefix = "buckettestsuite-" + str(int(time.time())) + "-"
        self.tester = Eucaops(credpath=credpath)
        self.test_user_id = self.tester.s3.get_canonical_user_id()

    def test_bucket_get_put_delete(self):
        '''Tests creating and deleting buckets as well as getting the bucket listing'''
        test_bucket = self.bucket_prefix + "simple_test_bucket"
        self.tester.debug(
            "Starting get/put/delete bucket test using bucket name: " +
            test_bucket)

        try:
            bucket = self.tester.s3.create_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not created correctly")
        except (S3ResponseError, S3CreateError) as e:
            self.fail(test_bucket + " create caused exception: " + e)

        try:
            bucket = self.tester.s3.get_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not fetched by get_bucket call")
        except S3ResponseError as e:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Exception getting bucket" + e)

        self.tester.s3.delete_bucket(test_bucket)
        try:
            if self.tester.s3.get_bucket(test_bucket) != None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Delete of " + test_bucket + " failed, still exists")
        except S3ResponseError as e:
            self.tester.debug(
                "Correctly got exception trying to get a deleted bucket! ")

        self.tester.debug(
            "Testing invalid bucket names; these calls should fail.")
        try:
            bad_bucket = self.bucket_prefix + "bucket123/"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "bucket.123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "bucket&123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "bucket*123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

        try:
            bad_bucket = self.bucket_prefix + "/bucket123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug(
                    "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail"
                )

            if should_fail:
                self.fail(
                    "Should have caught exception for bad bucket name: " +
                    bad_bucket)
        except:
            self.tester.debug("Correctly caught the exception")

    def test_bucket_acl(self):
        test_bucket = self.bucket_prefix + "acl_bucket_test"
        self.tester.debug('Starting ACL test with bucket name: ' + test_bucket)
        try:
            acl_bucket = self.tester.s3.create_bucket(test_bucket)
            self.tester.debug('Created bucket: ' + test_bucket)
        except S3CreateError:
            self.tester.debug(
                "Can't create the bucket, already exists. Deleting it an trying again"
            )
            try:
                self.tester.s3.delete_bucket(test_bucket)
                acl_bucket = self.tester.s3.create_bucket(test_bucket)
            except:
                self.tester.debug(
                    "Couldn't delete and create new bucket. Failing test")
                self.fail("Couldn't make the test bucket: " + test_bucket)

        policy = acl_bucket.get_acl()

        if policy == None:
            self.fail("No acl returned")

        self.tester.debug(policy)
        #Check that the acl is correct: owner full control.
        if len(policy.acl.grants) > 1:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Expected only 1 grant in acl. Found: " +
                      policy.acl.grants.grants.__len__())

        if policy.acl.grants[
                0].display_name != "eucalyptus" or policy.acl.grants[
                    0].permission != "FULL_CONTROL":
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Unexpected grant encountered: " +
                      policy.acl.grants[0].display_name + "  " +
                      policy.acl.grants[0].permission)

        #upload a new acl for the bucket
        new_acl = policy
        new_acl.acl.add_user_grant(permission="READ",
                                   user_id=self.test_user_id,
                                   display_name="eucalyptus_test")

        try:
            acl_bucket.set_acl(new_acl)
            acl_check = acl_bucket.get_acl()
        except S3ResponseError:
            self.fail("Failed to set or get new acl")

        self.tester.debug("Got ACL: " + acl_check.acl.to_xml())

        expected_result = '<AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>INSERT_USER_ID_HERE</ID><DisplayName>eucalyptus</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>INSERT_USER_ID_HERE</ID><DisplayName>eucalyptus</DisplayName></Grantee><Permission>READ</Permission></Grant></AccessControlList>'

        if acl_check == None or acl_check.acl.to_xml(
        ) != expected_result.replace("INSERT_USER_ID_HERE", self.test_user_id):
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Incorrect acl length or acl not found:\n" +
                      str(acl_check.acl.to_xml()) + "\n" +
                      expected_result.replace("INSERT_USER_ID_HERE",
                                              self.test_user_id))

        self.tester.debug("Grants 0 and 1: " +
                          acl_check.acl.grants[0].to_xml() + " -- " +
                          acl_check.acl.grants[1].to_xml())

        #Check each canned ACL string in boto to make sure Walrus does it right
        for acl in boto.s3.acl.CannedACLStrings:
            try:
                acl_bucket.set_acl(acl)
                acl_check = acl_bucket.get_acl()
            except Exception as e:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Got exception trying to set acl to " + acl + ": " +
                          str(e))

            self.tester.debug("Expecting a " + acl + " acl, got: " +
                              acl_check.acl.to_xml())

            expected_acl = self.tester.get_canned_acl(self.test_user_id, acl)
            if expected_acl == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(
                    "Got None when trying to generate expected acl for canned acl string: "
                    + acl)

            if expected_acl != acl_check.acl:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Invalid " + acl + " acl returned from Walrus:\n" +
                          acl_check.acl.to_xml() + "\nExpected\n" +
                          expected_acl.to_xml())
            else:
                self.tester.debug("Got correct acl for: " + acl)

        try:
            acl_bucket.set_acl('invalid-acl')
        except:
            self.tester.debug(
                "Caught expected exception from invalid canned-acl")

        self.tester.s3.delete_bucket(test_bucket)
        self.tester.debug("Bucket ACL: PASSED")
        pass

    def test_bucket_key_list_delim_prefix(self):
        """Tests the prefix/delimiter functionality of key listings and parsing"""
        test_bucket_name = self.bucket_prefix + "testbucketdelim"
        self.tester.debug(
            'Testing bucket key list delimiters and prefixes using bucket: ' +
            test_bucket_name)
        try:
            testbucket = self.tester.s3.create_bucket(
                bucket_name=test_bucket_name)
        except S3CreateError:
            self.tester.debug("bucket already exists, using it")
            try:
                testbucket = self.tester.s3.get_bucket(
                    bucket_name=test_bucket_name)
            except S3ResponseError as err:
                self.tester.debug("Fatal error: could to create or get bucket")
                for b in self.tester.s3.get_all_buckets():
                    self.tester.debug("Bucket: " + b.name)
                self.fail("Could not setup bucket, " + test_bucket_name +
                          " for test: " + err.error_message)

        prefix = "users"
        delim = "/"

        for i in range(10):
            tmp = str(i)
            self.tester.debug("adding keys iteration " + tmp)
            key = testbucket.new_key("testobject" + tmp)
            key.set_contents_from_string(
                "adlsfjaoivajsdlajsdfiajsfdlkajsfkajdasd")

            key = testbucket.new_key(prefix + "testkey" + tmp)
            key.set_contents_from_string("asjaoidjfafdjaoivnw")

            key = testbucket.new_key(prefix + delim + "object" + tmp)
            key.set_contents_from_string("avjaosvdafajsfd;lkaj")

            key = testbucket.new_key(prefix + delim + "objects" + delim +
                                     "photo" + tmp + ".jpg")
            key.set_contents_from_string("aoiavsvjasldfjadfiajss")

        keys = testbucket.get_all_keys(prefix=prefix,
                                       delimiter=delim,
                                       max_keys=10)
        self.tester.debug("Prefix with 10 keys max returned: " +
                          str(len(keys)) + " results")

        for k in keys:
            self.tester.debug(k)

        keys = testbucket.get_all_keys(prefix=prefix,
                                       delimiter=delim,
                                       max_keys=20)
        self.tester.debug("Prefix with 20 keys max returned: " +
                          str(len(keys)) + " results")

        for k in keys:
            self.tester.debug(k)

        print "Cleaning up the bucket"
        for i in range(10):
            testbucket.delete_key("testobject" + str(i))
            testbucket.delete_key(prefix + "testkey" + str(i))
            testbucket.delete_key(prefix + delim + "object" + str(i))
            testbucket.delete_key(prefix + delim + "objects" + delim +
                                  "photo" + str(i) + ".jpg")

        print "Deleting the bucket"
        self.tester.s3.delete_bucket(test_bucket_name)
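
    # For reference, a sketch of the listing semantics the test above relies
    # on: with prefix "users" and delimiter "/", a key such as
    # "users/objects/photo0.jpg" is rolled up into the common prefix "users/",
    # while "userstestkey0" comes back as a plain key. boto mixes Key and
    # Prefix objects in one result set; print_listing_sketch is a hypothetical
    # helper, not part of the original suite.
    def print_listing_sketch(self, bucket, prefix, delim):
        from boto.s3.prefix import Prefix
        for entry in bucket.get_all_keys(prefix=prefix, delimiter=delim):
            if isinstance(entry, Prefix):
                self.tester.debug("Common prefix: " + entry.name)
            else:
                self.tester.debug("Key: " + entry.name)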

    def run_suite(self):
        self.testlist = []
        testlist = self.testlist
        testlist.append(
            self.create_testcase_from_method(self.test_bucket_get_put_delete))
        testlist.append(
            self.create_testcase_from_method(
                self.test_bucket_key_list_delim_prefix))
        #Failing due to invalid private canned acl being returned
        #testlist.append(self.create_testcase_from_method(self.test_bucket_acl))
        self.run_test_case_list(testlist)
Example #16
0
class BucketTestSuite(EutesterTestCase):
    
    def __init__(self, credpath):
        self.bucket_prefix = "buckettestsuite-" + str(int(time.time())) + "-"
        self.tester = Eucaops(credpath=credpath)
        self.test_user_id = self.tester.s3.get_canonical_user_id()
    
    def test_bucket_get_put_delete(self):
        '''Tests creating and deleting buckets as well as getting the bucket listing'''
        test_bucket=self.bucket_prefix + "simple_test_bucket"
        self.tester.debug("Starting get/put/delete bucket test using bucket name: " + test_bucket)
 
        try :
            bucket = self.tester.s3.create_bucket(test_bucket)                
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket + " was not created correctly")
        except (S3ResponseError, S3CreateError) as e:
            self.fail(test_bucket + " create caused exception: " + e)
        
        try :    
            bucket = self.tester.s3.get_bucket(test_bucket)
            if bucket == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail(test_bucket +" was not fetched by get_bucket call")
        except S3ResponseError as e:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Exception getting bucket" + e)
            
        
        self.tester.s3.delete_bucket(test_bucket)        
        try :
            if self.tester.s3.get_bucket(test_bucket) != None:
                self.tester.s3.delete_bucket(test_bucket)            
                self.fail("Delete of " + test_bucket + " failed, still exists")
        except S3ResponseError as e:
            self.tester.debug( "Correctly got exception trying to get a deleted bucket! " )
            
        self.tester.debug( "Testing an invalid bucket names, calls should fail." )
        try:
            bad_bucket = self.bucket_prefix + "bucket123/"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "bucket.123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "bucket&123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "bucket*123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )
        
        try:
            bad_bucket = self.bucket_prefix + "/bucket123"
            self.tester.create_bucket(bad_bucket)
            should_fail = True            
            try:
                self.tester.delete_bucket(bad_bucket)
            except:
                self.tester.debug( "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" )
                
            if should_fail:
                self.fail("Should have caught exception for bad bucket name: " + bad_bucket)
        except:
            self.tester.debug( "Correctly caught the exception" )

        """
        Test creating bucket with null name
        """
        try:
            null_bucket_name = ""
            bucket_obj = self.tester.create_bucket(null_bucket_name)
            self.tester.sleep(10)
            if bucket_obj:
                self.fail("Should have caught exception for creating bucket with null name.")
        except S3ResponseError as e:
            self.assertEqual(e.status, 405, 'Expected response status code to be 405, actual status code is ' + str(e.status))
            self.assertTrue(re.search("MethodNotAllowed", e.code), "Incorrect exception returned when creating bucket with null name.")

    def test_bucket_acl(self):
        test_bucket = self.bucket_prefix + "acl_bucket_test"
        self.tester.debug('Starting ACL test with bucket name: ' + test_bucket)        
        try: 
            acl_bucket = self.tester.s3.create_bucket(test_bucket)
            self.tester.debug('Created bucket: ' + test_bucket)
        except S3CreateError:
            self.tester.debug( "Can't create the bucket, already exists. Deleting it an trying again" )
            try :
                self.tester.s3.delete_bucket(test_bucket)            
                acl_bucket = self.tester.s3.create_bucket(test_bucket)
            except:
                self.tester.debug( "Couldn't delete and create new bucket. Failing test" )
                self.fail("Couldn't make the test bucket: " + test_bucket)
                                
        policy = acl_bucket.get_acl()
        
        if policy == None:
            self.fail("No acl returned")
        
        self.tester.debug( policy )
        #Check that the acl is correct: owner full control.
        if len(policy.acl.grants) > 1:
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Expected only 1 grant in acl. Found: " + policy.acl.grants.grants.__len__())

        if policy.acl.grants[0].display_name != "eucalyptus" or policy.acl.grants[0].permission != "FULL_CONTROL":
            self.tester.s3.delete_bucket(test_bucket)
            self.fail("Unexpected grant encountered: " + policy.acl.grants[0].display_name + "  " + policy.acl.grants[0].permission)
                    
        #upload a new acl for the bucket
        new_acl = policy
        new_acl.acl.add_user_grant(permission="READ", user_id=self.test_user_id, display_name="eucalyptus_test")        
        
        try:
            acl_bucket.set_acl(new_acl)                
            acl_check = acl_bucket.get_acl()
        except S3ResponseError:
            self.fail("Failed to set or get new acl")
        
        self.tester.debug( "Got ACL: " + acl_check.acl.to_xml() )
        
        expected_result='<AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>INSERT_USER_ID_HERE</ID><DisplayName>eucalyptus</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>INSERT_USER_ID_HERE</ID><DisplayName>eucalyptus</DisplayName></Grantee><Permission>READ</Permission></Grant></AccessControlList>'
        
        if acl_check == None or acl_check.acl.to_xml() != expected_result.replace("INSERT_USER_ID_HERE",self.test_user_id):
            self.tester.s3.delete_bucket(test_bucket) 
            self.fail("Incorrect acl length or acl not found:\n" + str(acl_check.acl.to_xml()) + "\n" + expected_result.replace("INSERT_USER_ID_HERE",self.test_user_id))
        
        self.tester.debug( "Grants 0 and 1: " + acl_check.acl.grants[0].to_xml() + " -- " + acl_check.acl.grants[1].to_xml() )
        
        #Check each canned ACL string in boto to make sure Walrus does it right
        for acl in boto.s3.acl.CannedACLStrings:
            try: 
                acl_bucket.set_acl(acl)
                acl_check = acl_bucket.get_acl()
            except Exception as e:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Got exception trying to set acl to " + acl + ": " + str(e))
            
            self.tester.debug( "Expecting a " + acl + " acl, got: " + acl_check.acl.to_xml() )
            
            expected_acl = self.tester.get_canned_acl(self.test_user_id,acl)
            if expected_acl == None:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Got None when trying to generate expected acl for canned acl string: " + acl)
            
            
            if expected_acl != acl_check.acl:
                self.tester.s3.delete_bucket(test_bucket)
                self.fail("Invalid " + acl + " acl returned from Walrus:\n" + acl_check.acl.to_xml() + "\nExpected\n" + expected_acl.to_xml())
            else:
                self.tester.debug( "Got correct acl for: " + acl  )          
        
        
        try:
            acl_bucket.set_acl('invalid-acl')
        except:            
            self.tester.debug( "Caught expected exception from invalid canned-acl" )
        
        
        
        self.tester.s3.delete_bucket(test_bucket)
        self.tester.debug( "Bucket ACL: PASSED"  )    
        pass    
    
    def test_bucket_key_list_delim_prefix(self):
        """Tests the prefix/delimiter functionality of key listings and parsing"""
        test_bucket_name = self.bucket_prefix + "testbucketdelim"
        self.tester.debug('Testing bucket key list delimiters and prefixes using bucket: ' + test_bucket_name)        
        try: 
            testbucket = self.tester.s3.create_bucket(bucket_name=test_bucket_name)
        except S3CreateError:
            self.tester.debug( "bucket already exists, using it" )
            try:
                testbucket = self.tester.s3.get_bucket(bucket_name=test_bucket_name)
            except S3ResponseError as err:
                self.tester.debug( "Fatal error: could to create or get bucket" )
                for b in self.tester.s3.get_all_buckets():
                    self.tester.debug( "Bucket: " + b.name   )             
                self.fail("Could not setup bucket, " + test_bucket_name + " for test: " + err.error_message )

        prefix = "users"
        delim = "/"
        
        for i in range(10):
            tmp = str(i)
            self.tester.debug("adding keys iteration " + tmp)
            key = testbucket.new_key("testobject" + tmp)
            key.set_contents_from_string("adlsfjaoivajsdlajsdfiajsfdlkajsfkajdasd")
            
            key = testbucket.new_key(prefix + "testkey" + tmp)
            key.set_contents_from_string("asjaoidjfafdjaoivnw")
            
            key = testbucket.new_key(prefix + delim + "object" + tmp)
            key.set_contents_from_string("avjaosvdafajsfd;lkaj")
            
            key = testbucket.new_key(prefix + delim + "objects" + delim + "photo" + tmp + ".jpg")
            key.set_contents_from_string("aoiavsvjasldfjadfiajss")
    
        keys = testbucket.get_all_keys(prefix=prefix, delimiter=delim, max_keys=10)
        self.tester.debug( "Prefix with 10 keys max returned: " + str(len(keys)) + " results" )
        
        for k in keys:
                self.tester.debug( k )
            
        keys = testbucket.get_all_keys(prefix=prefix, delimiter=delim, max_keys=20)
        self.tester.debug( "Prefix with 20 keys max returned: " + str(len(keys)) + " results" )
        
        for k in keys:
                self.tester.debug( k )
            
        print "Cleaning up the bucket"
        for i in range(10):
            testbucket.delete_key("testobject" + str(i))
            testbucket.delete_key(prefix + "testkey" + str(i))
            testbucket.delete_key(prefix + delim + "object" + str(i))
            testbucket.delete_key(prefix + delim + "objects" + delim + "photo" + str(i) + ".jpg")

        print "Deleting the bucket"
        self.tester.s3.delete_bucket(test_bucket_name)
                
    def run_suite(self):  
        self.testlist = [] 
        testlist = self.testlist
        testlist.append(self.create_testcase_from_method(self.test_bucket_get_put_delete))
        testlist.append(self.create_testcase_from_method(self.test_bucket_key_list_delim_prefix))
        #Failing due to invalid private canned acl being returned
        #testlist.append(self.create_testcase_from_method(self.test_bucket_acl))       
        self.run_test_case_list(testlist)  
Example #17
0
class ReportingBasics(EutesterTestCase):
    def __init__(self, config_file=None, password=None):
        self.setuptestcase()
        # Setup basic eutester object
        self.tester = Eucaops( config_file=config_file, password=password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" + str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name )
        self.tester.authorize_group_by_name(group_name=self.group.name, port=-1, protocol="icmp" )
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair( "keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.volume = None
        self.bucket = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.clean_method = self.cleanup
        self.cur_time = str(int(time.time()))
        date_fields = time.localtime()
        self.date = str(date_fields.tm_year) + "-" + str(date_fields.tm_mon) + "-31"
        clcs = self.tester.get_component_machines("clc")
        if len(clcs) == 0:
            raise Exception("No CLC found")
        else:
            self.clc = clcs[0]
        poll_interval = 1
        write_interval = 1
        size_time_size_unit = "MB"
        size_time_time_unit = "MINS"
        size_unit = "MB"
        time_unit = "MINS"
        self.modify_property(property="reporting.default_poll_interval_mins",value=poll_interval)
        self.modify_property(property="reporting.default_write_interval_mins",value=write_interval)
        self.modify_property(property="reporting.default_size_time_size_unit",value=size_time_size_unit)
        self.modify_property(property="reporting.default_size_time_time_unit",value=size_time_time_unit)
        self.modify_property(property="reporting.default_size_unit",value=size_unit)
        self.modify_property(property="reporting.default_time_unit",value=time_unit)

    def cleanup(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation), "Unable to terminate instance(s)")
        if self.volume:
            self.tester.delete_volume(self.volume)
        if self.bucket:
            self.tester.clear_bucket(self.bucket)
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def instance(self):
        self.reservation = self.tester.run_instance(self.image, keypair=self.keypair.name, group=self.group.name, zone=self.zone)
        file_size_in_mb = 500
        for instance in self.reservation.instances:
            assert isinstance(instance, EuInstance)
            self.volume = self.tester.create_volume(zone=self.zone, size=4)
            device_path = instance.attach_volume(self.volume)
            instance.sys("mkfs.ext3 -F " + device_path)
            instance.sys("mount " + device_path + " /mnt")
            ### Write to root fs
            instance.sys("dd if=/dev/zero of=/tmp/test.img count=" + str(file_size_in_mb) + " bs=1M")
            ### Write to volume
            instance.sys("dd if=/dev/zero of=/mnt/test.img count=" + str(file_size_in_mb) + " bs=1M")

        self.tester.sleep(180)
        for instance in self.reservation.instances:
            report_output = self.generate_report("instance","csv", self.date)
            instance_lines = self.tester.grep(instance.id, report_output)
            for line in instance_lines:
                instance_data = self.parse_instance_line(line)
                #if not re.search( instance.id +",m1.small,1,9,0.2,0,0,0,0,93,200,0.2,0.0,0,1", line):
                if not re.match("m1.small", instance_data.type):
                    raise Exception("Failed to find proper output for " + str(instance) + " type. Received: " + instance_data.type )
                if not int(instance_data.number)  == 1:
                    raise Exception("Failed to find proper output for " + str(instance) + " number. Received: " + instance_data.number )
                if not int(instance_data.unit_time)  > 2 :
                    raise Exception("Failed to find proper output for " + str(instance) + " unit_time. Received: " + instance_data.unit_time )
                if not int(instance_data.disk_write)  > 1000:
                    raise Exception("Failed to find proper output for " + str(instance) + " disk_write. Received: " + instance_data.disk_write )
                if not int(instance_data.disk_time_write)  > 200:
                    raise Exception("Failed to find proper output for " + str(instance) + " disk_time_write. Received: " + instance_data.disk_time_write )


    def parse_instance_line(self, line):
        InstanceData = namedtuple('InstanceData', 'id type number unit_time cpu net_total_in net_total_out '
                                                'net_extern_in net_extern_out disk_read disk_write disk_iops_read '
                                                'disk_iops_write disk_time_read disk_time_write')
        values = line.split(",")
        return InstanceData(*values[:15])
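
    # Usage sketch (hypothetical values; the field order is taken from the
    # commented-out sample line in instance() above):
    #   data = self.parse_instance_line(
    #       "i-ABC123,m1.small,1,9,0.2,0,0,0,0,93,200,0.2,0.0,0,1")
    #   data.type == "m1.small", data.number == "1", data.unit_time == "9"
    # Every field is parsed as a string, so callers cast with int() as needed.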

    def s3(self):
        self.bucket = self.tester.create_bucket(bucket_name="reporting-bucket-" + self.cur_time)
        key_size = 10
        self.tester.debug("Creating random " + str(key_size) + "MB of data")
        rand_string = self.tester.id_generator(size=1024*1024*10)
        self.tester.upload_object(self.bucket.name, "reporting-key" ,contents=rand_string)
        self.tester.sleep(120)
        report_output = self.generate_report("s3", "csv",self.date)
        bucket_lines = self.tester.grep(self.bucket.name, report_output)
        for line in bucket_lines:
            bucket_data = self.parse_bucket_line(line)
            if not int(bucket_data.size) == 10:
                raise Exception('Failed to find proper size for %s' % str(self.bucket))
            if not int(bucket_data.keys) == 1:
                raise Exception('Failed to find proper number of keys for %s' % str(self.bucket))
            if not int(bucket_data.unit_time) > 16:
                raise Exception('Failed to find proper amount of usage for %s' % str(self.bucket))

    def parse_bucket_line(self, line):
        BucketData = namedtuple('BucketData', 'name keys size unit_time')
        values = line.split(",")
        return BucketData(values[0],values[1],values[2],values[3] )

    def generate_report(self, type, format, end_date):
        return self.clc.sys("source " + self.tester.credpath + "/eucarc && eureport-generate-report -t " +
                    str(type) +" -f " + str(format) + " -e " + str(end_date) )

    def modify_property(self, property, value):
        """
        Modify a eucalyptus property through the command line euca-modify-property tool
        property        Property to modify
        value           Value to set it too
        """
        command = "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-modify-property -p " + str(property) + "=" + str(value)
        if self.clc.found(command, property):
            self.debug("Properly modified property " + property)
        else:
            raise Exception("Setting property " + property + " failed")
Example #18
0
class OSGConcurrency(EutesterTestCase):

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("-b", "--buckets", type=int, default=5)
        self.parser.add_argument("-o", "--objects", type=int, default=5)
        self.parser.add_argument("-T", "--threads", type=int, default=5)
        self.parser.add_argument("-S", "--object-size", type=int, default=64, help="Object size in KB")
        self.parser.add_argument("-M", "--mpu-threshold", type=int, default=5120,
                                 help="Multipart upload is used when the object size is bigger than the mpu-threshold "
                                      "value in Kilobyte. Any value less than 5120KB will result single file upload. "
                                      "Default value is used when not passed as an argument.")
        self.get_args()
        # Setup basic eutester object
        if self.args.region:
            self.tester = S3ops( credpath=self.args.credpath, region=self.args.region)
        else:
            self.tester = Eucaops(credpath=self.args.credpath, config_file=self.args.config,password=self.args.password)
        self.start = time.time()

        self.bucket_names = []
        self.bucket_name = "concurrency-" + str(int(self.start)) + "-"

        for i in xrange(self.args.buckets):
            bucket_name = self.bucket_name + str(i)
            self.bucket_names.append(bucket_name)
            self.tester.create_bucket(bucket_name)

        self.temp_files = []

    def clean_method(self):
        with ThreadPoolExecutor(max_workers=self.args.threads) as executor:
            for i in xrange(self.args.buckets):
                executor.submit(self.tester.clear_bucket, self.bucket_names[i])
        for tf in self.temp_files:
            tf.close()

    def get_object(self, bucket, key_name, meta=True):
        """
        Writes the object to a temp file and returns the object's meta info (e.g. hash, name).
        Returns the temp file object when meta is set to False.
        """
        self.debug("Getting object '" + key_name + "'")
        ret_key = bucket.get_key(key_name)
        temp_object = tempfile.NamedTemporaryFile(mode="w+b", prefix="eutester-mpu")
        self.temp_files.append(temp_object)
        ret_key.get_contents_to_file(temp_object)
        if meta:
            return {'name': temp_object.name, 'hash': self.get_hash(temp_object.name)}
        return temp_object

    def single_upload(self, bucket, key_name, file_path):
        key = bucket.new_key(key_name)
        key.set_contents_from_filename(file_path)
        self.debug("Uploaded key '" + key_name + "' to bucket '" + bucket.name + "'")
        return key

    def multipart_upload(self, bucket, key_name, eufile):
        part_size = 1024 * self.args.mpu_threshold
        eufile.seek(0, os.SEEK_END)
        eufile_size = eufile.tell()
        # float division so a trailing partial part is not truncated away
        num_parts = int(ceil(eufile_size / float(part_size)))

        mpu = bucket.initiate_multipart_upload(key_name)
        self.debug("Initiated MPU. Using MPU Id: " + mpu.id)

        for i in range(num_parts):
            start = part_size * i
            file_part = open(eufile.name, 'rb')
            file_part.seek(start)
            data = file_part.read(part_size)
            file_part.close()
            mpu.upload_part_from_file(StringIO(data), i+1)
            self.debug("Uploaded part " + str(i+1) + " of '" + key_name + "' to bucket '" + bucket.name + "'")
        self.debug("Completing multipart upload of '" + key_name + "' to bucket '" +
                   bucket.name + "'" + " using mpu id: " + mpu.id)
        mpu.complete_upload()
        self.debug("Completed multipart upload of '" + key_name + "' to bucket '" + bucket.name + "'")

    def put_get_check(self, bucket_name, key_name, eu_file):
        """
        PUT objects, GET objects and then verify objects with object hash
        5MB is a hard-coded limit for MPU in OSG
        """
        bucket = self.tester.get_bucket_by_name(bucket_name)
        if (os.path.getsize(eu_file.name) > (5 * 1024 * 1024)) and (self.args.mpu_threshold >= (5 * 1024)):
            self.multipart_upload(bucket, key_name, eu_file)
        else:
            self.single_upload(bucket, key_name, eu_file.name)

        ret_object_meta = self.get_object(bucket, key_name)
        local_object_hash = self.get_hash(eu_file.name)

        self.debug("Matching local and remote hashes of object: " + eu_file.name)
        self.debug("Remote object: " + ret_object_meta['hash'])
        self.debug("Local object:  " + local_object_hash)
        if ret_object_meta['hash'] != local_object_hash:
            self.debug("return_object hash: " + ret_object_meta['hash'])
            self.debug("local_object hash: " + local_object_hash)
            self.debug("Uploaded content and downloaded content are not same.")
            return False
        return True

    def get_content(self, file_path):
        with open(file_path, 'rb') as file_to_check:
            data = file_to_check.read()
        return data

    def get_hash(self, file_path):
        return hashlib.md5(self.get_content(file_path)).hexdigest()

    def create_file(self, size_in_kb, file_name="eutester-object"):
        temp_file = tempfile.NamedTemporaryFile(mode='w+b', prefix=file_name)
        self.temp_files.append(temp_file)
        temp_file.write(os.urandom(1024 * size_in_kb))
        # Flush so the content is on disk before the file is re-opened by name
        temp_file.flush()
        return temp_file.name
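
    # Note: the NamedTemporaryFile objects are kept open (and tracked in
    # temp_files) so the files are not deleted from disk while worker threads
    # still read them by name; clean_method() closes them at teardown.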

    def concurrent_upload(self):
        self.debug("Creating object of " + str(self.args.object_size) + "KB")
        eu_file = open(self.create_file(self.args.object_size), 'rb')
        thread_pool = []
        with ThreadPoolExecutor(max_workers=self.args.threads) as executor:
            for i in xrange(self.args.buckets):
                for j in xrange(self.args.objects):
                    thread_pool.append(executor.submit(self.put_get_check, bucket_name=self.bucket_names[i],
                                                       key_name=eu_file.name + str(j), eu_file=eu_file))

        for tp in thread_pool:
            try:
                if not tp.result():
                    self.fail("[CRITICAL] failed upload in thread")
            except Exception as e:
                self.fail("Found exception in thread-pool: " + e.message)
Example #19
0
class ReportingBasics(EutesterTestCase):
    def __init__(self, config_file=None, password=None):
        self.setuptestcase()
        # Setup basic eutester object
        self.tester = Eucaops(config_file=config_file, password=password)
        self.tester.poll_count = 120

        ### Add and authorize a group for the instance
        self.group = self.tester.add_group(group_name="group-" +
                                           str(time.time()))
        self.tester.authorize_group_by_name(group_name=self.group.name)
        self.tester.authorize_group_by_name(group_name=self.group.name,
                                            port=-1,
                                            protocol="icmp")
        ### Generate a keypair for the instance
        self.keypair = self.tester.add_keypair("keypair-" + str(time.time()))
        self.keypath = '%s/%s.pem' % (os.curdir, self.keypair.name)
        self.image = self.tester.get_emi(root_device_type="instance-store")
        self.reservation = None
        self.volume = None
        self.bucket = None
        self.private_addressing = False
        zones = self.tester.ec2.get_all_zones()
        self.zone = random.choice(zones).name
        self.clean_method = self.cleanup
        self.cur_time = str(int(time.time()))
        date_fields = time.localtime()
        self.date = str(date_fields.tm_year) + "-" + str(
            date_fields.tm_mon) + "-31"
        clcs = self.tester.get_component_machines("clc")
        if len(clcs) == 0:
            raise Exception("No CLC found")
        else:
            self.clc = clcs[0]
        poll_interval = 1
        write_interval = 1
        size_time_size_unit = "MB"
        size_time_time_unit = "MINS"
        size_unit = "MB"
        time_unit = "MINS"
        self.modify_property(property="reporting.default_poll_interval_mins",
                             value=poll_interval)
        self.modify_property(property="reporting.default_write_interval_mins",
                             value=write_interval)
        self.modify_property(property="reporting.default_size_time_size_unit",
                             value=size_time_size_unit)
        self.modify_property(property="reporting.default_size_time_time_unit",
                             value=size_time_time_unit)
        self.modify_property(property="reporting.default_size_unit",
                             value=size_unit)
        self.modify_property(property="reporting.default_time_unit",
                             value=time_unit)

    def cleanup(self):
        if self.reservation:
            self.assertTrue(self.tester.terminate_instances(self.reservation),
                            "Unable to terminate instance(s)")
        if self.volume:
            self.tester.delete_volume(self.volume)
        if self.bucket:
            self.tester.clear_bucket(self.bucket)
        self.tester.delete_group(self.group)
        self.tester.delete_keypair(self.keypair)
        os.remove(self.keypath)

    def instance(self):
        self.reservation = self.tester.run_instance(self.image,
                                                    keypair=self.keypair.name,
                                                    group=self.group.name,
                                                    zone=self.zone)
        file_size_in_mb = 500
        for instance in self.reservation.instances:
            assert isinstance(instance, EuInstance)
            self.volume = self.tester.create_volume(azone=self.zone, size=4)
            device_path = instance.attach_volume(self.volume)
            instance.sys("mkfs.ext3 -F " + device_path)
            instance.sys("mount " + device_path + " /mnt")
            ### Write to root fs
            instance.sys("dd if=/dev/zero of=/tmp/test.img count=" +
                         str(file_size_in_mb) + " bs=1M")
            ### Write to volume
            instance.sys("dd if=/dev/zero of=/mnt/test.img count=" +
                         str(file_size_in_mb) + " bs=1M")

        self.tester.sleep(180)
        for instance in self.reservation.instances:
            report_output = self.generate_report("instance", "csv", self.date)
            instance_lines = self.tester.grep(instance.id, report_output)
            for line in instance_lines:
                instance_data = self.parse_instance_line(line)
                #if not re.search( instance.id +",m1.small,1,9,0.2,0,0,0,0,93,200,0.2,0.0,0,1", line):
                if not re.match("m1.small", instance_data.type):
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " type. Received: " +
                                    instance_data.type)
                if not int(instance_data.number) == 1:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " number. Received: " +
                                    instance_data.number)
                if not int(instance_data.unit_time) > 2:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " unit_time. Received: " +
                                    instance_data.unit_time)
                if not int(instance_data.disk_write) > 1000:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) + " disk_write. Received: " +
                                    instance_data.disk_write)
                if not int(instance_data.disk_time_write) > 200:
                    raise Exception("Failed to find proper output for " +
                                    str(instance) +
                                    " disk_time_write. Received: " +
                                    instance_data.disk_time_write)

    def parse_instance_line(self, line):
        InstanceData = namedtuple(
            'InstanceData',
            'id type number unit_time cpu net_total_in net_total_out '
            'net_extern_in net_extern_out disk_read disk_write disk_iops_read '
            'disk_iops_write disk_time_read disk_time_write')
        values = line.split(",")
        return InstanceData(values[0], values[1], values[2], values[3],
                            values[4], values[5], values[6], values[7],
                            values[8], values[9], values[10], values[11],
                            values[12], values[13], values[14])

    def s3(self):
        self.bucket = self.tester.create_bucket(
            bucket_name="reporting-bucket-" + self.cur_time)
        key_size = 10
        self.tester.debug("Creating random " + str(key_size) + "MB of data")
        rand_string = self.tester.id_generator(size=1024 * 1024 * key_size)
        self.tester.upload_object(self.bucket.name,
                                  "reporting-key",
                                  contents=rand_string)
        self.tester.sleep(120)
        report_output = self.generate_report("s3", "csv", self.date)
        bucket_lines = self.tester.grep(self.bucket.name, report_output)
        for line in bucket_lines:
            bucket_data = self.parse_bucket_line(line)
            if not int(bucket_data.size) == 10:
                raise Exception('Failed to find proper size for %s' %
                                str(self.bucket))
            if not int(bucket_data.keys) == 1:
                raise Exception('Failed to find proper number of keys for %s' %
                                str(self.bucket))
            if not int(bucket_data.unit_time) > 16:
                raise Exception(
                    'Failed to find proper amount of usage for %s' %
                    str(self.bucket))

    def parse_bucket_line(self, line):
        BucketData = namedtuple('BucketData', 'name keys size unit_time')
        values = line.split(",")
        return BucketData(values[0], values[1], values[2], values[3])

    def generate_report(self, type, format, end_date):
        return self.clc.sys("source " + self.tester.credpath +
                            "/eucarc && eureport-generate-report -t " +
                            str(type) + " -f " + str(format) + " -e " +
                            str(end_date))

    def modify_property(self, property, value):
        """
        Modify a eucalyptus property through the command line euca-modify-property tool
        property        Property to modify
        value           Value to set it to
        """
        command = "source " + self.tester.credpath + "/eucarc && " + self.tester.eucapath + "/usr/sbin/euca-modify-property -p " + str(
            property) + "=" + str(value)
        if self.clc.found(command, property):
            self.debug("Properly modified property " + property)
        else:
            raise Exception("Setting property " + property + " failed")
Example #20
0
class ObjectTestSuite(EutesterTestCase):
    data_size = 1000

    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--endpoint", default=None)
        self.get_args()
        # Setup basic eutester object
        if self.args.endpoint:
            self.tester = S3ops(credpath=self.args.credpath,
                                endpoint=self.args.endpoint)
        else:
            self.tester = Eucaops(credpath=self.args.credpath,
                                  config_file=self.args.config,
                                  password=self.args.password)

        self.bucket_prefix = "eutester-" + str(int(time.time())) + "-"
        self.buckets_used = set()
        random.seed(time.time())
        self.test_bucket_name = self.bucket_prefix + str(random.randint(
            0, 100))
        self.test_bucket = self.tester.create_bucket(self.test_bucket_name)
        self.buckets_used.add(self.test_bucket_name)

        def ensure_bucket_exists():
            try:
                self.tester.s3.get_bucket(self.test_bucket_name)
                return True
            except Exception:
                return False

        self.tester.wait_for_result(ensure_bucket_exists, True)

        #Create some test data for the objects
        self.test_object_data = ""
        for i in range(0, self.data_size):
            self.test_object_data += chr(random.randint(32, 126))
        print "Generated data for objects: " + self.test_object_data

    def print_key_info(self, keys=None):
        for key in keys:
            self.tester.info("Key=" + str(key.key) + " -- version= " +
                             str(key.version_id) + " -- eTag= " +
                             str(key.etag) + " -- ACL= " +
                             str(key.get_xml_acl()))

    def put_object(self, bucket=None, object_key=None, object_data=None):
        """Puts an object with the specified name and data in the specified bucket"""
        if bucket == None:
            raise Exception(
                "Cannot put object without proper bucket reference")

        try:
            key = Key(bucket=bucket, name=object_key)
            key.set_contents_from_string(object_data)
            return key.etag
        except Exception as e:
            self.tester.debug("Exception occured during 'PUT' of object " +
                              object_key + " into bucket " + bucket.name +
                              ": " + e.message)
            raise e

    def post_object(self,
                    bucket_name=None,
                    object_key=None,
                    object_data=None,
                    policy=None,
                    acl=None):
        """Uploads an object using POST + form upload"""
        fields = {
            'key':
            object_key,
            'acl':
            acl,
            'AWSAccessKeyId':
            self.tester.get_access_key(),
            'Policy':
            policy,
            'Signature':
            self.sign_policy(sak=self.tester.get_secret_key(),
                             b64_policy_json=policy)
        }

        self.tester.info('Fields: ' + str(fields))
        url = 'http://' + self.tester.s3.host + ':' + str(self.tester.s3.port) \
              + '/' + self.tester.s3.path + '/' + bucket_name

        self.tester.debug('Sending POST request to: ' + url)
        response = requests.post(url,
                                 data=fields,
                                 files={'file': BytesIO(object_data)})
        return response
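
    # Note: passing files={'file': ...} makes requests encode the request as
    # multipart/form-data, with the form fields placed before the file part,
    # which is the layout the S3 POST object API expects.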

    def generate_default_policy_b64(self, bucket, key, acl):
        delta = timedelta(hours=1)
        expire_time = (datetime.utcnow() + delta).replace(microsecond=0)

        policy = {
            'conditions': [
                {
                    'acl': acl
                },
                {
                    'bucket': bucket
                },
                {
                    'key': key
                },
            ],
            'expiration':
            time.strftime('%Y-%m-%dT%H:%M:%SZ', expire_time.timetuple())
        }
        policy_json = json.dumps(policy)
        self.tester.info('generated default policy: %s' % policy_json)
        return base64.b64encode(policy_json)

    def sign_policy(self, sak=None, b64_policy_json=None):
        my_hmac = hmac.new(sak, digestmod=hashlib.sha1)
        my_hmac.update(b64_policy_json)
        return base64.b64encode(my_hmac.digest())
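
    # How the two helpers above fit together (standard S3 POST form signing:
    # the signature is the base64-encoded HMAC-SHA1 of the base64 policy):
    #   policy = self.generate_default_policy_b64(bucket_name, key, acl='ec2-bundle-read')
    #   signature = self.sign_policy(sak=self.tester.get_secret_key(),
    #                                b64_policy_json=policy)
    # post_object() sends both as the 'Policy' and 'Signature' form fields.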

    def enable_versioning(self, bucket):
        """Enable versioning on the bucket, checking that it is not already enabled and that the operation succeeds."""
        vstatus = bucket.get_versioning_status()
        if vstatus != None and len(vstatus.keys()) > 0 and vstatus[
                'Versioning'] != None and vstatus['Versioning'] != 'Disabled':
            self.tester.info(
                "Versioning status should be null/Disabled, found: " +
                vstatus['Versioning'])
            return False
        else:
            self.tester.info("Bucket versioning is Disabled")

        #Enable versioning
        bucket.configure_versioning(True)
        if bucket.get_versioning_status()['Versioning'] == 'Enabled':
            self.tester.info("Versioning status correctly set to enabled")
            return True
        else:
            self.tester.info("Versioning status not enabled, should be.")
            return False

    def suspend_versioning(self, bucket):
        """Suspend versioning on the bucket, checking that it is previously enabled and that the operation succeeds."""
        if bucket.get_versioning_status()['Versioning'] == 'Enabled':
            self.tester.info("Versioning status correctly set to enabled")
        else:
            self.tester.info(
                "Versioning status not enabled, should be. Can't suspend if not enabled...."
            )
            return False

        #Suspend versioning
        bucket.configure_versioning(False)
        if bucket.get_versioning_status()['Versioning'] == 'Suspended':
            self.tester.info("Versioning status correctly set to suspended")
            return True
        else:
            self.tester.info("Versioning status not suspended.")
            return False

    def check_version_listing(self, version_list, total_expected_length):
        """Checks a version listing for both completeness and ordering as well as pagination if required"""
        self.tester.info("Checking bucket version listing. Listing is " +
                         str(len(version_list)) + " entries long")
        if total_expected_length >= 1000:
            assert (len(version_list) == 999)
        else:
            assert (len(version_list) == total_expected_length)

        prev_obj = None
        should_fail = None
        for obj in version_list:
            if isinstance(obj, Key):
                self.tester.info("Key: " + obj.name + " -- " + obj.version_id +
                                 "--" + obj.last_modified)
                if prev_obj != None:
                    if self.compare_versions(prev_obj, obj) > 0:
                        should_fail = obj
                prev_obj = obj
            else:
                self.tester.info("Not a key, skipping: " + str(obj))
        return should_fail

    def compare_versions(self, key1, key2):
        """
        Returns 1 if key1 sorts before key2 in a version listing, -1 if it
        sorts after, and 0 if both name and version_id are equal.
        Names are compared lexicographically (ascending); for equal names,
        the more recently modified version sorts first.
        If an error occurs or something is wrong, returns None.
        """
        if key1.name < key2.name:
            #self.debug("Key1: " + key1.name + " is less than " + key2.name)
            return 1
        elif key1.name > key2.name:
            #self.debug("Key1: " + key1.name + " is greater than " + key2.name)
            return -1
        else:
            if key1.version_id == key2.version_id:
                #self.debug("Key1: " + key1.name + " is the same version as " + key2.name)
                return 0
            else:
                if dateutil.parser.parse(
                        key1.last_modified) > dateutil.parser.parse(
                            key2.last_modified):
                    #self.debug("Key1: " + key1.last_modified + " last modified is greater than " + key2.last_modified)
                    return 1
                elif dateutil.parser.parse(
                        key1.last_modified) < dateutil.parser.parse(
                            key2.last_modified):
                    #self.debug("Key1: " + key1.last_modified + " last modified is less than " + key2.last_modified)
                    return -1
        return None
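
        # Illustrative convention: in a correctly ordered version listing,
        # compare_versions(prev, cur) returns 1 for every adjacent pair, i.e.
        # names ascend and, within a name, newer versions are listed first
        # (this is what test_object_versionlisting asserts).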

    def test_object_basic_ops(self):
        """
        Tests basic operations on objects: simple GET,PUT,HEAD,DELETE.
        
        """
        self.tester.info("Basic Object Operations Test (GET/PUT/HEAD)")
        if self.test_bucket == None:
            self.fail("Error: test_bucket not set, cannot run test")

        #Test PUT & GET
        testkey = "testkey1-" + str(int(time.time()))
        self.put_object(bucket=self.test_bucket,
                        object_key=testkey,
                        object_data=self.test_object_data)

        ret_key = self.test_bucket.get_key(testkey)
        ret_content = ret_key.get_contents_as_string()

        if ret_content == self.test_object_data:
            self.tester.info("Set content = get content, put passed")
        else:
            if ret_content != None:
                self.tester.info("Got content: " + ret_content)
            else:
                self.tester.info("No content returned")
            self.tester.info("Expected content: " + self.test_object_data)
            self.fail("Put content not the same as what was returned")

        #Test HEAD
        key_meta = self.test_bucket.get_key(testkey)
        if key_meta.key != ret_key.key or key_meta.etag != ret_key.etag or key_meta.size != ret_key.size:
            self.tester.info(
                "Something is wrong, the HEAD operation returned different metadata than the GET operation"
            )
            self.tester.info("Expected key " + ret_key.key + " etag: " +
                             ret_key.etag + " Got: " + key_meta.key +
                             " etag: " + key_meta.etag)
        else:
            self.tester.info("HEAD meta = GET meta, all is good")

        #Test copy operation (GET w/source headers)
        self.tester.info("Testing COPY object")

        new_key = "testkey2"
        self.test_bucket.copy_key(new_key_name=new_key,
                                  src_bucket_name=self.test_bucket_name,
                                  src_key_name=testkey)
        keylist = self.test_bucket.list()
        counter = 0
        for k in keylist:
            if isinstance(k, Prefix):
                self.tester.info("Prefix: " + "NULL" if k == None else k.name)
            else:
                self.tester.info("Key: " + k.name + " Etag: " + k.etag)
                counter += 1
        if counter != 2:
            self.fail("Expected 2 keys after copy operation, found only: " +
                      len(keylist))
        try:
            ret_key = self.test_bucket.get_key(new_key)
        except:
            self.fail("Could not get object copy")
        if ret_key == None:
            self.fail("Could not get object copy")

        if self.test_bucket.get_key(testkey).get_contents_as_string(
        ) != ret_key.get_contents_as_string():
            self.fail("Contents of original key and copy don't match")
        else:
            self.tester.info("Copy key contents match original!")

        #Test DELETE
        self.test_bucket.delete_key(testkey)
        ret_key = None
        ret_key = self.test_bucket.get_key(testkey)
        if ret_key:
            self.tester.info("Erroneously got: " + ret_key.name)
            raise S3ResponseError(
                404,
                "Should have thrown exception for getting a non-existent object"
            )
        self.tester.info("Finishing basic ops test")

    def test_object_byte_offset_read(self):
        """Tests fetching specific byte offsets of the object"""
        self.tester.info("Byte-range Offset GET Test")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        testkey = "rangetestkey-" + str(int(time.time()))
        source_bytes = bytearray(self.test_object_data)

        #Put the object initially
        self.put_object(bucket=self.test_bucket,
                        object_key=testkey,
                        object_data=self.test_object_data)

        #Test range for first 100 bytes of object
        print "Trying start-range object get"
        try:
            data_str = Key(bucket=self.test_bucket,
                           name=testkey).get_contents_as_string(
                               headers={"Range": "bytes=0-99"})
        except:
            self.fail("Failed range object get first 100 bytes")

        startrangedata = bytearray(data_str)
        print "Got: " + startrangedata
        print "Expected: " + str(source_bytes[:100])
        start = 0
        for i in range(0, 100):
            if startrangedata[i - start] != source_bytes[i]:
                print "Byte: " + startrangedata[i] + " differs!"
                self.fail("Start-range Ranged-get failed")

        print "Trying mid-object range"
        try:
            data_str = Key(bucket=self.test_bucket,
                           name=testkey).get_contents_as_string(
                               headers={"Range": "bytes=500-599"})
        except:
            self.fail("Failed range object get for middle 100 bytes")
        midrangedata = bytearray(data_str)
        start = 500
        for i in range(start, start + 100):
            if midrangedata[i - start] != source_bytes[i]:
                print "Byte: " + midrangedata[i] + "differs!"
                self.fail("Mid-range Ranged-get failed")

        print "Trying end-range object get"
        #Test range for last 100 bytes of object
        try:
            data_str = Key(bucket=self.test_bucket,
                           name=testkey).get_contents_as_string(
                               headers={"Range": "bytes=800-899"})
        except:
            self.fail("Failed range object get for last 100 bytes")

        endrangedata = bytearray(data_str)
        print "Got: " + str(endrangedata)
        start = 800
        try:
            for i in range(start, start + 100):
                if endrangedata[i - start] != source_bytes[i]:
                    print "Byte: " + endrangedata[i] + "differs!"
                    self.fail("End-range Ranged-get failed")
        except Exception as e:
            print "Exception! Received: " + e

        print "Range test complete"

    def test_object_post(self):
        """Test the POST method for putting objects, requires a pre-signed upload policy and url"""
        self.tester.info("Testing POST form upload on bucket" +
                         self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        itr = 1
        self.tester.info('Doing ' + str(itr) + ' POST upload iterations')
        acl = 'ec2-bundle-read'
        for k in xrange(0, itr):
            key = 'postkey1' + str(k)
            data = os.urandom(512)
            computed_md5 = '"' + hashlib.md5(data).hexdigest() + '"'
            self.tester.info('Data md5: ' + computed_md5 + ' data length: ' +
                             str(len(data)))
            self.tester.info('Uploading object ' + self.test_bucket_name +
                             '/' + key + ' via POST with acl : ' + acl)
            response = self.post_object(
                bucket_name=self.test_bucket_name,
                object_key=key,
                object_data=data,
                acl=acl,
                policy=self.generate_default_policy_b64(self.test_bucket_name,
                                                        key,
                                                        acl=acl))

            self.tester.info('Got response for POST: ' +
                             str(response.status_code) + ': ' +
                             str(response.text))
            assert (response.status_code == 204)
            fetched_key = self.test_bucket.get_key(key)
            fetched_content = fetched_key.get_contents_as_string()
            self.tester.info('Got fetched md5: ' + fetched_key.etag)
            self.tester.info('Calculated md5: ' + computed_md5 +
                             ' received md5 ' + fetched_key.etag)
            assert (fetched_key.etag == computed_md5)
            assert (fetched_content == data)

        self.tester.info("Done with upload test")

    def test_object_post_large(self):
        """Test the POST method for putting objects, requires a pre-signed upload policy and url"""
        self.tester.info("Testing POST form upload on bucket" +
                         self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        self.tester.info("Testing POST form upload of 10MB data on bucket" +
                         self.test_bucket_name)
        itr = 1
        large_obj_size_bytes = 10 * 1024 * 1024  #10MB content
        self.tester.info('Doing ' + str(itr) +
                         ' iterations of large object of size ' +
                         str(large_obj_size_bytes) + ' with POST')
        acl = 'ec2-bundle-read'
        for i in xrange(0, itr):
            key = 'postkey_10mb_' + str(i)
            self.tester.info('Generating ' + str(large_obj_size_bytes) +
                             ' bytes for upload')
            #Create some test data
            data = str(os.urandom(large_obj_size_bytes))

            self.tester.info("Data length: " + str(len(data)))
            computed_md5 = '"' + hashlib.md5(data).hexdigest() + '"'
            self.tester.info('Data md5 is: ' + computed_md5)

            self.tester.info('Uploading object via POST using acl: ' + acl)
            response = self.post_object(
                bucket_name=self.test_bucket.name,
                object_key=key,
                object_data=data,
                policy=self.generate_default_policy_b64(self.test_bucket.name,
                                                        key,
                                                        acl=acl),
                acl=acl)

            self.tester.info('Got response for POST: ' +
                             str(response.status_code) + ': ' +
                             str(response.text))
            assert (response.status_code == 204)

            self.tester.info('Fetching the content for verification')
            fetched_key = self.test_bucket.get_key(key_name=key)
            self.tester.info('Got fetched content length : ' +
                             str(fetched_key.size) + ' Expected ' +
                             str(len(data)))
            assert (fetched_key.size == len(data))
            self.tester.info('Got fetched md5: ' + fetched_key.etag)
            self.tester.info('Calculated md5: ' + computed_md5 +
                             ' received md5 ' + fetched_key.etag)
            assert (fetched_key.etag == computed_md5)
            fetched_content = fetched_key.get_contents_as_string()
            assert (fetched_content == data)

    def test_object_large_objects(self):
        """Test operations on large objects (>1MB), but not so large that we must use the multi-part upload interface"""
        self.tester.info(
            "Testing large-ish objects over 1MB in size on bucket " +
            self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        test_data = None
        large_obj_size_bytes = 5 * 1024 * 1024  #5MB
        self.tester.info("Generating " + str(large_obj_size_bytes) +
                         " bytes of data")

        #Create some test data
        #for i in range(0, large_obj_size_bytes):
        #    test_data += chr(random.randint(32,126))
        test_data = bytearray(os.urandom(large_obj_size_bytes))

        self.tester.info("Uploading object content of size: " +
                         str(large_obj_size_bytes) + " bytes")
        keyname = "largeobj-" + str(int(time.time()))
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname,
                        object_data=test_data)
        self.tester.info("Done uploading object")

        ret_key = self.test_bucket.get_key(keyname)
        ret_data = ret_key.get_contents_as_string()

        if ret_data != test_data:
            self.fail("Fetched data and generated data don't match")
        else:
            self.tester.info("Data matches!")

        self.tester.info("Removing large object")
        self.test_bucket.delete_key(ret_key)
        self.tester.info("Complete large object test")
        pass

    def test_object_multipart(self):
        """Tests basic multipart upload functionality"""
        self.tester.info("Testing Multipart")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        self.test_multipart_upload()
        self.test_abort_multipart_upload()

    def test_multipart_upload(self):
        '''Basic multipart upload'''
        self.tester.info("Testing multipart upload")
        self.tester.info("Creating random file representing part...")
        temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
        temp_file.write(os.urandom(5 * 1024 * 1024))
        keyname = "multi-" + str(int(time.time()))
        self.tester.info("Initiating multipart upload...much upload")
        reply = self.initiate_multipart_upload(keyname)
        self.tester.info("Uploading parts...Such Parts")
        for partnum in range(1, 11):
            temp_file.seek(0, os.SEEK_SET)
            reply.upload_part_from_file(temp_file, partnum)
        self.tester.info("Listing parts...")
        self.test_bucket.get_all_multipart_uploads()
        self.tester.info("Completing upload...So OSG")
        reply.complete_upload()
        temp_file.close()
        self.tester.info("HEAD request...")
        returned_key = self.test_bucket.get_key(keyname, validate=True)
        download_temp_file = tempfile.NamedTemporaryFile(mode="w+b",
                                                         prefix="mpu-download")
        self.tester.info("Downloading object...very mpu")
        returned_key.get_contents_to_file(download_temp_file)
        self.tester.info("Deleting object...WOW")
        self.test_bucket.delete_key(keyname)

    def test_abort_multipart_upload(self):
        '''Basic multipart upload'''
        self.tester.info("Testing abort multipart upload")
        temp_file = tempfile.NamedTemporaryFile(mode="w+b", prefix="multipart")
        temp_file.write(os.urandom(5 * 1024 * 1024))
        keyname = "multi-" + str(int(time.time()))
        reply = self.initiate_multipart_upload(keyname)
        for partnum in range(1, 11):
            temp_file.seek(0, os.SEEK_SET)
            reply.upload_part_from_file(temp_file, partnum)
        self.test_bucket.get_all_multipart_uploads()
        self.tester.info("Canceling upload")
        reply.cancel_upload()
        temp_file.close()

    def initiate_multipart_upload(self, keyname):
        self.tester.info("Initiating multipart upload " + keyname)
        return self.test_bucket.initiate_multipart_upload(keyname)

    def test_object_versioning_enabled(self):
        """Tests object versioning for get/put/delete on a versioned bucket"""
        self.tester.info("Testing bucket Versioning-Enabled")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not properly enable versioning")

        #Create some keys
        keyname = "versionkey-" + str(int(time.time()))

        #Multiple versions of the data
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"

        #Test sequence: put v1, get v1, put v2, put v3, get v3, delete v3, restore with v1 (copy), put v3 again, delete v2 explicitly
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname,
                        object_data=v1data)

        #Get v1
        obj_v1 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v1.etag, data=v1data)

        self.tester.info(
            "Initial bucket state after object uploads with versioning enabled:"
        )
        self.print_key_info(keys=[obj_v1])

        self.tester.info("Adding another version")
        #Put v2 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname,
                        object_data=v2data)
        obj_v2 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v2.etag, data=v2data)
        self.print_key_info(keys=[obj_v1, obj_v2])

        self.tester.info("Adding another version")
        #Put v3 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname,
                        object_data=v3data)
        obj_v3 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v3.etag, data=v3data)
        self.print_key_info(keys=[obj_v1, obj_v2, obj_v3])

        self.tester.info("Getting specific version")
        #Get a specific version, v1
        v1_return = self.test_bucket.get_key(key_name=keyname,
                                             version_id=obj_v1.version_id)
        self.print_key_info(keys=[v1_return])

        #Delete current latest version (v3)
        self.test_bucket.delete_key(keyname)

        del_obj = self.test_bucket.get_key(keyname)
        if del_obj:
            self.tester.info("Erroneously got: " + del_obj.name)
            raise S3ResponseError(
                404,
                "Should have thrown this exception for getting a non-existent object"
            )

        #Restore v1 using copy
        self.tester.info("Restoring version")
        try:
            self.test_bucket.copy_key(new_key_name=obj_v1.key,
                                      src_bucket_name=self.test_bucket_name,
                                      src_key_name=keyname,
                                      src_version_id=obj_v1.version_id)
        except S3ResponseError as e:
            self.fail(
                "Failed to restore key from previous version using copy got error: "
                + str(e.status))

        restored_obj = self.test_bucket.get_key(keyname)
        assert (restored_obj != None)
        self.tester.check_md5(eTag=restored_obj.etag, data=v1data)
        self.print_key_info(keys=[restored_obj])

        #Put v3 again
        self.tester.info("Adding another version")
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname,
                        object_data=v3data)
        self.tester.check_md5(eTag=obj_v3.etag, data=v3data)
        self.print_key_info([self.test_bucket.get_key(keyname)])

        #Delete v2 explicitly
        self.test_bucket.delete_key(key_name=obj_v2.key,
                                    version_id=obj_v2.version_id)
        del_obj = self.test_bucket.get_key(keyname,
                                           version_id=obj_v2.version_id)
        if del_obj:
            raise S3ResponseError(
                404, "Should have gotten 404 not-found error, but got: " +
                del_obj.key + " instead")

        #Show what's on top
        top_obj = self.test_bucket.get_key(keyname)
        self.print_key_info([top_obj])
        self.tester.check_md5(eTag=top_obj.etag, data=v3data)

        self.tester.info("Finished the versioning enabled test. Success!!")

    def clear_and_rebuild_bucket(self, bucket_name):
        self.tester.clear_bucket(bucket_name)
        return self.tester.create_bucket(bucket_name)

    def test_object_versionlisting(self):
        """
        Tests object version listing from a bucket
        """
        version_max = 3
        keyrange = 20
        self.tester.info(
            "Testing listing versions in a bucket and pagination using " +
            str(keyrange) + " keys with " + str(version_max) +
            " versions per key")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not enable versioning properly. Failing")

        key = "testkey-" + str(int(time.time()))
        keys = [key + str(k) for k in range(0, keyrange)]
        contents = [
            self.test_object_data + "--v" + str(v)
            for v in range(0, version_max)
        ]

        try:
            for keyname in keys:
                #Put version_max versions of each key
                for v in range(0, version_max):
                    self.tester.info("Putting: " + keyname + " version " +
                                     str(v))
                    self.test_bucket.new_key(keyname).set_contents_from_string(
                        contents[v])
        except S3ResponseError as e:
            self.fail("Failed putting object versions for test: " +
                      str(e.status))
        listing = self.test_bucket.get_all_versions()
        self.tester.info("Bucket version listing is " + str(len(listing)) +
                         " entries long")
        if keyrange * version_max >= 1000:
            if not len(listing) == 999:
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception(
                    "Bucket version listing did not limit the response to 999. Instead: "
                    + str(len(listing)))
        else:
            if not len(listing) == keyrange * version_max:
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception(
                    "Bucket version listing did not equal the number uploaded. Instead: "
                    + str(len(listing)))

        prev_obj = None
        for obj in listing:
            if isinstance(obj, Key):
                self.tester.info("Key: " + obj.name + " -- " + obj.version_id +
                                 "--" + obj.last_modified)
                if prev_obj != None:
                    if self.compare_versions(prev_obj, obj) <= 0:
                        raise Exception(
                            "Version listing not sorted correctly, offending key: "
                            + obj.name + " version: " + obj.version_id +
                            " date: " + obj.last_modified)
                prev_obj = obj
            else:
                self.tester.info("Not a key, skipping: " + str(obj))

    def test_object_versioning_suspended(self):
        """Tests object versioning on a suspended bucket, a more complicated test than the Enabled test"""
        self.tester.info("Testing bucket Versioning-Suspended")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        #Create some keys
        keyname1 = "versionkey1-" + str(int(time.time()))
        keyname2 = "versionkey2-" + str(int(time.time()))
        keyname3 = "versionkey3-" + str(int(time.time()))
        keyname4 = "versionkey4-" + str(int(time.time()))
        keyname5 = "versionkey5-" + str(int(time.time()))
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"

        vstatus = self.test_bucket.get_versioning_status()
        if vstatus:
            self.fail("Versioning status should be null/Disabled but was: " +
                      str(vstatus))
        else:
            self.tester.info("Bucket versioning is Disabled")

        self.put_object(bucket=self.test_bucket,
                        object_key=keyname1,
                        object_data=v1data)
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname2,
                        object_data=v1data)
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname3,
                        object_data=v1data)
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname4,
                        object_data=v1data)
        self.put_object(bucket=self.test_bucket,
                        object_key=keyname5,
                        object_data=v1data)

        key1 = self.test_bucket.get_key(keyname1)
        key2 = self.test_bucket.get_key(keyname2)
        key3 = self.test_bucket.get_key(keyname3)
        key4 = self.test_bucket.get_key(keyname4)
        key5 = self.test_bucket.get_key(keyname5)

        self.tester.info(
            "Initial bucket state after object uploads without versioning:")
        self.print_key_info(keys=[key1, key2, key3, key4, key5])

        #Enable versioning
        self.test_bucket.configure_versioning(True)
        if self.test_bucket.get_versioning_status():
            self.tester.info("Versioning status correctly set to enabled")
        else:
            self.tester.info("Versionign status not enabled, should be.")

        #Update a subset of the keys
        key1_etag2 = self.put_object(bucket=self.test_bucket,
                                     object_key=keyname1,
                                     object_data=v2data)
        key2_etag2 = self.put_object(bucket=self.test_bucket,
                                     object_key=keyname2,
                                     object_data=v2data)

        key3_etag2 = self.put_object(bucket=self.test_bucket,
                                     object_key=keyname3,
                                     object_data=v2data)
        key3_etag3 = self.put_object(bucket=self.test_bucket,
                                     object_key=keyname3,
                                     object_data=v3data)

        #Delete a key
        self.test_bucket.delete_key(keyname5)

        #Suspend versioning
        self.test_bucket.configure_versioning(False)

        #Get latest of each key
        key1 = self.test_bucket.get_key(keyname1)
        key2 = self.test_bucket.get_key(keyname2)
        key3 = self.test_bucket.get_key(keyname3)
        key4 = self.test_bucket.get_key(keyname4)
        key5 = self.test_bucket.get_key(keyname5)

        #Delete a key

        #Add a key

        #Add same key again

        #Fetch each key

    def test_object_acl(self):
        """Tests object acl get/set and manipulation"""
        self.fail("Test not implemented")

        #TODO: test custom and canned acls that are both valid and invalid

    def test_object_torrent(self):
        """Tests object torrents"""
        self.fail("Feature not implemented yet")

    def clean_method(self):
        '''This is the teardown method'''
        #Delete the testing bucket if it is left-over
        self.tester.info('Deleting the buckets used for testing')
        for bucket in self.buckets_used:
            try:
                self.tester.info('Checking bucket ' + bucket +
                                 ' for possible cleaning/delete')
                if self.tester.s3.head_bucket(bucket) != None:
                    self.tester.info('Found bucket exists, cleaning it')
                    self.tester.clear_bucket(bucket)
                else:
                    self.tester.info('Bucket ' + bucket +
                                     ' not found, skipping')
            except Exception as e:
                self.tester.info('Exception checking bucket ' + bucket +
                                 ' Exception msg: ' + e.message)
        return

Example #21
0
class ObjectTestSuite(EutesterTestCase):
    data_size = 1000
    
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.parser.add_argument("--s3endpoint", default=None)
        self.get_args()
        # Setup basic eutester object
        if self.args.s3endpoint:
            self.tester = S3ops( credpath=self.args.credpath, endpoint=self.args.s3endpoint)
        else:
            self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config, password=self.args.password)
        
        self.bucket_prefix = "eutester-bucket-test-suite-" + str(int(time.time())) + "-"
        self.buckets_used = set()
        random.seed(time.time())
        self.test_bucket_name = self.bucket_prefix + str(random.randint(0,100))
        self.test_bucket = self.tester.create_bucket(self.test_bucket_name)
        self.buckets_used.add(self.test_bucket_name)
        #Create some test data for the objects
        self.test_object_data = ""
        for i in range(0, self.data_size):
            self.test_object_data += chr(random.randint(32,126))            
        print "Generated data for objects: " + self.test_object_data
        
    
    def print_key_info(self, keys=None):
        for key in keys:
            self.tester.info("Key=" + str(key.key) + " -- version= " + str(key.version_id) + " -- eTag= " + str(key.etag)
                             + " -- ACL= " + str(key.get_xml_acl()))
    
    def put_object(self, bucket=None, object_key=None, object_data=None):
        """Puts an object with the specified name and data in the specified bucket"""
        if bucket == None:
            raise Exception("Cannot put object without proper bucket reference")
        
        try:
            key = Key(bucket=bucket, name=object_key)
            key.set_contents_from_string(object_data)
            return key.etag
        except Exception as e:
            self.tester.info("Exception occurred during 'PUT' of object " + object_key + " into bucket " + bucket.name + ": " + e.message)
            return None
        
     
    def enable_versioning(self, bucket):
        """Enable versioning on the bucket, checking that it is not already enabled and that the operation succeeds."""
        vstatus = bucket.get_versioning_status()
        if vstatus != None and len(vstatus.keys()) > 0 and vstatus['Versioning'] != None and vstatus['Versioning'] != 'Disabled':
            self.tester.info("Versioning status should be null/Disabled, found: " + vstatus['Versioning'])
            return False
        else:
            self.tester.info("Bucket versioning is Disabled")
        
        #Enable versioning
        bucket.configure_versioning(True)
        if bucket.get_versioning_status()['Versioning'] == 'Enabled':
            self.tester.info("Versioning status correctly set to enabled")
            return True
        else:
            self.tester.info("Versioning status not enabled, should be.")
            return False
    
    def suspend_versioning(self, bucket):
        """Suspend versioning on the bucket, checking that it is previously enabled and that the operation succeeds."""
        if bucket.get_versioning_status()['Versioning'] == 'Enabled':
            self.tester.info("Versioning status correctly set to enabled")
        else:
            self.tester.info("Versioning status not enabled, should be. Can't suspend if not enabled....")
            return False
    
        #Suspend versioning
        bucket.configure_versioning(False)
        if bucket.get_versioning_status()['Versioning'] == 'Suspended':
            self.tester.info("Versioning status correctly set to suspended")
            return True
        else:
            self.tester.info("Versioning status not suspended.")
            return False
        return False 
             
    def check_version_listing(self, version_list, total_expected_length):
        """Checks a version listing for both completeness and ordering as well as pagination if required"""
        self.tester.info("Checking bucket version listing. Listing is " + str(len(version_list)) + " entries long")
        if total_expected_length >= 1000:
            assert(len(version_list) == 999)
        else:
            assert(len(version_list) == total_expected_length)
        
        prev_obj = None
        should_fail = None
        for obj in version_list:
            if isinstance(obj,Key):
                self.tester.info("Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified)                
                if prev_obj != None:
                    #compare_versions() returns 1 when prev_obj sorts before obj,
                    #so a result <= 0 means the listing is out of order
                    if self.compare_versions(prev_obj, obj) <= 0:
                        should_fail = obj
                prev_obj = obj
            else:
                self.tester.info("Not a key, skipping: " + str(obj))
        return should_fail

    def compare_versions(self, key1, key2):
        """
        Returns -1 if key1 < key2, 0 if equal, and 1 if key1 > key2. 
        Compares names lexicographically, if equal, compares date_modified if versions are different. 
        If version_id and name are equal then key1 = key2
        If an error occurs or something is wrong, returns None
        """
        if key1.name < key2.name:
            #self.debug("Key1: " + key1.name + " is less than " + key2.name)
            return 1
        elif key1.name > key2.name:
            #self.debug("Key1: " + key1.name + " is greater than " + key2.name)
            return -1
        else:
            if key1.version_id == key2.version_id:
                #self.debug("Key1: " + key1.name + " is the same version as " + key2.name)
                return 0
            else:
                if dateutil.parser.parse(key1.last_modified) > dateutil.parser.parse(key2.last_modified):
                    #self.debug("Key1: " + key1.last_modified + " last modified is greater than " + key2.last_modified)
                    return 1
                elif dateutil.parser.parse(key1.last_modified) < dateutil.parser.parse(key2.last_modified):
                    #self.debug("Key1: " + key1.last_modified + " last modified is less than " + key2.last_modified)
                    return -1
        return None
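
    # A hedged worked example of the comparator above: in a correctly sorted
    # version listing such as [("a", newest), ("a", older), ("b", any)],
    # compare_versions(prev, cur) returns 1 for every adjacent pair, so any
    # result <= 0 between neighbors flags a mis-ordered listing.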
    
    def test_object_basic_ops(self):
        """
        Tests basic operations on objects: simple GET,PUT,HEAD,DELETE.
        
        """
        self.tester.info("Basic Object Operations Test (GET/PUT/HEAD)")
        if self.test_bucket == None:
            self.fail("Error: test_bucket not set, cannot run test")
            
        #Test PUT & GET
        testkey="testkey1-" + str(int(time.time()))
        self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data)
        
        ret_key = self.test_bucket.get_key(testkey)
        ret_content = ret_key.get_contents_as_string()
        
        if ret_content == self.test_object_data:
            self.tester.info("Set content = get content, put passed")
        else:
            if ret_content != None:
                self.tester.info("Got content: " + ret_content)
            else:
                self.tester.info("No content returned")
            self.tester.info("Expected content: " + self.test_object_data)
            self.fail("Put content not the same as what was returned")
        
        #Test HEAD
        key_meta = self.test_bucket.get_key(testkey)
        #boto Key objects do not define equality, so compare etags instead
        if key_meta == None or key_meta.etag != ret_key.etag:
            self.tester.info("Something is wrong, the HEAD operation returned different metadata than the GET operation")
        else:
            self.tester.info("HEAD meta = GET meta, all is good")
        
        #Test copy operation (GET w/source headers)
        new_key = "testkey2"
        self.test_bucket.copy_key(new_key, self.test_bucket_name,testkey)
        keylist = self.test_bucket.list()
        counter = 0
        for k in keylist:
            if isinstance(k, Prefix):
                self.tester.info("Prefix: " + "NULL" if k == None else k.name)
            else:
                self.tester.info("Key: " + k.name + " Etag: " + k.etag)
                counter += 1
        if counter != 2:
            self.fail("Expected 2 keys after copy operation, found only: " + len(keylist))
        try:
            ret_key = self.test_bucket.get_key(new_key)
        except:
            self.fail("Could not get object copy")
        if ret_key == None:
            self.fail("Could not get object copy")
            
        if self.test_bucket.get_key(testkey).get_contents_as_string() != ret_key.get_contents_as_string():
            self.fail("Contents of original key and copy don't match")
        else:
            self.tester.info("Copy key contents match original!")
        
        #Test DELETE
        self.test_bucket.delete_key(testkey)
        ret_key = None
        ret_key = self.test_bucket.get_key(testkey)
        if ret_key:
            self.tester.info("Erroneously got: " + ret_key.name)
            raise S3ResponseError(404, "Should have thrown exception for getting a non-existent object")
        self.tester.info("Finishing basic ops test")
               
    def test_object_byte_offset_read(self):
        """Tests fetching specific byte offsets of the object"""
        self.tester.info("Byte-range Offset GET Test")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        testkey = "rangetestkey-" + str(int(time.time()))
        source_bytes = bytearray(self.test_object_data)
        
        #Put the object initially
        self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data)
        
        #Test range for first 100 bytes of object
        print "Trying start-range object get"
        try:
            data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=0-99"})
        except:
            self.fail("Failed range object get first 100 bytes")
        
        startrangedata = bytearray(data_str)
        print "Got: " + str(startrangedata)
        print "Expected: " + str(source_bytes[:100])
        start = 0
        for i in range(0,100):
            if startrangedata[i-start] != source_bytes[i]:
                print "Byte: " + str(startrangedata[i-start]) + " differs!"
                self.fail("Start-range Ranged-get failed")
            
        print "Trying mid-object range"   
        try: 
            data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=500-599"})
        except:
            self.fail("Failed range object get for middle 100 bytes")     
        midrangedata = bytearray(data_str)
        start = 500
        for i in range(start,start+100):
            if midrangedata[i-start] != source_bytes[i]:
                print "Byte: " + midrangedata[i] + "differs!"
                self.fail("Mid-range Ranged-get failed")
        
        print "Trying end-range object get"
        #Test range for last 100 bytes of object
        try:
            data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=800-899"})
        except:
            self.fail("Failed range object get for last 100 bytes")
            
        endrangedata = bytearray(data_str)
        print "Got: " + str(endrangedata)
        start = 800
        try:
            for i in range(start,start+100):
                if endrangedata[i-start] != source_bytes[i]:
                    print "Byte: " + endrangedata[i] + "differs!"
                    self.fail("End-range Ranged-get failed")
        except Exception as e:
            print "Exception! Received: " + e
        
        print "Range test complete"
        
    def test_object_post(self):
        """Test the POST method for putting objects, requires a pre-signed upload policy and url"""
        self.fail("Test not implemented")
                
    def test_object_large_objects(self):
        """Test operations on large objects (>1MB), but not so large that we must use the multi-part upload interface"""
        self.tester.info("Testing large-ish objects over 1MB in size on bucket" + self.test_bucket_name)
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        test_data = ""
        large_obj_size_bytes = 5 * 1024 * 1024 #5MB
        self.tester.info("Generating " + str(large_obj_size_bytes) + " bytes of data")

        #Create some test data
        for i in range(0, large_obj_size_bytes):
            test_data += chr(random.randint(32,126))

        self.tester.info("Uploading object content of size: " + str(large_obj_size_bytes) + " bytes")        
        keyname = "largeobj-" + str(int(time.time()))
        self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=test_data)
        self.tester.info("Done uploading object")

        ret_key = self.test_bucket.get_key(keyname)
        ret_data = ret_key.get_contents_as_string()
        
        if ret_data != test_data:
            self.fail("Fetched data and generated data don't match")
        else:
            self.tester.info("Data matches!")
        
        self.tester.info("Removing large object")
        self.test_bucket.delete_key(ret_key)
        self.tester.info("Complete large object test")
            
    def test_object_multipart(self):
        """Test the multipart upload interface"""
        self.fail("Feature not implemented")
        
    def test_object_versioning_enabled(self):
        """Tests object versioning for get/put/delete on a versioned bucket"""
        self.tester.info("Testing bucket Versioning-Enabled")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not properly enable versioning")
             
        #Create some keys
        keyname = "versionkey-" + str(int(time.time()))
        
        #Multiple versions of the data
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"
        
        #Test sequence: put v1, get v1, put v2, put v3, get v3, delete v3, restore with v1 (copy), put v3 again, delete v2 explicitly
        self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v1data)
                
        #Get v1
        obj_v1 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v1.etag,data=v1data)
        
        self.tester.info("Initial bucket state after object uploads without versioning:")
        self.print_key_info(keys=[obj_v1])
                
        #Put v2 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v2data)
        obj_v2 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v2.etag,data=v2data)
        self.print_key_info(keys=[obj_v1, obj_v2])
        
        #Put v3 (and get/head to confirm success)
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data)
        obj_v3 = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=obj_v3.etag,data=v3data)
        self.print_key_info(keys=[obj_v1, obj_v2, obj_v3])
        
        #Get a specific version, v1
        v1_return = self.test_bucket.get_key(key_name=keyname,version_id=obj_v1.version_id)
        self.print_key_info(keys=[v1_return])
        
        #Delete current latest version (v3)
        self.test_bucket.delete_key(keyname)

        del_obj = self.test_bucket.get_key(keyname)
        if del_obj:
            self.tester.info("Erroneously got: " + del_obj.name)
            raise S3ResponseError(404, "Should have thrown this exception for getting a non-existent object")
        
        #Restore v1 using copy
        try:
            self.test_bucket.copy_key(new_key_name=obj_v1.key,src_bucket_name=self.test_bucket_name,src_key_name=keyname,src_version_id=obj_v1.version_id)
        except S3ResponseError as e:
            self.fail("Failed to restore key from previous version using copy got error: " + str(e.status))
            
        restored_obj = self.test_bucket.get_key(keyname)
        self.tester.check_md5(eTag=restored_obj.etag,data=v1data)
        self.print_key_info(keys=[restored_obj])
        
        #Put v3 again
        self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data)
        self.tester.check_md5(eTag=obj_v3.etag,data=v3data)
        self.print_key_info([self.test_bucket.get_key(keyname)])

        #Delete v2 explicitly
        self.test_bucket.delete_key(key_name=obj_v2.key,version_id=obj_v2.version_id)
        del_obj = self.test_bucket.get_key(keyname,version_id=obj_v2.version_id)
        if del_obj:
            raise S3ResponseError("Should have gotten 404 not-found error, but got: " + del_obj.key + " instead",404)

        #Show what's on top
        top_obj = self.test_bucket.get_key(keyname)
        self.print_key_info([top_obj])
        self.tester.check_md5(eTag=top_obj.etag,data=v3data)
        
        self.tester.info("Finished the versioning enabled test. Success!!")

    def clear_and_rebuild_bucket(self, bucket_name):
        self.tester.clear_bucket(bucket_name)
        return self.tester.create_bucket(bucket_name)

    def test_object_versionlisting(self):
        """
        Tests object version listing from a bucket
        """
        version_max = 3
        keyrange = 20
        self.tester.info("Testing listing versions in a bucket and pagination using " + str(keyrange) + " keys with " + str(version_max) + " versions per key")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        if not self.enable_versioning(self.test_bucket):
            self.fail("Could not enable versioning properly. Failing")
        
        key = "testkey-" + str(int(time.time()))
        keys = [ key + str(k) for k in range(0,keyrange)]        
        contents = [ self.test_object_data + "--v" + str(v) for v in range(0,version_max)]        

        try:
            for keyname in keys:
                #Put version_max versions of each key
                for v in range(0,version_max):
                    self.tester.info("Putting: " + keyname + " version " + str(v))
                    self.test_bucket.new_key(keyname).set_contents_from_string(contents[v])
        except S3ResponseError as e:
            self.fail("Failed putting object versions for test: " + str(e.status))
        listing = self.test_bucket.get_all_versions()
        self.tester.info("Bucket version listing is " + str(len(listing)) + " entries long")
        if keyrange * version_max >= 1000:
            if not len(listing) == 999:
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception("Bucket version listing did not limit the response to 999. Instead: " + str(len(listing)))
        else:
            if not len(listing) == keyrange * version_max:
                self.test_bucket.configure_versioning(False)
                self.tester.debug(str(listing))
                raise Exception("Bucket version listing did not equal the number uploaded. Instead: " + str(len(listing)))
        
        prev_obj = None
        for obj in listing:
            if isinstance(obj,Key):
                self.tester.info("Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified)                
                if prev_obj != None:
                    if self.compare_versions(prev_obj, obj) <= 0:
                        raise Exception("Version listing not sorted correctly, offending key: " + obj.name + " version: " + obj.version_id + " date: " + obj.last_modified)
                prev_obj = obj
            else:
                self.tester.info("Not a key, skipping: " + str(obj))
    
    def test_object_versioning_suspended(self):
        """Tests object versioning on a suspended bucket, a more complicated test than the Enabled test"""
        self.tester.info("Testing bucket Versioning-Suspended")
        self.test_bucket = self.clear_and_rebuild_bucket(self.test_bucket_name)
        #Create some keys
        keyname1 = "versionkey1-" + str(int(time.time()))
        keyname2 = "versionkey2-" + str(int(time.time()))
        keyname3 = "versionkey3-" + str(int(time.time()))
        keyname4 = "versionkey4-" + str(int(time.time()))
        keyname5 = "versionkey5-" + str(int(time.time()))
        v1data = self.test_object_data + "--version1"
        v2data = self.test_object_data + "--version2"
        v3data = self.test_object_data + "--version3"
        
        vstatus = self.test_bucket.get_versioning_status()
        if vstatus:
            self.fail("Versioning status should be null/Disabled but was: " + str(vstatus))
        else:
            self.tester.info("Bucket versioning is Disabled")
        
        self.put_object(bucket=self.test_bucket, object_key=keyname1, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname2, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname3, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v1data)
        self.put_object(bucket=self.test_bucket, object_key=keyname5, object_data=v1data)
                    
        key1 = self.test_bucket.get_key(keyname1)        
        key2 = self.test_bucket.get_key(keyname2)        
        key3 = self.test_bucket.get_key(keyname3)        
        key4 = self.test_bucket.get_key(keyname4)        
        key5 = self.test_bucket.get_key(keyname5)

        self.tester.info("Initial bucket state after object uploads without versioning:")
        self.print_key_info(keys=[key1,key2,key3,key4,key5])
        
        
        
        #Enable versioning
        self.test_bucket.configure_versioning(True)
        if self.test_bucket.get_versioning_status().get('Versioning') == 'Enabled':
            self.tester.info("Versioning status correctly set to enabled")
        else:
            self.tester.info("Versioning status not enabled, should be.")
        
        #Update a subset of the keys
        key1_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname1,object_data=v2data)
        key2_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname2,object_data=v2data)
        
        key3_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v2data)
        key3_etag3=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v3data)
        
        #Delete a key
        self.test_bucket.delete_key(keyname5)

        #Suspend versioning
        self.test_bucket.configure_versioning(False)
        
        #Get latest of each key
        key1=self.test_bucket.get_key(keyname1)
        key2=self.test_bucket.get_key(keyname2)
        key3=self.test_bucket.get_key(keyname3)
        key4=self.test_bucket.get_key(keyname4)
        key5=self.test_bucket.get_key(keyname5)
        
        #Hedged completion of the placeholder steps below: with versioning
        #suspended, puts and deletes act on the 'null' version of each key.
        #Delete a key
        self.test_bucket.delete_key(keyname4)
        #Add a key
        self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v2data)
        #Add same key again
        self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v3data)
        #Fetch each key
        for name in [keyname1, keyname2, keyname3, keyname4, keyname5]:
            fetched = self.test_bucket.get_key(name)
            self.print_key_info(keys=[fetched] if fetched else [])
    
    def test_object_acl(self):
        """Tests object acl get/set and manipulation"""
        self.fail("Test not implemented")
        
        #TODO: test custom and canned acls that are both valid and invalid
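        # A hedged sketch of what the TODO above could cover, using boto 2
        # calls that do exist (set_canned_acl, get_acl, add_user_grant); the
        # key name is an illustrative assumption. Left commented out since
        # the test itself is not implemented:
        #key = self.test_bucket.new_key('acl-sketch-' + str(int(time.time())))
        #key.set_contents_from_string(self.test_object_data)
        #key.set_canned_acl('public-read')      #valid canned ACL
        #policy = key.get_acl()                 #read the grants back
        #for grant in policy.acl.grants:
        #    self.tester.info("Grant: " + str(grant.permission))
        #key.add_user_grant('READ', self.tester.s3.get_canonical_user_id())
        #try:
        #    key.set_canned_acl('not-a-real-acl')   #invalid canned ACL should error
        #except S3ResponseError:
        #    pass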
        
    def test_object_torrent(self):
        """Tests object torrents"""
        self.fail("Feature not implemented yet")

    
    def clean_method(self):
        '''This is the teardown method'''
        #Delete the testing bucket if it is left-over
        self.tester.info('Deleting the buckets used for testing')
        for bucket in list(self.buckets_used):   #iterate a copy; the set is mutated below
            try:
                self.tester.info('Checking bucket ' + bucket + ' for possible cleaning/delete')
                if self.tester.s3.bucket_exists(bucket):
                    self.tester.info('Found bucket exists, cleaning it')
                    self.tester.clear_bucket(bucket)
                    self.buckets_used.remove(bucket)
                else:
                    self.tester.info('Bucket ' + bucket + ' not found, skipping')
            except:
                self.tester.info('Exception checking bucket ' + bucket)

        return
Example #22
0
class HAtests(InstanceBasics, BucketTestSuite):
    def __init__(self):
        self.setuptestcase()
        self.setup_parser()
        self.get_args()
        if not boto.config.has_section('Boto'):
            boto.config.add_section('Boto')
            boto.config.set('Boto', 'num_retries', '1')
            boto.config.set('Boto', 'http_socket_timeout', '20')
        self.tester = Eucaops(config_file=self.args.config_file,
                              password=self.args.password)
        self.tester.ec2.connection.timeout = 30
        self.servman = self.tester.service_manager
        self.instance_timeout = 120
        ### Add and authorize a group for the instance
        self.start_time = str(int(time.time()))
        try:
            self.group = self.tester.add_group(group_name="group-" +
                                               self.start_time)
            self.tester.authorize_group_by_name(group_name=self.group.name)
            self.tester.authorize_group_by_name(group_name=self.group.name,
                                                port=-1,
                                                protocol="icmp")
            ### Generate a keypair for the instance
            self.keypair = self.tester.add_keypair("keypair-" +
                                                   self.start_time)
            self.keypath = os.curdir + "/" + self.keypair.name + ".pem"
            if self.args.emi:
                self.image = self.tester.get_emi(self.args.emi)
            else:
                self.image = self.tester.get_emi(
                    root_device_type="instance-store")
            self.reservation = None
            self.private_addressing = False
            self.bucket_prefix = "buckettestsuite-" + self.start_time + "-"
            self.test_user_id = self.tester.s3.get_canonical_user_id()
            zones = self.tester.ec2.get_all_zones()
            self.zone = random.choice(zones).name

            self.tester.clc = self.tester.service_manager.get_enabled_clc(
            ).machine
            self.version = self.tester.clc.sys(
                "cat " + self.tester.eucapath +
                "/etc/eucalyptus/eucalyptus-version")[0]
            ### Create standing resources that will be checked after all failures
            ### Instance, volume, buckets
            ###
            self.standing_reservation = self.tester.run_instance(
                image=self.image,
                keypair=self.keypair.name,
                group=self.group.name,
                zone=self.zone)
            self.volume = self.tester.create_volume(self.zone)
            self.device = self.standing_reservation.instances[0].attach_volume(
                self.volume)
            for instance in self.standing_reservation.instances:
                instance.sys("echo " + instance.id + " > " + self.device)
            self.standing_bucket_name = "failover-bucket-" + self.start_time
            self.standing_bucket = self.tester.create_bucket(
                self.standing_bucket_name)
            self.standing_key_name = "failover-key-" + self.start_time
            self.standing_key = self.tester.upload_object(
                self.standing_bucket_name, self.standing_key_name)
            self.standing_key = self.tester.get_objects_by_prefix(
                self.standing_bucket_name, self.standing_key_name)
            self.run_instance_params = {
                'image': self.image,
                'keypair': self.keypair.name,
                'group': self.group.name,
                'zone': self.zone,
                'timeout': self.instance_timeout
            }
        except Exception as e:
            self.clean_method()
            raise Exception("Init for testcase failed. Reason: " + str(e))