def test_bucket_location(self): test_bucket = BucketTest.bucket_prefix + "location_test_bucket" print_header('Starting bucket location test using bucket: ' + test_bucket) BucketTest.walrus.create_bucket(test_bucket) bucket = BucketTest.walrus.get_bucket(test_bucket) if bucket != None and (bucket.get_location() == Location.DEFAULT or bucket.get_location() == 'US'): BucketTest.walrus.delete_bucket(test_bucket) else: bucket.delete() self.fail("Bucket location test failed, could not get bucket or location is not 'US'") test_bucket = BucketTest.bucket_prefix + "eu_location_test" bucket = BucketTest.walrus.create_bucket(test_bucket,location=Location.EU) if bucket == None: self.fail("Bucket creation at location EU failed") else: loc = bucket.get_location() if loc == Location.EU: print "Got correct bucket location, EU" bucket.delete() else: print "Incorrect bucket location, failing" bucket.delete() self.fail("Bucket location incorrect, expected: EU, got: " + loc) pass
def setUpClass(cls): print_header("Setting up Bucket Test") random.seed(time.time()) host = config.endpoint path = config.path port = config.port access_key = config.access_key secret_key = config.secret_key print "Config = (" + host + ":" + str(port) + path + "--" + access_key + "--" + secret_key + ")" cls.walrus = S3Connection(host=host,path=path,port=port,aws_access_key_id=access_key,aws_secret_access_key=secret_key,is_secure=False,calling_format=OrdinaryCallingFormat(), debug=2) #Create some test data for the objects cls.test_object_data = "" for i in range(0, cls.data_size): cls.test_object_data += chr(random.randint(32,126)) print "Generated data for objects: " + cls.test_object_data #TODO: delete all versions and delete markers so that a versioned bucket can be deleted #Clean up existing buckets etc to make sure the test is clear. print "Cleaning existing buckets with " + ObjectTest.bucket_prefix + " prefix from previous tests" try : listing = ObjectTest.walrus.get_all_buckets() for bucket in listing: if bucket.name.startswith(ObjectTest.bucket_prefix): clean_bucket(bucket) else: print "skipping bucket: " + bucket.name except S3ResponseError as e: print "Exception caught doing bucket cleanup." print "Done with test setup\n\n"
def setUpClass(cls): print_header("Setting up Bucket Test") host = config.endpoint path = config.path port = config.port access_key = config.access_key secret_key = config.secret_key print "Config = (" + host + ":" + str(port) + path + "--" + access_key + "--" + secret_key + ")" #cls.walrus = S3Connection(host=host,path=path,port=port,aws_access_key_id=access_key,aws_secret_access_key=secret_key,is_secure=False,calling_format=OrdinaryCallingFormat(), debug=2) cls.walrus = eucaops.S3ops(config_file="config",password="******") #Clean up existing buckets etc to make sure the test is clear. print "Cleaning existing buckets with " + BucketTest.bucket_prefix + " prefix from previous tests" try : listing = BucketTest.walrus.get_all_buckets() for bucket in listing: if bucket.name.startswith(BucketTest.bucket_prefix): print "Getting bucket listing for " + bucket.name key_list = bucket.list() for k in key_list: if isinstance(k, boto.s3.prefix.Prefix): print "Skipping prefix" continue print "Deleting key: " + k.name bucket.delete_key(k) bucket.delete() else: print "skipping bucket: " + bucket.name except S3ResponseError as e: print "Exception caught doing bucket cleanup." print "Done with test setup\n\n"
def setUp(self): """Initialize the env for a test with a new, randomly named bucket""" print_header("Generating bucket for test") self.test_bucket_name = ObjectTest.bucket_prefix + str( random.randint(0, 100)) self.test_bucket = self.walrus.create_bucket(self.test_bucket_name) print "Random bucket: " + self.test_bucket_name
def test(context):
    """Run all tests"""
    drivers = ("docker", "lxd")
    for scenario in test_utils.get_molecule_scenarios(context):
        for driver in drivers:
            title = f"Molecule test {scenario} ({driver})"
            test_utils.print_header(title)
            test_utils.run_molecule(context, "test", scenario, driver)
            test_utils.print_success_message(title)
def test_object_versionlisting(self):
    """Tests object version listing from a bucket.

    Uploads version_max versions for each of keyrange keys, then checks that
    get_all_versions() returns the expected number of entries in sorted
    (newest-first per key) order, and finally issues a smaller max_keys
    request to exercise pagination.
    """
    version_max = 3
    keyrange = 100
    print_header(
        "Testing listing versions in a bucket and pagination using " +
        str(keyrange) + " keys with " + str(version_max) +
        " versions per key")
    if not self.enable_versioning(self.test_bucket):
        self.fail("Could not enable versioning properly. Failing")
    key = "testkey"
    keys = [key + str(k) for k in range(0, keyrange)]
    contents = [
        ObjectTest.test_object_data + "--v" + str(v)
        for v in range(0, version_max)
    ]
    try:
        for keyname in keys:
            #Put 3 versions of each key
            for v in range(0, version_max):
                self.test_bucket.new_key(keyname).set_contents_from_string(
                    contents[v])
    except S3ResponseError as e:
        self.fail("Failed putting object versions for test: " +
                  str(e.status))
    listing = self.test_bucket.get_all_versions()
    print "Bucket version listing is " + str(
        len(listing)) + " entries long"
    #NOTE(review): 999 presumably reflects the server's listing cap when more
    #than 1000 versions exist -- confirm against the Walrus listing limit.
    if keyrange * version_max >= 1000:
        assert (len(listing) == 999)
    else:
        assert (len(listing) == keyrange * version_max)
    #Walk the listing and flag any adjacent pair that is out of order.
    prev_obj = None
    should_fail = None
    for obj in listing:
        if isinstance(obj, Key):
            print "Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified
            if prev_obj != None:
                #compare_versions > 0 means prev_obj sorts after obj: bad order.
                if self.compare_versions(prev_obj, obj) > 0:
                    should_fail = obj
            prev_obj = obj
        else:
            print "Not a key, skipping: " + str(obj)
    if should_fail != None:
        self.fail("Version listing not sorted correctly, offending key: " + should_fail.name + " version: " + should_fail.version_id + " date: " + should_fail.last_modified)
    #Now try with a known-smaller max-keys to ensure that the pagination works.
    #NOTE(review): page_listing is fetched but never asserted on -- the
    #pagination check appears unfinished.
    page_listing = self.test_bucket.get_all_versions(max_keys=(keyrange / 2))
def test_bucket_key_list_delim_prefix(self): """Tests the prefix/delimiter functionality of key listings and parsing""" test_bucket_name = BucketTest.bucket_prefix + "testbucketdelim" print_header('Testing bucket key list delimiters and prefixes using bucket: ' + test_bucket_name) try: testbucket = self.walrus.create_bucket(bucket_name=test_bucket_name) except S3CreateError: print "bucket already exists, using it" try: testbucket = self.walrus.get_bucket(bucket_name=test_bucket_name) except S3ResponseError as err: print "Fatal error: could to create or get bucket" for b in self.walrus.get_all_buckets(): print b.name self.fail("Could not setup bucket, " + test_bucket_name + " for test: " + err.error_message ) prefix = "users" delim = "/" for i in range(10): tmp = str(i) print "adding keys iteration " + tmp key = testbucket.new_key("testobject" + tmp) key.set_content_from_string("adlsfjaoivajsdlajsdfiajsfdlkajsfkajdasd") key = testbucket.new_key(prefix + "testkey" + tmp) key.set_content_from_string("asjaoidjfafdjaoivnw") key = testbucket.new_key(prefix + delim + "object" + tmp) key.set_content_from_string("avjaosvdafajsfd;lkaj") key = testbucket.new_key(prefix + delim + "objects" + delim + "photo" + tmp + ".jpg") key.set_content_from_string("aoiavsvjasldfjadfiajss") keys = testbucket.get_all_keys(prefix=prefix, delimiter=delim, max_keys=10) print "Prefix with 10 keys max returned: " + keys.size() + " results" for k in keys: print k.key() keys = testbucket.get_all_keys(prefix=prefix, delimiter=delim, max_keys=20) print "Prefix with 20 keys max returned: " + keys.size() + " results" for k in keys: print k.key() print "Cleaning up the bucket" for i in range(10): testbucket.delete_key("testobject" + str(i)) testbucket.delete_key(prefix + "testkey" + str(i)) testbucket.delete_key(prefix + delim + "object" + str(i)) testbucket.delete_key(prefix + delim + "objects" + delim + "photo" + str(i) + ".jpg") print "Deleting the bucket" self.walrus.delete_bucket(testbucket)
def test_bucket_key_listing_paging(self): """Test paging of long results lists correctly and in alpha order""" test_bucket_name = BucketTest.bucket_prefix + "pagetestbucket" print_header('Testing bucket key listing pagination using bucket: ' + test_bucket_name) try: testbucket = BucketTest.walrus.create_bucket(bucket_name=test_bucket_name) except S3CreateError: print "Bucket already exists, getting it" try: testbucket = BucketTest.walrus.get_bucket(bucket_name=test_bucket_name) except S3ResponseError as err: print "Fatal error: could not get bucket or create it" for b in BucketTest.walrus.get_all_buckets(): print b.name self.fail("Could not get bucket, " + test_bucket_name + " to start test: " + err.error_message) key_name_prefix = "testkey" for i in range(100): key_name = key_name_prefix + str(i) print "creating object: " + key_name testbucket.new_key(key_name).set_contents_from_string("testcontents123testtesttesttest") for i in range(100): key_name = key_name_prefix + "/key" + str(i) print "creating object: " + key_name testbucket.new_key(key_name).set_contents_from_string("testafadsoavlsdfoaifafdslafajfaofdjasfd") key_list = testbucket.get_all_keys(max_keys=50) print "Got " + str(len(key_list)) + " entries back" if len(key_list) != 50: self.fail("Expected 50 keys back, got " + str(len(key_list))) for k in key_list: print k.key() for i in range(100): key_name = key_name_prefix + str(i) print "Deleting key: " + key_name testbucket.delete_key(key_name) key_name = key_name_prefix + "/key" + str(i) print "Deleting key: " + key_name testbucket.delete_key(key_name) print "Cleaning up the bucket" key_list = testbucket.get_all_keys() for k in key_list: print "Deleting key: " + k.key() testbucket.delete_key(k) print "Deleting the bucket" self.walrus.delete_bucket(testbucket)
def test_object_byte_offset_read(self): """Tests fetching specific byte offsets of the object""" print_header("Byte-range Offset GET Test") testkey = "rangetestkey" source_bytes = bytearray(self.test_object_data) #Put the object initially self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data) #Test range for first 100 bytes of object print "Trying start-range object get" try: data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=0-99"}) except: self.fail("Failed range object get first 100 bytes") startrangedata = bytearray(data_str) print "Got: " + startrangedata print "Expected: " + str(source_bytes[:100]) for i in range(0,100): if startrangedata[i] != source_bytes[i]: print "Byte: " + startrangedata[i] + " differs!" self.fail("Start-range Ranged-get failed") print "Trying mid-object range" try: data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=500-599"}) except: self.fail("Failed range object get for middle 100 bytes") midrangedata = bytearray(data_str) for i in range(500,600): if midrangedata[i] != source_bytes[i]: print "Byte: " + midrangedata[i] + "differs!" self.fail("Mid-range Ranged-get failed") print "Trying end-range object get" #Test range for last 100 bytes of object try: data_str = Key(bucket=self.test_bucket,name=testkey).get_contents_as_string(headers={"Range":"bytes=800-899"}) except: self.fail("Failed range object get for last 100 bytes") endrangedata = bytearray(data_str) print "Got: " + str(endrangedata) try: for i in range(800,900): if endrangedata[i] != source_bytes[i]: print "Byte: " + endrangedata[i] + "differs!" self.fail("End-range Ranged-get failed") except Exception as e: print "Exception! Received: " + e print "Range test complete"
def test(context):
    """Run all tests"""
    excluded = {"backuppc", "backuppc-xs", "rsync-bpc", "apache"}
    for scenario in test_utils.get_molecule_scenarios(context):
        if scenario in excluded:
            continue
        for driver in ("docker", "lxd"):
            platform = "linux" if scenario == "failing" else "ubuntu"
            title = f"Molecule test {scenario} ({driver})"
            test_utils.print_header(title)
            test_utils.run_molecule(context, "test", scenario, driver, platform)
            test_utils.print_success_message(title)
def test(context):
    """Run all tests"""
    scenarios = test_utils.get_molecule_scenarios(context)
    drivers = ("docker", "lxd")
    for scenario in scenarios:
        # apt_origins only ships an ubuntu platform; everything else uses linux.
        platform = "ubuntu" if scenario == "apt_origins" else "linux"
        for driver in drivers:
            title = f"Molecule test {scenario} ({driver})"
            test_utils.print_header(title)
            test_utils.run_molecule(context, "test", scenario, driver, platform=platform)
            test_utils.print_success_message(title)
def test_bucket_versioning(self): test_bucket = BucketTest.bucket_prefix + "versioning_test_bucket" print_header('Testing bucket versioning using bucket:' + test_bucket) version_bucket = BucketTest.walrus.create_bucket(test_bucket) version_status = version_bucket.get_versioning_status().get("Versioning") #Test the default setup after bucket creation. Should be disabled. if version_status != None: version_bucket.delete() self.fail("Expected versioning disabled, found: " + str(version_status)) elif version_status == None: print("Null version status returned, correct since it should be disabled") #Turn on versioning, confirm that it is 'Enabled' version_bucket.configure_versioning(True) version_status = version_bucket.get_versioning_status().get("Versioning") if version_status == None or version_status != "Enabled": version_bucket.delete() self.fail("Expected versioning enabled, found: " + str(version_status)) elif version_status == None: version_bucket.delete() self.fail("Null version status returned") print "Versioning of bucket is set to: " + version_status #Turn off/suspend versioning, confirm. 
version_bucket.configure_versioning(False) version_status = version_bucket.get_versioning_status().get("Versioning") if version_status == None or version_status != "Suspended": version_bucket.delete() self.fail("Expected versioning suspended, found: " + str(version_status)) elif version_status == None: version_bucket.delete() self.fail("Null version status returned") print "Versioning of bucket is set to: " + version_status version_bucket.configure_versioning(True) version_status = version_bucket.get_versioning_status().get("Versioning") if version_status == None or version_status != "Enabled": version_bucket.delete() self.fail("Expected versioning enabled, found: " + str(version_status)) elif version_status == None: version_bucket.delete() self.fail("Null version status returned") print "Versioning of bucket is set to: " + version_status version_bucket.delete() print "Bucket Versioning: PASSED"
def test_object_versionlisting(self):
    """Tests object version listing from a bucket.

    Uploads version_max versions for each of keyrange keys, checks the
    total count and sort order of get_all_versions(), then issues a
    smaller max_keys request to exercise pagination.
    """
    version_max = 3
    keyrange = 100
    print_header("Testing listing versions in a bucket and pagination using " + str(keyrange) + " keys with " + str(version_max) + " versions per key")
    if not self.enable_versioning(self.test_bucket):
        self.fail("Could not enable versioning properly. Failing")
    key = "testkey"
    keys = [key + str(k) for k in range(0, keyrange)]
    contents = [ObjectTest.test_object_data + "--v" + str(v) for v in range(0, version_max)]
    try:
        for keyname in keys:
            #Put 3 versions of each key
            for v in range(0, version_max):
                self.test_bucket.new_key(keyname).set_contents_from_string(contents[v])
    except S3ResponseError as e:
        self.fail("Failed putting object versions for test: " + str(e.status))
    listing = self.test_bucket.get_all_versions()
    print "Bucket version listing is " + str(len(listing)) + " entries long"
    #NOTE(review): 999 presumably reflects the server-side listing cap when
    #more than 1000 versions exist -- confirm against the Walrus limit.
    if keyrange * version_max >= 1000:
        assert (len(listing) == 999)
    else:
        assert (len(listing) == keyrange * version_max)
    #Walk the listing and record any adjacent pair that is out of order.
    prev_obj = None
    should_fail = None
    for obj in listing:
        if isinstance(obj, Key):
            print "Key: " + obj.name + " -- " + obj.version_id + "--" + obj.last_modified
            if prev_obj != None:
                #compare_versions > 0 means prev_obj sorts after obj: bad order.
                if self.compare_versions(prev_obj, obj) > 0:
                    should_fail = obj
            prev_obj = obj
        else:
            print "Not a key, skipping: " + str(obj)
    if should_fail != None:
        self.fail("Version listing not sorted correctly, offending key: " + should_fail.name + " version: " + should_fail.version_id + " date: " + should_fail.last_modified)
    #Now try with a known-smaller max-keys to ensure that the pagination works.
    #NOTE(review): page_listing is fetched but never asserted on -- the
    #pagination check appears unfinished.
    page_listing = self.test_bucket.get_all_versions(max_keys=(keyrange / 2))
def setUpClass(cls):
    """One-time setup: connect to Walrus, generate shared object data,
    and purge leftover buckets from previous runs.

    Reads connection parameters from the module-level config object and
    stores the S3Connection on the class as cls.walrus.
    """
    print_header("Setting up Bucket Test")
    random.seed(time.time())
    host = config.endpoint
    path = config.path
    port = config.port
    access_key = config.access_key
    secret_key = config.secret_key
    print "Config = (" + host + ":" + str(
        port) + path + "--" + access_key + "--" + secret_key + ")"
    cls.walrus = S3Connection(host=host,
                              path=path,
                              port=port,
                              aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key,
                              is_secure=False,
                              calling_format=OrdinaryCallingFormat(),
                              debug=2)
    #Create some test data for the objects (printable ASCII, cls.data_size chars).
    cls.test_object_data = ""
    for i in range(0, cls.data_size):
        cls.test_object_data += chr(random.randint(32, 126))
    print "Generated data for objects: " + cls.test_object_data
    #TODO: delete all versions and delete markers so that a versioned bucket can be deleted
    #Clean up existing buckets etc to make sure the test is clear.
    print "Cleaning existing buckets with " + ObjectTest.bucket_prefix + " prefix from previous tests"
    try:
        listing = ObjectTest.walrus.get_all_buckets()
        for bucket in listing:
            if bucket.name.startswith(ObjectTest.bucket_prefix):
                clean_bucket(bucket)
            else:
                print "skipping bucket: " + bucket.name
    except S3ResponseError as e:
        #NOTE(review): the exception detail is dropped here; cleanup failures
        #are hard to diagnose from this message alone.
        print "Exception caught doing bucket cleanup."
    print "Done with test setup\n\n"
def test_object_large_objects(self): """Test operations on large objects (>1MB), but not so large that we must use the multi-part upload interface""" print_header("Testing large-ish objects over 1MB in size") test_data = "" large_obj_size_bytes = 25 * 1024 * 1024 #25MB #Create some test data for i in range(0, large_obj_size_bytes): test_data += chr(random.randint(32, 126)) keyname = "largeobj" key = self.test_bucket.new_key(keyname) key.set_contents_as_string(test_data) ret_key = self.test_bucket.get_key(keyname) ret_data = ret_key.get_contents_as_string() if ret_data != test_data: self.fail("Fetched data and generated data don't match") else: print "Data matches!"
def test_object_large_objects(self): """Test operations on large objects (>1MB), but not so large that we must use the multi-part upload interface""" print_header("Testing large-ish objects over 1MB in size") test_data = "" large_obj_size_bytes = 25 * 1024 * 1024 #25MB #Create some test data for i in range(0, large_obj_size_bytes): test_data += chr(random.randint(32,126)) keyname = "largeobj" key = self.test_bucket.new_key(keyname) key.set_contents_as_string(test_data) ret_key = self.test_bucket.get_key(keyname) ret_data = ret_key.get_contents_as_string() if ret_data != test_data: self.fail("Fetched data and generated data don't match") else: print "Data matches!"
def tearDown(self):
    """Per-test teardown: empty and remove the bucket created in setUp."""
    print_header("Cleaning up the test bucket: " + self.test_bucket_name)
    bucket = self.test_bucket
    self.test_bucket = None
    clean_bucket(bucket)
def test_object_versioning_suspended(self): """Tests object versioning on a suspended bucket, a more complicated test than the Enabled test""" print_header("Testing bucket Versioning-Suspended") #Create some keys keyname1 = "versionkey1" keyname2 = "versionkey2" keyname3 = "versionkey3" keyname4 = "versionkey4" keyname5 = "versionkey5" v1data = self.test_object_data + "--version1" v2data = self.test_object_data + "--version2" v3data = self.test_object_data + "--version3" vstatus = self.test_bucket.get_versioning_status() if vstatus != None: self.fail("Versioning status should be null/Disabled") else: print "Bucket versioning is Disabled" self.put_object(bucket=self.test_bucket, object_key=keyname1, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname2, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname3, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname5, object_data=v1data) key1 = self.test_bucket.get_key(keyname1) key2 = self.test_bucket.get_key(keyname2) key3 = self.test_bucket.get_key(keyname3) key4 = self.test_bucket.get_key(keyname4) key5 = self.test_bucket.get_key(keyname5) print "Initial bucket state after object uploads without versioning:" self.print_key_info(keys=[key1, key2, key3, key4, key5]) #Enable versioning self.test_bucket.configure_versioning(True) if self.test_bucket.get_versioning_status(): print "Versioning status correctly set to enabled" else: print "Versionign status not enabled, should be." 
#Update a subset of the keys key1_etag2 = self.put_object(bucket=self.test_bucket, object_key=keyname1, object_data=v2data) key2_etag2 = self.put_object(bucket=self.test_bucket, object_key=keyname2, object_data=v2data) key3_etag2 = self.put_object(bucket=self.test_bucket, object_key=keyname3, object_data=v2data) key3_etag3 = self.put_object(bucket=self.test_bucket, object_key=keyname3, object_data=v3data) #Delete a key self.test_bucket.delete_key(keyname5) #Suspend versioning self.test_bucket.configure_versioning(False) #Get latest of each key key1 = self.test_bucket.get_key(keyname1) key2 = self.test_bucket.get_key(keyname2) key3 = self.test_bucket.get_key(keyname3) key4 = self.test_bucket.get_key(keyname4) key5 = self.test_bucket.get_key(keyname5)
def test_bucket_get_put_delete(self): '''Tests creating and deleting buckets as well as getting the bucket listing''' test_bucket=self.bucket_prefix + "simple_test_bucket" print_header("Starting get/put/delete bucket test using bucket name: " + test_bucket) try : bucket = BucketTest.walrus.create_bucket(test_bucket) if bucket == None: BucketTest.walrus.delete_bucket(test_bucket) self.fail(test_bucket + " was not created correctly") except (S3ResponseError, S3CreateError) as e: self.fail(test_bucket + " create caused exception: " + e) try : bucket = BucketTest.walrus.get_bucket(test_bucket) if bucket == None: BucketTest.walrus.delete_bucket(test_bucket) self.fail(test_bucket +" was not fetched by get_bucket call") except S3ResponseError as e: BucketTest.walrus.delete_bucket(test_bucket) self.fail("Exception getting bucket" + e) BucketTest.walrus.delete_bucket(test_bucket) try : if BucketTest.walrus.get_bucket(test_bucket) != None: BucketTest.walrus.delete_bucket(test_bucket) self.fail("Delete of " + test_bucket + " failed, still exists") except S3ResponseError as e: print "Correctly got exception trying to get a deleted bucket! " print "Testing an invalid bucket names, calls should fail." try: bad_bucket = BucketTest.bucket_prefix + "bucket123/" BucketTest.create_bucket(bad_bucket) should_fail = True try: BucketTest.delete_bucket(bad_bucket) except: print "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" if should_fail: self.fail("Should have caught exception for bad bucket name: " + bad_bucket) except: print "Correctly caught the exception" try: bad_bucket = BucketTest.bucket_prefix + "bucket.123" BucketTest.create_bucket(bad_bucket) should_fail = True try: BucketTest.delete_bucket(bad_bucket) except: print "Exception deleting bad bucket, shouldn't be here anyway. 
Test WILL fail" if should_fail: self.fail("Should have caught exception for bad bucket name: " + bad_bucket) except: print "Correctly caught the exception" try: bad_bucket = BucketTest.bucket_prefix + "bucket&123" BucketTest.create_bucket(bad_bucket) should_fail = True try: BucketTest.delete_bucket(bad_bucket) except: print "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" if should_fail: self.fail("Should have caught exception for bad bucket name: " + bad_bucket) except: print "Correctly caught the exception" try: bad_bucket = BucketTest.bucket_prefix + "bucket*123" BucketTest.create_bucket(bad_bucket) should_fail = True try: BucketTest.delete_bucket(bad_bucket) except: print "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" if should_fail: self.fail("Should have caught exception for bad bucket name: " + bad_bucket) except: print "Correctly caught the exception" try: bad_bucket = BucketTest.bucket_prefix + "/bucket123" BucketTest.create_bucket(bad_bucket) should_fail = True try: BucketTest.delete_bucket(bad_bucket) except: print "Exception deleting bad bucket, shouldn't be here anyway. Test WILL fail" if should_fail: self.fail("Should have caught exception for bad bucket name: " + bad_bucket) except: print "Correctly caught the exception" print "Finished bucket get/put/delete test" print "Bucket GET/PUT/DELETE: PASSED" pass
def test_object_versioning_enabled(self):
    """Tests object versioning for get/put/delete on a versioned bucket.

    Sequence: put v1, get v1, put v2, put v3, get a specific version (v1),
    delete the latest (v3), restore v1 via server-side copy, put v3 again,
    then delete v2 explicitly by version id and confirm v3 is on top.
    """
    print_header("Testing bucket Versioning-Enabled")
    if not self.enable_versioning(self.test_bucket):
        self.fail("Could not properly enable versioning")
    #Create some keys
    keyname = "versionkey"
    #Multiple versions of the data
    v1data = self.test_object_data + "--version1"
    v2data = self.test_object_data + "--version2"
    v3data = self.test_object_data + "--version3"
    #Test sequence: put v1, get v1, put v2, put v3, get v3, delete v3, restore with v1 (copy), put v3 again, delete v2 explicitly
    self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v1data)
    #Get v1
    obj_v1 = self.test_bucket.get_key(keyname)
    assert (check_hashes(eTag=obj_v1.etag, data=v1data))
    print "Initial bucket state after object uploads without versioning:"
    self.print_key_info(keys=[obj_v1])
    #Put v2 (and get/head to confirm success)
    self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v2data)
    obj_v2 = self.test_bucket.get_key(keyname)
    assert (check_hashes(eTag=obj_v2.etag, data=v2data))
    self.print_key_info(keys=[obj_v1, obj_v2])
    #Put v3 (and get/head to confirm success)
    self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v3data)
    obj_v3 = self.test_bucket.get_key(keyname)
    assert (check_hashes(eTag=obj_v3.etag, data=v3data))
    self.print_key_info(keys=[obj_v1, obj_v2, obj_v3])
    #Get a specific version, v1
    v1_return = self.test_bucket.get_key(key_name=keyname, version_id=obj_v1.version_id)
    self.print_key_info(keys=[v1_return])
    #Delete current latest version (v3)
    self.test_bucket.delete_key(keyname)
    #GET of the key should now 404 (delete marker on top).
    try:
        del_obj = self.test_bucket.get_key(keyname)
        self.fail("Should have gotten 404 not-found error, but got: " + del_obj.key + " instead")
    except S3ResponseError as e:
        print "Correctly got " + str(
            e.status) + " in response to GET of a deleted key"
    #Restore v1 using copy
    try:
        self.test_bucket.copy_key(new_key_name=obj_v1.key,
                                  src_bucket_name=self.test_bucket_name,
                                  src_key_name=keyname,
                                  src_version_id=obj_v1.version_id)
    except S3ResponseError as e:
        self.fail(
            "Failed to restore key from previous version using copy got error: "
            + str(e.status))
    restored_obj = self.test_bucket.get_key(keyname)
    assert (check_hashes(eTag=restored_obj.etag, data=v1data))
    self.print_key_info(keys=[restored_obj])
    #Put v3 again
    self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v3data)
    assert (check_hashes(eTag=obj_v3.etag, data=v3data))
    self.print_key_info([self.test_bucket.get_key(keyname)])
    #Delete v2 explicitly
    self.test_bucket.delete_key(key_name=obj_v2.key, version_id=obj_v2.version_id)
    #NOTE(review): if get_key() returns None instead of raising here, the
    #fail() message would itself raise on del_obj.key -- worth hardening.
    try:
        del_obj = None
        del_obj = self.test_bucket.get_key(keyname, version_id=obj_v2.version_id)
        self.fail("Should have gotten 404 not-found error, but got: " + del_obj.key + " instead")
    except S3ResponseError as e:
        print "Correctly got " + str(
            e.status) + " in response to GET of a deleted key"
    #Show what's on top
    top_obj = self.test_bucket.get_key(keyname)
    self.print_key_info([top_obj])
    assert (check_hashes(eTag=top_obj.etag, data=v3data))
    print "Finished the versioning enabled test. Success!!"
def test_object_byte_offset_read(self): """Tests fetching specific byte offsets of the object""" print_header("Byte-range Offset GET Test") testkey = "rangetestkey" source_bytes = bytearray(self.test_object_data) #Put the object initially self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data) #Test range for first 100 bytes of object print "Trying start-range object get" try: data_str = Key(bucket=self.test_bucket, name=testkey).get_contents_as_string( headers={"Range": "bytes=0-99"}) except: self.fail("Failed range object get first 100 bytes") startrangedata = bytearray(data_str) print "Got: " + startrangedata print "Expected: " + str(source_bytes[:100]) for i in range(0, 100): if startrangedata[i] != source_bytes[i]: print "Byte: " + startrangedata[i] + " differs!" self.fail("Start-range Ranged-get failed") print "Trying mid-object range" try: data_str = Key(bucket=self.test_bucket, name=testkey).get_contents_as_string( headers={"Range": "bytes=500-599"}) except: self.fail("Failed range object get for middle 100 bytes") midrangedata = bytearray(data_str) for i in range(500, 600): if midrangedata[i] != source_bytes[i]: print "Byte: " + midrangedata[i] + "differs!" self.fail("Mid-range Ranged-get failed") print "Trying end-range object get" #Test range for last 100 bytes of object try: data_str = Key(bucket=self.test_bucket, name=testkey).get_contents_as_string( headers={"Range": "bytes=800-899"}) except: self.fail("Failed range object get for last 100 bytes") endrangedata = bytearray(data_str) print "Got: " + str(endrangedata) try: for i in range(800, 900): if endrangedata[i] != source_bytes[i]: print "Byte: " + endrangedata[i] + "differs!" self.fail("End-range Ranged-get failed") except Exception as e: print "Exception! Received: " + e print "Range test complete"
def test_object_basic_ops(self): """Tests basic object get/put/delete/head on a normal bucket""" print_header("Basic Object Operations Test (GET/PUT/HEAD)") if self.test_bucket == None: self.fail("Error: test_bucket not set, cannot run test") #Test PUT & GET testkey = "testkey1" self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data) ret_key = self.test_bucket.get_key(testkey) ret_content = ret_key.get_contents_as_string() if ret_content == ObjectTest.test_object_data: print "Set content = get content, put passed" else: if ret_content != None: print "Got content: " + ret_content else: print "No content returned" print "Expected content: " + ObjectTest.test_object_data self.fail("Put content not the same as what was returned") #Test HEAD key_meta = self.test_bucket.get_key(testkey) if key_meta != ret_key: print "Something is wrong, the HEAD operation returned different metadata than the GET operation" else: print "HEAD meta = GET meta, all is good" #Test copy operation (GET w/source headers) new_key = "testkey2" self.test_bucket.copy_key(new_key, self.test_bucket_name, testkey) keylist = self.test_bucket.list() counter = 0 for k in keylist: if isinstance(k, Prefix): print "Prefix: " + "NULL" if k == None else k.name else: print "Key: " + k.name + " Etag: " + k.etag counter += 1 if counter != 2: self.fail("Expected 2 keys after copy operation, found only: " + len(keylist)) try: ret_key = self.test_bucket.get_key(new_key) except: self.fail("Could not get object copy") if ret_key == None: self.fail("Could not get object copy") if self.test_bucket.get_key(testkey).get_contents_as_string( ) != ret_key.get_contents_as_string(): self.fail("Contents of original key and copy don't match") else: print "Copy key contents match original!" 
#Test DELETE self.test_bucket.delete_key(testkey) ret_key = None try: ret_key = self.test_bucket.get_key(testkey) print "Erroneously got: " + ret_key.name self.fail( "Should have thrown exception for getting a non-existent object" ) except S3ResponseError as e: if e.status == 404: print "Correctly could not get the deleted object" else: self.fail( "Couldn't get deleted object, but got error other than 404: " + str(e.status)) print "Finishing basic ops test"
def test_bucket_acl(self): test_bucket = BucketTest.bucket_prefix + "acl_bucket_test" print_header('Starting ACL test with bucket name: ' + test_bucket) try: acl_bucket = BucketTest.walrus.create_bucket(test_bucket) except S3CreateError: print "Can't create the bucket, already exists. Deleting it an trying again" try : BucketTest.walrus.delete_bucket(test_bucket) acl_bucket = BucketTest.walrus.create_bucket(test_bucket) except: print "Couldn't delete and create new bucket. Failing test" self.fail("Couldn't make the test bucket: " + test_bucket) policy = acl_bucket.get_acl() if policy == None: self.fail("No acl returned") print policy #Check that the acl is correct: owner full control. if policy.acl.grants.__len__() > 1: BucketTest.walrus.delete_bucket(test_bucket) self.fail("Expected only 1 grant in acl. Found: " + policy.acl.grants.grants.__len__()) if policy.acl.grants[0].display_name != "eucalyptus" or policy.acl.grants[0].permission != "FULL_CONTROL": BucketTest.walrus.delete_bucket(test_bucket) self.fail("Unexpected grant encountered: " + policy.acl.grants[0].display_name + " " + policy.acl.grants[0].permission) #upload a new acl for the bucket new_acl = policy new_acl.acl.add_user_grant(permission="READ", user_id=BucketTest.test_user_id, display_name="eucalyptus_test") try: acl_bucket.set_acl(new_acl) acl_check = acl_bucket.get_acl() except S3ResponseError: self.fail("Failed to set or get new acl") print "Got ACL: " + acl_check.acl.to_xml() expected_result='<AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>INSERT_USER_ID_HERE</ID><DisplayName>eucalyptus</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>123456</ID><DisplayName></DisplayName></Grantee><Permission>READ</Permission></Grant></AccessControlList>' if acl_check == None or acl_check.acl.to_xml() != 
expected_result.replace("INSERT_USER_ID_HERE",BucketTest.test_user_id): BucketTest.walrus.delete_bucket(test_bucket) self.fail("Incorrect acl length or acl not found: " + str(acl_check.acl.to_xml())) print "Grants 0 and 1: " + acl_check.acl.grants[0].to_xml() + " -- " + acl_check.acl.grants[1].to_xml() #Check each canned ACL string in boto to make sure Walrus does it right for acl in boto.s3.acl.CannedACLStrings: try: acl_bucket.set_acl(acl) acl_check = acl_bucket.get_acl() except Exception as e: BucketTest.walrus.delete_bucket(test_bucket) self.fail("Got exception trying to set acl to " + acl + ": " + str(e)) print "Expecting a " + acl + " acl, got: " + acl_check.acl.to_xml() expected_acl = test_utils.get_canned_acl(BucketTest.test_user_id,'private') if expected_acl == None: BucketTest.walrus.delete_bucket(test_bucket) self.fail("Got None when trying to generate expected acl for canned acl string: " + acl) if expected_acl != acl_check.acl: BucketTest.walrus.delete_bucket(test_bucket) self.fail("Invalid " + acl + " acl returned from Walrus: " + acl_check.acl.to_xml()) else: print "Got correct acl for: " + acl try: acl_bucket.set_acl('invalid-acl') except: print "Caught expected exception from invalid canned-acl" BucketTest.walrus.delete_bucket(test_bucket) print "Bucket ACL: PASSED" pass
def setUp(self): """Initialize the env for a test with a new, randomly named bucket""" print_header("Generating bucket for test") self.test_bucket_name = ObjectTest.bucket_prefix + str(random.randint(0,100)) self.test_bucket = self.walrus.create_bucket(self.test_bucket_name) print "Random bucket: " + self.test_bucket_name
def test_object_basic_ops(self): """Tests basic object get/put/delete/head on a normal bucket""" print_header("Basic Object Operations Test (GET/PUT/HEAD)") if self.test_bucket == None: self.fail("Error: test_bucket not set, cannot run test") #Test PUT & GET testkey="testkey1" self.put_object(bucket=self.test_bucket, object_key=testkey, object_data=self.test_object_data) ret_key = self.test_bucket.get_key(testkey) ret_content = ret_key.get_contents_as_string() if ret_content == ObjectTest.test_object_data: print "Set content = get content, put passed" else: if ret_content != None: print "Got content: " + ret_content else: print "No content returned" print "Expected content: " + ObjectTest.test_object_data self.fail("Put content not the same as what was returned") #Test HEAD key_meta = self.test_bucket.get_key(testkey) if key_meta != ret_key: print "Something is wrong, the HEAD operation returned different metadata than the GET operation" else: print "HEAD meta = GET meta, all is good" #Test copy operation (GET w/source headers) new_key = "testkey2" self.test_bucket.copy_key(new_key, self.test_bucket_name,testkey) keylist = self.test_bucket.list() counter = 0 for k in keylist: if isinstance(k, Prefix): print "Prefix: " + "NULL" if k == None else k.name else: print "Key: " + k.name + " Etag: " + k.etag counter += 1 if counter != 2: self.fail("Expected 2 keys after copy operation, found only: " + len(keylist)) try: ret_key = self.test_bucket.get_key(new_key) except: self.fail("Could not get object copy") if ret_key == None: self.fail("Could not get object copy") if self.test_bucket.get_key(testkey).get_contents_as_string() != ret_key.get_contents_as_string(): self.fail("Contents of original key and copy don't match") else: print "Copy key contents match original!" 
#Test DELETE self.test_bucket.delete_key(testkey) ret_key = None try: ret_key = self.test_bucket.get_key(testkey) print "Erroneously got: " + ret_key.name self.fail("Should have thrown exception for getting a non-existent object") except S3ResponseError as e: if e.status == 404: print "Correctly could not get the deleted object" else: self.fail("Couldn't get deleted object, but got error other than 404: " + str(e.status)) print "Finishing basic ops test"
def test_object_versioning_suspended(self): """Tests object versioning on a suspended bucket, a more complicated test than the Enabled test""" print_header("Testing bucket Versioning-Suspended") #Create some keys keyname1 = "versionkey1" keyname2 = "versionkey2" keyname3 = "versionkey3" keyname4 = "versionkey4" keyname5 = "versionkey5" v1data = self.test_object_data + "--version1" v2data = self.test_object_data + "--version2" v3data = self.test_object_data + "--version3" vstatus = self.test_bucket.get_versioning_status() if vstatus != None: self.fail("Versioning status should be null/Disabled") else: print "Bucket versioning is Disabled" self.put_object(bucket=self.test_bucket, object_key=keyname1, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname2, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname3, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname4, object_data=v1data) self.put_object(bucket=self.test_bucket, object_key=keyname5, object_data=v1data) key1 = self.test_bucket.get_key(keyname1) key2 = self.test_bucket.get_key(keyname2) key3 = self.test_bucket.get_key(keyname3) key4 = self.test_bucket.get_key(keyname4) key5 = self.test_bucket.get_key(keyname5) print "Initial bucket state after object uploads without versioning:" self.print_key_info(keys=[key1,key2,key3,key4,key5]) #Enable versioning self.test_bucket.configure_versioning(True) if self.test_bucket.get_versioning_status(): print "Versioning status correctly set to enabled" else: print "Versionign status not enabled, should be." 
#Update a subset of the keys key1_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname1,object_data=v2data) key2_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname2,object_data=v2data) key3_etag2=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v2data) key3_etag3=self.put_object(bucket=self.test_bucket, object_key=keyname3,object_data=v3data) #Delete a key self.test_bucket.delete_key(keyname5) #Suspend versioning self.test_bucket.configure_versioning(False) #Get latest of each key key1=self.test_bucket.get_key(keyname1) key2=self.test_bucket.get_key(keyname2) key3=self.test_bucket.get_key(keyname3) key4=self.test_bucket.get_key(keyname4) key5=self.test_bucket.get_key(keyname5)
def test_object_versioning_enabled(self): """Tests object versioning for get/put/delete on a versioned bucket""" print_header("Testing bucket Versioning-Enabled") if not self.enable_versioning(self.test_bucket): self.fail("Could not properly enable versioning") #Create some keys keyname = "versionkey" #Multiple versions of the data v1data = self.test_object_data + "--version1" v2data = self.test_object_data + "--version2" v3data = self.test_object_data + "--version3" #Test sequence: put v1, get v1, put v2, put v3, get v3, delete v3, restore with v1 (copy), put v3 again, delete v2 explicitly self.put_object(bucket=self.test_bucket, object_key=keyname, object_data=v1data) #Get v1 obj_v1 = self.test_bucket.get_key(keyname) assert(check_hashes(eTag=obj_v1.etag,data=v1data)) print "Initial bucket state after object uploads without versioning:" self.print_key_info(keys=[obj_v1]) #Put v2 (and get/head to confirm success) self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v2data) obj_v2 = self.test_bucket.get_key(keyname) assert(check_hashes(eTag=obj_v2.etag,data=v2data)) self.print_key_info(keys=[obj_v1, obj_v2]) #Put v3 (and get/head to confirm success) self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data) obj_v3 = self.test_bucket.get_key(keyname) assert(check_hashes(eTag=obj_v3.etag,data=v3data)) self.print_key_info(keys=[obj_v1, obj_v2, obj_v3]) #Get a specific version, v1 v1_return = self.test_bucket.get_key(key_name=keyname,version_id=obj_v1.version_id) self.print_key_info(keys=[v1_return]) #Delete current latest version (v3) self.test_bucket.delete_key(keyname) try: del_obj = self.test_bucket.get_key(keyname) self.fail("Should have gotten 404 not-found error, but got: " + del_obj.key + " instead") except S3ResponseError as e: print "Correctly got " + str(e.status) + " in response to GET of a deleted key" #Restore v1 using copy try: 
self.test_bucket.copy_key(new_key_name=obj_v1.key,src_bucket_name=self.test_bucket_name,src_key_name=keyname,src_version_id=obj_v1.version_id) except S3ResponseError as e: self.fail("Failed to restore key from previous version using copy got error: " + str(e.status)) restored_obj = self.test_bucket.get_key(keyname) assert(check_hashes(eTag=restored_obj.etag,data=v1data)) self.print_key_info(keys=[restored_obj]) #Put v3 again self.put_object(bucket=self.test_bucket, object_key=keyname,object_data=v3data) assert(check_hashes(eTag=obj_v3.etag,data=v3data)) self.print_key_info([self.test_bucket.get_key(keyname)]) #Delete v2 explicitly self.test_bucket.delete_key(key_name=obj_v2.key,version_id=obj_v2.version_id) try: del_obj = None del_obj = self.test_bucket.get_key(keyname,version_id=obj_v2.version_id) self.fail("Should have gotten 404 not-found error, but got: " + del_obj.key + " instead") except S3ResponseError as e: print "Correctly got " + str(e.status) + " in response to GET of a deleted key" #Show what's on top top_obj = self.test_bucket.get_key(keyname) self.print_key_info([top_obj]) assert(check_hashes(eTag=top_obj.etag,data=v3data)) print "Finished the versioning enabled test. Success!!"