def set_as_logging_target(self, headers=None):
    """Grant the S3 log-delivery group the permissions it needs on this bucket.

    Fetches the bucket's current ACL policy, appends WRITE and READ_ACP
    grants for ``self.LoggingGroup``, and writes the policy back, so the
    service can deliver access logs into this bucket.

    :param headers: optional extra HTTP headers passed through to the
        get_acl/set_acl calls.
    """
    policy = self.get_acl(headers=headers)
    # The service requires both grants (object write + acl read) before
    # it will deliver logs; order matches the original WRITE-then-READ_ACP.
    for perm in ('WRITE', 'READ_ACP'):
        policy.acl.add_grant(
            Grant(permission=perm, type='Group', uri=self.LoggingGroup))
    self.set_acl(policy, headers=headers)
def get_canned_acl(owner_id=None, canned_acl=None, bucket_owner_id=None):
    '''
    Returns an acl object equivalent to the named canned acl, suitable for
    applying to a bucket or key.

    owner_id         Account id of the owner of the bucket. Required.
    canned_acl       Canned acl to implement. Required. Options:
                     ['public-read', 'public-read-write',
                     'authenticated-read', 'log-delivery-write',
                     'bucket-owner-read', 'bucket-owner-full-control']
    bucket_owner_id  Required for the bucket-owner-read and
                     bucket-owner-full-control acls.

    Raises S3opsException when owner_id or canned_acl is missing, and
    Exception when a bucket-owner-* acl is requested without a
    bucket_owner_id.
    '''
    if owner_id is None or canned_acl is None:
        raise S3opsException( "No owner_id or canned_acl passed to get_canned_acl()" )
    # The owner always holds FULL_CONTROL; use user_id= to match the
    # bucket-owner grants below (the original mixed id=/user_id=).
    owner_fc_grant = Grant(permission="FULL_CONTROL", user_id=owner_id)
    built_acl = ACL()
    built_acl.add_grant(owner_fc_grant)
    if canned_acl == "public-read":
        built_acl.add_grant(
            Grant(permission="READ", uri=s3_groups["all_users"]))
    elif canned_acl == "public-read-write":
        built_acl.add_grant(
            Grant(permission="READ", uri=s3_groups["all_users"]))
        built_acl.add_grant(
            Grant(permission="WRITE", uri=s3_groups["all_users"]))
    elif canned_acl == "authenticated-read":
        built_acl.add_grant(
            Grant(permission="READ", uri=s3_groups["authenticated_users"]))
    elif canned_acl == "log-delivery-write":
        built_acl.add_grant(
            Grant(permission="WRITE", uri=s3_groups["log_delivery"]))
    elif canned_acl == "bucket-owner-read":
        if bucket_owner_id is None:
            raise Exception("No bucket_owner_id passed when trying to create bucket-owner-read canned acl ")
        built_acl.add_grant(Grant(permission="READ", user_id=bucket_owner_id))
    elif canned_acl == "bucket-owner-full-control":
        if bucket_owner_id is None:
            raise Exception("No bucket_owner_id passed when trying to create bucket-owner-full-control canned acl ")
        built_acl.add_grant(
            Grant(permission="FULL_CONTROL", user_id=bucket_owner_id))
    return built_acl
def get_canned_acl(owner_id=None, canned_acl=None, bucket_owner_id=None):
    """Build an ACL object mirroring the named canned acl.

    Unlike the exception-raising variant of this helper, this version
    signals failure by returning None: when owner_id or canned_acl is
    missing, or when canned_acl is not a recognized option.

    owner_id         account id of the bucket owner; always granted
                     FULL_CONTROL
    canned_acl       one of 'public-read', 'public-read-write',
                     'authenticated-read', 'log-delivery-write',
                     'bucket-owner-read', 'bucket-owner-full-control'
    bucket_owner_id  account id used by the bucket-owner-* options
    """
    if owner_id is None or canned_acl is None:
        return None
    owner_fc_grant = Grant(permission="FULL_CONTROL", user_id=owner_id)
    built_acl = ACL()
    built_acl.add_grant(owner_fc_grant)
    if canned_acl == "public-read":
        built_acl.add_grant(
            Grant(permission="READ", uri=s3_groups["all_users"]))
    elif canned_acl == "public-read-write":
        built_acl.add_grant(
            Grant(permission="READ", uri=s3_groups["all_users"]))
        built_acl.add_grant(
            Grant(permission="WRITE", uri=s3_groups["all_users"]))
    elif canned_acl == "authenticated-read":
        built_acl.add_grant(
            Grant(permission="READ", uri=s3_groups["authenticated_users"]))
    elif canned_acl == "log-delivery-write":
        built_acl.add_grant(
            Grant(permission="WRITE", uri=s3_groups["log_delivery"]))
    elif canned_acl == "bucket-owner-read":
        built_acl.add_grant(Grant(permission="READ", user_id=bucket_owner_id))
    elif canned_acl == "bucket-owner-full-control":
        built_acl.add_grant(
            Grant(permission="FULL_CONTROL", user_id=bucket_owner_id))
    else:
        # No canned-acl value found
        return None
    return built_acl
def test_logging(self):
    """Enable logging on a source bucket and verify the logging status.

    self.bucket is used as the *target* bucket so that teardown deletes
    any log files that land in it automatically; the test only has to
    clean up the source bucket it creates here.
    """
    src_name = "src-" + self.bucket_name
    src_bucket = self.conn.create_bucket(src_name)
    # Grant log-write permission to the target bucket via canned acl.
    self.bucket.set_acl("log-delivery-write")
    target_bucket = self.bucket_name
    target_prefix = u"jp/ログ/"
    # Logging must start out disabled.
    status = src_bucket.get_logging_status()
    self.assertEqual(status.target, None)
    # Turn logging on, granting authenticated users READ on the logs.
    authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
    auth_grant = Grant(permission="READ", type="Group", uri=authuri)
    src_bucket.enable_logging(target_bucket,
                              target_prefix=target_prefix,
                              grants=[auth_grant])
    # Re-read the status and confirm everything we set round-tripped.
    status = src_bucket.get_logging_status()
    self.assertEqual(status.target, target_bucket)
    self.assertEqual(status.prefix, target_prefix)
    self.assertEqual(len(status.grants), 1)
    self.assertEqual(status.grants[0].type, "Group")
    self.assertEqual(status.grants[0].uri, authuri)
    # Finally delete the source bucket.
    src_bucket.delete()
def get_canned_acl(self, canned_acl=None, bucket_owner_id=None,
                   bucket_owner_display_name=None):
    '''
    Returns an acl object that can be applied to a bucket or key. It is
    intended to be used to verify results that the service returns. To
    set a canned-acl you can simply set it on the bucket directly
    without this method.

    canned_acl                 Canned acl to implement. Required.
                               Options: ['private', 'public-read',
                               'public-read-write', 'authenticated-read',
                               'log-delivery-write', 'bucket-owner-read',
                               'bucket-owner-full-control']
    bucket_owner_id            Account id of the owner of the bucket.
                               Required.
    bucket_owner_display_name  Required. The account display name for
                               the bucket owner, so that the correct
                               permission can be generated fully.

    Raises S3opsException when any required argument is missing.
    '''
    if bucket_owner_id is None or canned_acl is None \
            or bucket_owner_display_name is None:
        raise S3opsException( "No user_id or canned_acl passed to get_canned_acl()")
    built_acl = ACL()
    # The bucket owner always holds FULL_CONTROL regardless of the canned acl.
    built_acl.add_user_grant(permission='FULL_CONTROL',
                             user_id=bucket_owner_id,
                             display_name=bucket_owner_display_name)
    if canned_acl == "public-read":
        built_acl.add_grant(
            Grant(permission="READ", type='Group',
                  uri=self.s3_groups["all_users"]))
    elif canned_acl == "public-read-write":
        built_acl.add_grant(
            Grant(permission="READ", type='Group',
                  uri=self.s3_groups["all_users"]))
        built_acl.add_grant(
            Grant(permission="WRITE", type='Group',
                  uri=self.s3_groups["all_users"]))
    elif canned_acl == "authenticated-read":
        built_acl.add_grant(
            Grant(permission="READ", type='Group',
                  uri=self.s3_groups["authenticated_users"]))
    elif canned_acl == "log-delivery-write":
        built_acl.add_grant(
            Grant(permission="WRITE", type='Group',
                  uri=self.s3_groups["log_delivery"]))
    elif canned_acl == "bucket-owner-read":
        # bucket_owner_id was validated at the top of the method; the
        # inner None-check the original carried here was unreachable.
        built_acl.add_grant(Grant(permission="READ", id=bucket_owner_id))
    elif canned_acl == "bucket-owner-full-control":
        built_acl.add_grant(
            Grant(permission="FULL_CONTROL", id=bucket_owner_id))
    return built_acl
def test_bucket_logging(self):
    """This is not a valid test at the moment, logging requires at least
    an hour of time between logging enabled and file delivery of events
    to the dest bucket"""
    self.tester.info("\n\nStarting bucket logging test")
    test_bucket = self.bucket_prefix + "logging_test_bucket"
    log_dest_bucket = self.bucket_prefix + "logging_destination_test_bucket"
    self.buckets_used.add(test_bucket)
    self.buckets_used.add(log_dest_bucket)
    log_prefix = "log_prefix_test"
    try:
        bucket = self.tester.s3.create_bucket(test_bucket)
    except S3CreateError:
        self.tester.info("Bucket exists, trying to delete and re-create")
        try:
            self.tester.s3.delete_bucket(test_bucket)
            bucket = self.tester.s3.create_bucket(test_bucket)
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
        # are not swallowed here.
        except Exception:
            self.tester.info(
                "Couldn't delete and create new bucket...failing")
            self.fail(
                "Couldn't get clean bucket already existed and could not be deleted"
            )
    try:
        dest_bucket = self.tester.s3.create_bucket(log_dest_bucket)
    except S3CreateError:
        self.tester.info("Bucket exists, trying to delete and re-create")
        try:
            self.tester.s3.delete_bucket(log_dest_bucket)
            dest_bucket = self.tester.s3.create_bucket(log_dest_bucket)
        except Exception:
            self.tester.info(
                "Couldn't delete and create new bucket...failing")
            self.fail(
                "Couldn't get clean bucket already existed and could not be deleted"
            )
    # The LogDelivery group needs WRITE (to deposit log objects) and
    # READ_ACP (to read the destination bucket's acl) before delivery works.
    log_delivery_policy = dest_bucket.get_acl()
    log_delivery_policy.acl.add_grant(
        Grant(type="Group",
              uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
              permission="WRITE"))
    log_delivery_policy.acl.add_grant(
        Grant(type="Group",
              uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
              permission="READ_ACP"))
    dest_bucket.set_acl(log_delivery_policy)
    bucket.enable_logging(log_dest_bucket, target_prefix=log_prefix)
    # test the logging by doing something that will require logging
    k = bucket.new_key('key1')
    k.set_contents_from_string('content123')
    k = bucket.new_key('key2')
    k.set_contents_from_string("content456")
    k = bucket.get_key('key1')
    result1 = k.get_contents_as_string()
    self.tester.info("Got content:\n\t " + result1)
    k = bucket.get_key('key2')
    result2 = k.get_contents_as_string()
    self.tester.info("Got content:\n\t " + result2)
    keylist = bucket.list()
    self.tester.info("Listing keys...")
    for k in keylist:
        if isinstance(k, boto.s3.prefix.Prefix):
            self.tester.info("Prefix found")
        else:
            self.tester.info('Key--' + k.name)
    # Allow some time for the op writes to be logged... this may need to be tweaked
    time.sleep(15)
    # Now check the log to be sure something was logged
    log_bucket = self.tester.s3.get_bucket(log_dest_bucket)
    for k in log_bucket.list(prefix=log_prefix):
        self.tester.info('Key -- ' + k.name)
        # BUGFIX: get_key() takes the key *name*, not the Key object.
        log_obj = log_bucket.get_key(k.name)
        self.tester.info("Log content:\n\t" + k.get_contents_as_string())
        # BUGFIX: was get_content_as_string (no such method on boto Key),
        # which raised AttributeError before any log data was printed.
        log_data = log_obj.get_contents_as_string()
        self.tester.info('Log data is: ' + log_data)
    self.tester.info('Deleting used bucket')
def startElement(self, name, attrs, connection):
    """SAX start-tag hook: on a <Grant> element, append a fresh Grant to
    self.grants and return it so the parser populates it; every other
    element is ignored by returning None."""
    if name != 'Grant':
        return None
    grant = Grant()
    self.grants.append(grant)
    return grant