Example #1
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3(aws_secret_access_key=self.ec2_secret_key,
                                   aws_access_key_id=self.ec2_access_key)

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(self.bucket_name, location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
            k.set_acl('public-read-write')

            return "https://s3.amazonaws.com/{bucket}/{filename}".format(bucket=self.bucket_name, filename=filename)

        except Exception as e:
            logging.error("S3StorageAgent failed with exception:\n{0}".format(str(e)))
            logging.error(traceback.format_exc())
            raise e
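All of these upload examples follow the same boto2 pattern: build a Lifecycle, attach one or more rules, and push the whole set with configure_lifecycle(). A minimal sketch of that round trip, assuming default credentials and a hypothetical bucket named 'example-bucket':

import boto
from boto.s3.lifecycle import Lifecycle, Expiration

conn = boto.connect_s3()  # credentials come from the environment or boto config
bucket = conn.get_bucket('example-bucket')  # hypothetical bucket name

lifecycle = Lifecycle()
# expire everything under logs/ ten days after creation
lifecycle.add_rule('expire-logs', prefix='logs/', status='Enabled',
                   expiration=Expiration(days=10))
bucket.configure_lifecycle(lifecycle)

# read the configuration back to verify what S3 stored
for rule in bucket.get_lifecycle_config():
    print rule.id, rule.prefix, rule.status

Note that configure_lifecycle() replaces the bucket's entire lifecycle configuration, which is why these examples rebuild every rule before each call.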
Example #2
 def test_lifecycle_multi(self):
     date = '2022-10-12T00:00:00.000Z'
     sc = 'GLACIER'
     lifecycle = Lifecycle()
     lifecycle.add_rule("1", "1/", "Enabled", 1)
     lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
     lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
     lifecycle.add_rule("4", "4/", "Enabled", None,
         Transition(days=4, storage_class=sc))
     lifecycle.add_rule("5", "5/", "Enabled", None,
         Transition(date=date, storage_class=sc))
     # set the lifecycle
     self.bucket.configure_lifecycle(lifecycle)
     # read the lifecycle back
     readlifecycle = self.bucket.get_lifecycle_config()
     for rule in readlifecycle:
         if rule.id == "1":
             self.assertEqual(rule.prefix, "1/")
             self.assertEqual(rule.expiration.days, 1)
         elif rule.id == "2":
             self.assertEqual(rule.prefix, "2/")
             self.assertEqual(rule.expiration.days, 2)
         elif rule.id == "3":
             self.assertEqual(rule.prefix, "3/")
             self.assertEqual(rule.expiration.date, date)
         elif rule.id == "4":
             self.assertEqual(rule.prefix, "4/")
             self.assertEqual(rule.transition.days, 4)
             self.assertEqual(rule.transition.storage_class, sc)
         elif rule.id == "5":
             self.assertEqual(rule.prefix, "5/")
             self.assertEqual(rule.transition.date, date)
             self.assertEqual(rule.transition.storage_class, sc)
         else:
             self.fail("unexpected id %s" % rule.id)
Example #3
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3()

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(self.bucket_name, location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
            k.set_acl('public-read-write')

        except Exception as e:
            sys.stdout.write("AmazonS3Agent failed with exception:\n{0}".format(str(e)))
            sys.stdout.flush()
            raise e
Example #4
 def test_lifecycle_multi(self):
     date = '2022-10-12T00:00:00.000Z'
     sc = 'GLACIER'
     lifecycle = Lifecycle()
     lifecycle.add_rule("1", "1/", "Enabled", 1)
     lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
     lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
     lifecycle.add_rule("4", "4/", "Enabled", None,
                        Transition(days=4, storage_class=sc))
     lifecycle.add_rule("5", "5/", "Enabled", None,
                        Transition(date=date, storage_class=sc))
     # set the lifecycle
     self.bucket.configure_lifecycle(lifecycle)
     # read the lifecycle back
     readlifecycle = self.bucket.get_lifecycle_config()
     for rule in readlifecycle:
         if rule.id == "1":
             self.assertEqual(rule.prefix, "1/")
             self.assertEqual(rule.expiration.days, 1)
         elif rule.id == "2":
             self.assertEqual(rule.prefix, "2/")
             self.assertEqual(rule.expiration.days, 2)
         elif rule.id == "3":
             self.assertEqual(rule.prefix, "3/")
             self.assertEqual(rule.expiration.date, date)
         elif rule.id == "4":
             self.assertEqual(rule.prefix, "4/")
             self.assertEqual(rule.transition.days, 4)
             self.assertEqual(rule.transition.storage_class, sc)
         elif rule.id == "5":
             self.assertEqual(rule.prefix, "5/")
             self.assertEqual(rule.transition.date, date)
             self.assertEqual(rule.transition.storage_class, sc)
         else:
             self.fail("unexpected id %s" % rule.id)
Example #5
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename',
                               prefix='logs/',
                               status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3()

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(
                    self.bucket_name,
                    location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename,
                                         cb=self.percent_cb,
                                         num_cb=10)
            k.set_acl('public-read-write')

        except Exception as e:
            sys.stdout.write(
                "AmazonS3Agent failed with exception:\n{0}".format(str(e)))
            sys.stdout.flush()
            raise e
Example #6
def glacier(name):
    bucket = conn.get_bucket(name)
    to_glacier = boto.s3.lifecycle.Transition(days=30, storage_class='GLACIER')
    rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    lifecycle.add_rule("lc1", "/", "Enabled", 5)
    bucket.configure_lifecycle(lifecycle)
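Example #6 shows both ways of adding rules: constructing a Rule directly and calling add_rule(). A single Rule can also carry a transition and an expiration together, which none of the examples above happen to do. A sketch under the same boto2 API (the rule id, prefix, and day counts are hypothetical, and bucket is assumed to exist):

from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition

# archive logs/ to Glacier after 30 days, then delete after 365 days
to_glacier = Transition(days=30, storage_class='GLACIER')
rule = Rule('archive-then-delete', 'logs/', 'Enabled',
            expiration=Expiration(days=365), transition=to_glacier)

lifecycle = Lifecycle()
lifecycle.append(rule)  # Lifecycle subclasses list, so append() works
bucket.configure_lifecycle(lifecycle)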
Example #7
def setDeletionPolicy(bucket):
	"""
	Creates a lifecycle policy that automatically deletes all the files in the subfolder after one day.
	"""
	lifecycle = Lifecycle()
	lifecycle.add_rule("Audo-delete objects in %s after 1 day" % aws_common.S3_RESPONSE_PREFIX, aws_common.S3_RESPONSE_PREFIX, "Enabled", 1)
	print "Added deletion policy"
	bucket.configure_lifecycle(lifecycle)
Example #8
def get_lifecycle(expiration_path, days_to_expiration):
    if days_to_expiration is not None and expiration_path is not None:
        lifecycle = Lifecycle()
        print "Adding expiration rule of %s days for S3 path %s" % (days_to_expiration, expiration_path)
        lifecycle.add_rule('expirationrule', prefix=expiration_path, status='Enabled', expiration=Expiration(days=int(days_to_expiration)))
        return lifecycle
    else:
        print "No expiration rule added"
        return None
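A short usage sketch for this helper, assuming a bucket object already exists:

lifecycle = get_lifecycle('logs/', '14')
if lifecycle is not None:
    bucket.configure_lifecycle(lifecycle)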
Example #9
 def test_lifecycle_with_defaults(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule(expiration=30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertNotEqual(len(actual_lifecycle.id), 0)
     self.assertEqual(actual_lifecycle.prefix, '')
Example #10
 def test_lifecycle_with_defaults(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule(expiration=30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertNotEqual(len(actual_lifecycle.id), 0)
     self.assertEqual(actual_lifecycle.prefix, '')
Example #11
 def test_lifecycle(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule('myid', '', 'Enabled', 30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertEqual(actual_lifecycle.id, 'myid')
     self.assertEqual(actual_lifecycle.prefix, '')
     self.assertEqual(actual_lifecycle.status, 'Enabled')
     self.assertEqual(actual_lifecycle.transition, None)
Example #12
 def test_lifecycle(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule('myid', '', 'Enabled', 30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertEqual(actual_lifecycle.id, 'myid')
     self.assertEqual(actual_lifecycle.prefix, '')
     self.assertEqual(actual_lifecycle.status, 'Enabled')
     self.assertEqual(actual_lifecycle.transition, None)
Example #13
def upload(bucket_name, image_name, image):
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)

    lifecycle = Lifecycle()
    lifecycle.add_rule('s3-image-uploader', prefix=FILE_PREFIX, status='Enabled', expiration=Expiration(days=EXPIRATION))
    bucket.configure_lifecycle(lifecycle)

    k = Key(bucket)
    k.key = image_name
    k.set_contents_from_string(image)
Example #14
def test_lifecycle_delete():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    lifecycle.add_rule(expiration=30)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    response.should.have.length_of(1)

    bucket.delete_lifecycle_configuration()
    bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError)
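Tests written in this .should style come from moto's own suite and run against an in-memory S3 rather than a real account. In the older moto releases that still patched boto2, the usual setup was the mock_s3 decorator (later renamed mock_s3_deprecated once boto3 became the default); a sketch under that assumption:

import boto
import boto.s3
from boto.s3.lifecycle import Lifecycle
from moto import mock_s3

@mock_s3
def test_lifecycle_roundtrip():
    # everything below talks to moto's in-memory S3; no AWS account needed
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    lifecycle.add_rule(expiration=30)
    bucket.configure_lifecycle(lifecycle)
    assert len(bucket.get_lifecycle_config()) == 1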
Example #15
def get_lifecycle(expiration_path, days_to_expiration):
    if days_to_expiration is not None and expiration_path is not None:
        lifecycle = Lifecycle()
        print "Adding expiration rule of %s days for S3 path %s" % (
            days_to_expiration, expiration_path)
        lifecycle.add_rule('expirationrule',
                           prefix=expiration_path,
                           status='Enabled',
                           expiration=Expiration(days=int(days_to_expiration)))
        return lifecycle
    else:
        print "No expiration rule added"
        return None
Example #16
 def test_lifecycle_jp(self):
     # test lifecycle with Japanese prefix
     name = "Japanese files"
     prefix = "日本語/"
     days = 30
     lifecycle = Lifecycle()
     lifecycle.add_rule(name, prefix, "Enabled", days)
     # set the lifecycle
     self.bucket.configure_lifecycle(lifecycle)
     # read the lifecycle back
     readlifecycle = self.bucket.get_lifecycle_config()
     for rule in readlifecycle:
         self.assertEqual(rule.id, name)
         self.assertEqual(rule.expiration.days, days)
Example #17
def test_lifecycle_create():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    lifecycle.add_rule('myid', '', 'Enabled', 30)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    len(response).should.equal(1)
    lifecycle = response[0]
    lifecycle.id.should.equal('myid')
    lifecycle.prefix.should.equal('')
    lifecycle.status.should.equal('Enabled')
    list(lifecycle.transition).should.equal([])
Example #18
def test_lifecycle_create():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    lifecycle.add_rule("myid", "", "Enabled", 30)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    len(response).should.equal(1)
    lifecycle = response[0]
    lifecycle.id.should.equal("myid")
    lifecycle.prefix.should.equal("")
    lifecycle.status.should.equal("Enabled")
    list(lifecycle.transition).should.equal([])
Example #19
def upload(bucket_name, image_name, image):
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)

    lifecycle = Lifecycle()
    lifecycle.add_rule('s3-image-uploader',
                       prefix=FILE_PREFIX,
                       status='Enabled',
                       expiration=Expiration(days=EXPIRATION))
    bucket.configure_lifecycle(lifecycle)

    k = Key(bucket)
    k.key = image_name
    k.set_contents_from_string(image)
Example #20
 def test_lifecycle_jp(self):
     # test lifecycle with Japanese prefix
     name = "Japanese files"
     prefix = u"日本語/"
     days = 30
     lifecycle = Lifecycle()
     lifecycle.add_rule(name, prefix, "Enabled", days)
     # set the lifecycle
     self.bucket.configure_lifecycle(lifecycle)
     # read the lifecycle back
     readlifecycle = self.bucket.get_lifecycle_config()
     for rule in readlifecycle:
         self.assertEqual(rule.id, name)
         self.assertEqual(rule.expiration.days, days)
Example #21
    def update_with_config(self, config):
        lifecycle = Lifecycle()
        got_rule = False
        for x_rule in config.findall("Rule"):
            got_rule = True
            lifecycle.add_rule(**self.get_rule_kwargs_from_xml(x_rule))

        if got_rule:
            success = self.bucket.configure_lifecycle(lifecycle)
        else:
            success = self.bucket.delete_lifecycle_configuration()
        if not success:
            print "Failed to update rules"
            sys.exit(1)
        print "Successfully updated rule"
Example #22
 def _cleanup_s3(self, bucket_name):
     """
     Adds lifecycle rule (DEL_LIFECYCLE_PATTERN % bucket_name)
     to bucket_name that marks all objects in this
     bucket as expiring(delete) in 1 day
     """
     conn = boto.connect_s3()
     b = conn.get_bucket( bucket_name )
     del_all_pattern = DEL_LIFECYCLE_PATTERN 
     msg =  "Setting deletion lifecycle rule for %s" 
     msg = msg % bucket_name 
     self.logger.info(msg)
     lf = Lifecycle()
     lf.add_rule( id=del_all_pattern % b.name,
             expiration=Expiration(days=1),
             prefix='', status='Enabled',
             transition=None  )
     b.configure_lifecycle(lf)
Example #23
def create_folder_and_lifecycle(bucket_name, directory, expiration):
    ''' creates or modifies an existing folder and updates
        the expiration lifecycle '''
    # Connect to s3 and get the bucket object
    try:
        ak, sk = get_env_creds()
        s3 = boto.connect_s3(aws_access_key_id=ak,
                             aws_secret_access_key=sk)
        bucket = s3.get_bucket(bucket_name)
    except:
        print 'Could not connect to AWS/Bucket: %s' % str(sys.exc_info())
    # if there are no files in this folder yet, create a placeholder lifecycle file
    try:
        count = 0
        files = bucket.list(prefix=directory)
        for f in files:
            count += 1
        if count <= 1:  # insert a dummy file; needed otherwise the policy won't apply
            k = boto.s3.key.Key(bucket)
            k.key = directory + '/.lifecycle_policy.txt'
            utc_now = datetime.utcnow()
            exp_time = utc_now + timedelta(days=expiration)
            content = ('This file was created by the upload portal. The '
                       'expiration policy for this folder was created on %s.'
                       ' These file(s) will automatically expire %s days'
                       ' later, on %s.') % (utc_now.ctime(),
                                            str(expiration),
                                            exp_time.ctime())
            k.set_contents_from_string(content)
    except:
        pass
    # Create and apply the life cycle object to the prefix
    try:
        directory = directory.encode('ascii')
        lifecycle = Lifecycle()
        lifecycle.add_rule(id=directory,
                           prefix=directory,
                           status='Enabled',
                           expiration=expiration)
        bucket.configure_lifecycle(lifecycle)
    except:
        return 'Error creating lifecycle: %s' % str(sys.exc_info())
Example #24
def s3_uploader(db_backup_bucket, gpg_file_path, update_seq, checksum):
    # get_all_buckets() returns Bucket objects, so compare against their names
    if db_backup_bucket not in [b.name for b in con_s3.get_all_buckets()]:
        print 'Backup bucket is missing, creating new bucket ', db_backup_bucket
        con_s3.create_bucket(db_backup_bucket)

    bucket = con_s3.get_bucket(db_backup_bucket)
    lifecycle = Lifecycle()
    lifecycle.add_rule('14 Days CouchDB Expiration', os.path.basename(gpg_file_path), 'Enabled', 14)
    bucket.configure_lifecycle(lifecycle)

    key = Key(bucket)
    key.key = os.path.basename(gpg_file_path)
    key.set_acl('authenticated-read')
    key.set_metadata('UpdateSeq', update_seq)
    key.set_metadata('Checksum', checksum)
    key.set_contents_from_filename(gpg_file_path)
    key.close()

    print 'Finished uploading backup to S3'
Example #25
def create_folder_and_lifecycle(bucket_name, directory, expiration):
    ''' creates or modifies an existing folder and updates
        the expiration lifecycle '''
    # Connect to s3 and get the bucket object
    try:
        ak, sk = get_env_creds()
        s3 = boto.connect_s3(aws_access_key_id=ak, aws_secret_access_key=sk)
        bucket = s3.get_bucket(bucket_name)
    except:
        print 'Could not connect to AWS/Bucket: %s' % str(sys.exc_info())
    # if there are no files in this folder yet, create a placeholder lifecycle file
    try:
        count = 0
        files = bucket.list_versions(prefix=directory)
        for f in files:
            count += 1
        if count <= 1:  # insert a dummy file; needed otherwise the policy won't apply
            k = boto.s3.key.Key(bucket)
            k.key = directory + '/.lifecycle_policy.txt'
            utc_now = datetime.utcnow()
            exp_time = utc_now + timedelta(days=expiration)
            content = ('This file was created by the upload portal. The '
                       'expiration policy for this folder was created on %s.'
                       ' These file(s) will automatically expire %s days'
                       ' later, on %s.') % (utc_now.ctime(), str(expiration),
                                            exp_time.ctime())
            k.set_contents_from_string(content)
    except:
        pass
    # Create and apply the life cycle object to the prefix
    try:
        directory = directory.encode('ascii')
        lifecycle = Lifecycle()
        lifecycle.add_rule(id=directory,
                           prefix=directory,
                           status='Enabled',
                           expiration=expiration)
        bucket.configure_lifecycle(lifecycle)
    except:
        return 'Error creating lifecycle: %s' % str(sys.exc_info())
Example #26
def s3_uploader(db_backup_bucket, gpg_file_path, update_seq, checksum):
    # get_all_buckets() returns Bucket objects, so compare against their names
    if db_backup_bucket not in [b.name for b in con_s3.get_all_buckets()]:
        print 'Backup bucket is missing, creating new bucket ', db_backup_bucket
        con_s3.create_bucket(db_backup_bucket)

    bucket = con_s3.get_bucket(db_backup_bucket)
    lifecycle = Lifecycle()
    lifecycle.add_rule('14 Days CouchDB Expiration',
                       os.path.basename(gpg_file_path), 'Enabled', 14)
    bucket.configure_lifecycle(lifecycle)

    key = Key(bucket)
    key.key = os.path.basename(gpg_file_path)
    key.set_acl('authenticated-read')
    key.set_metadata('UpdateSeq', update_seq)
    key.set_metadata('Checksum', checksum)
    key.set_contents_from_filename(gpg_file_path)
    key.close()

    print 'Finished uploading backup to S3'
Example #27
    def uploadfile(self, file, bucketname):
        try:
            uploadfile = file
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                   expiration=Expiration(days=10))
            conn = boto.connect_s3()

            if conn.lookup(bucketname):  # bucket exists
                bucket = conn.get_bucket(bucketname)
            else:
                #create a bucket
                bucket = conn.create_bucket(bucketname, location=boto.s3.connection.Location.DEFAULT)
            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key
            k = Key(bucket)
            k.key = uploadfile
            k.set_contents_from_filename(uploadfile, cb=self.percent_cb, num_cb=10)
            k.set_acl('public-read-write')
        except Exception as e:
            print 'failed {0}'.format(str(e))
Example #28
def test_lifecycle_multi():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    date = '2022-10-12T00:00:00.000Z'
    sc = 'GLACIER'
    lifecycle = Lifecycle()
    lifecycle.add_rule("1", "1/", "Enabled", 1)
    lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
    lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
    lifecycle.add_rule("4", "4/", "Enabled", None,
        Transition(days=4, storage_class=sc))
    lifecycle.add_rule("5", "5/", "Enabled", None,
        Transition(date=date, storage_class=sc))

    bucket.configure_lifecycle(lifecycle)
    # read the lifecycle back
    rules = bucket.get_lifecycle_config()

    for rule in rules:
        if rule.id == "1":
            rule.prefix.should.equal("1/")
            rule.expiration.days.should.equal(1)
        elif rule.id == "2":
            rule.prefix.should.equal("2/")
            rule.expiration.days.should.equal(2)
        elif rule.id == "3":
            rule.prefix.should.equal("3/")
            rule.expiration.date.should.equal(date)
        elif rule.id == "4":
            rule.prefix.should.equal("4/")
            rule.transition.days.should.equal(4)
            rule.transition.storage_class.should.equal(sc)
        elif rule.id == "5":
            rule.prefix.should.equal("5/")
            rule.transition.date.should.equal(date)
            rule.transition.storage_class.should.equal(sc)
        else:
            assert False, "Invalid rule id"
Example #29
def test_lifecycle_multi():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    date = "2022-10-12T00:00:00.000Z"
    sc = "GLACIER"
    lifecycle = Lifecycle()
    lifecycle.add_rule("1", "1/", "Enabled", 1)
    lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
    lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
    lifecycle.add_rule("4", "4/", "Enabled", None,
                       Transition(days=4, storage_class=sc))
    lifecycle.add_rule("5", "5/", "Enabled", None,
                       Transition(date=date, storage_class=sc))

    bucket.configure_lifecycle(lifecycle)
    # read the lifecycle back
    rules = bucket.get_lifecycle_config()

    for rule in rules:
        if rule.id == "1":
            rule.prefix.should.equal("1/")
            rule.expiration.days.should.equal(1)
        elif rule.id == "2":
            rule.prefix.should.equal("2/")
            rule.expiration.days.should.equal(2)
        elif rule.id == "3":
            rule.prefix.should.equal("3/")
            rule.expiration.date.should.equal(date)
        elif rule.id == "4":
            rule.prefix.should.equal("4/")
            rule.transition.days.should.equal(4)
            rule.transition.storage_class.should.equal(sc)
        elif rule.id == "5":
            rule.prefix.should.equal("5/")
            rule.transition.date.should.equal(date)
            rule.transition.storage_class.should.equal(sc)
        else:
            assert False, "Invalid rule id"
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename',
                               prefix='logs/',
                               status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3(aws_secret_access_key=self.ec2_secret_key,
                                   aws_access_key_id=self.ec2_access_key)

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(
                    self.bucket_name,
                    location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename,
                                         cb=self.percent_cb,
                                         num_cb=10)
            k.set_acl('public-read-write')

            return "https://s3.amazonaws.com/{bucket}/{filename}".format(
                bucket=self.bucket_name, filename=filename)

        except Exception as e:
            logging.error("S3StorageAgent failed with exception:\n{0}".format(
                str(e)))
            logging.error(traceback.format_exc())
            raise e
Example #31
def main():
	raw_input("I am about to create a bucket called 'test_bucket1' and a\n text file called 'HelloWorld.txt'. Press enter to continue.");
	print;
	with open("HelloWorld.txt", "w") as f:
		f.writelines("I hope you can read this file!");
	s3=boto.connect_s3();
	bucket1=s3.create_bucket('test_bucket1'); #creates an s3 bucket.
	print "'test_bucket1' should be created. GO CHECK! Press enter to continue.";
	raw_input();
	#I am going to create two new keys
	raw_input("I am going to add a textfile and picture to S3. Press enter to continue.");
	k=Key(bucket1);
	picture=Key(bucket1);
	picture.key="picture";
	picture.set_contents_from_filename("bearandi.jpg");
	k.key="helloWorld";
	k.set_contents_from_filename("helloWorld.txt");
	print;
	raw_input("Look at the files on S3. The Files will now be downloaded. Enter to continue.");
	print;
	#This line and the next download the files from S3
	picture.get_contents_to_filename("newBear.jpg"); 
	k.get_contents_to_filename("newHelloWorld.txt");
	#delete a key
	raw_input("File downloads 100% I am now going to delete the text file. Enter to continue.");
	print;
	#delete the text file.
	bucket1.delete_key("helloWorld");
	raw_input("The text file should now be deleted. I am now going to create 3 more buckets \nand delete one. Press enter to continue.");
	print;
	#create more buckets
	bucket2=s3.create_bucket("lab1_bucket2");
	bucket3=s3.create_bucket("lab1_bucket3");
	bucket4=s3.create_bucket("lab1_bucket4");
	raw_input("The buckets were created. I will now delete lab1_bucket4.");
	print;
	bucket4.delete();
	raw_input("lab1_bucket4 deleted. I will now querry to see if buckets exist and if I have permision.");
	print;
	#find buckets
	print "I am going to try the bucket names 'test_bucket1', which exists, and 'lab1_bucket4', which does not."
	print;
	print "Here is a list of all buckets:";
	print s3.get_all_buckets();
	print;
	try:
		print "test_bucket1:",
		print bucket1.get_acl();
	except NameError:
		print "The bucket 'bucket1' name does not exist.";
	try:
		print "lab1_bucket4:",
		print bucket4.get_acl();
	except :
		print "That bucket 'lab1_bucket4' does not exist. Invalid name.";
	print;
	raw_input("I am now going to copy the picture from test_bucket1 to lab1_bucket2.");
	#move object
	print;
	#kill object in 5 days
	picture.copy("lab1_bucket2","Bucket2Bear.jpg");
	raw_input("There should now be a copied picture in lab1_bucket2.\nI will now add a new photo with a 5 day expiration and with reduced redundancy in bucket 3.");
	print;
	cycle=Lifecycle();
	k3=Key(bucket3);
	cycle.add_rule("Five Days", "My Second picture", "Enabled", 5);
	bucket3.configure_lifecycle(cycle);
	k3.key="My Second picture";
	k3.set_contents_from_filename("GW2.jpg", reduced_redundancy=True);
	raw_input("Check bucket3 for the new object with redundancy and an expiration.\nThe last bucket with versioning is going to be made.");
	print;
	#create last bucket
	lastBucket=s3.create_bucket("last_bucket");
	lastBucket.configure_versioning(True, False, None);
	print "Version Status: ", #print versioning status
	print lastBucket.get_versioning_status();
	print;
	lastK=Key(lastBucket);
	lastK.name="MyFile";
	lastK.set_contents_from_filename("helloWorld.txt"); #add original hello world
	print "Added a hello world containing the string: '",
	print lastK.get_contents_as_string();
	print;
	#edited the same hello world
	with open("helloWorld.txt", "a") as f:
		f.writelines("\nI added some lines.\nLast Line.");
	lastK.name="MyFile";
	lastK.set_contents_from_filename("helloWorld.txt");
	print "Added a hello world containing the string: '",
	print lastK.get_contents_as_string();
	print;
	print "'.\nObject details: ";
	for version in lastBucket.list_versions():
		print version.name;
		print version.version_id;
		print;
		print;
	toDelete=raw_input("There should now be two different versions. Type the version of the file you would like to delete: ");
	try:
		print lastBucket.delete_key("MyFile", version_id=toDelete);
	except:
		print;
	raw_input("Version of the file you entered should be deleted.");
	lastK.set_metadata("My meta data", "This is the meta data");
	print; lastK.get_metadata("My meta data");
Example #32
        return True
    prefix = 'an-from-gpu-to-agg-'
    if bucket_name[:len(prefix)] == prefix:
        return True
    if 'test' in bucket_name.split('-'):
        return True
    return False
s3 = boto.connect_s3()
del_all_pattern = '%s-lc-delete-all'
for b in s3.get_all_buckets():
    if delete(b.name):
        print b.name
        try:
            config = b.get_lifecycle_config()
            for r in config:
                if r.id == del_all_pattern % b.name:
                    if len(b.get_all_keys()) > 0:
                        print "Want to delete %s but not empty" % b.name
                        print "Try again tomorrow"
                    else:
                        b.delete()
        except S3ResponseError as sre:
            if sre.error_code == 'NoSuchLifecycleConfiguration':
                print "Setting deletion lifecycle rule"
                lf = Lifecycle()
                lf.add_rule(id=del_all_pattern % b.name, expiration=Expiration(days=1),
                            prefix='', status='Enabled', transition=None)
                b.configure_lifecycle(lf)

Example #33
    def test_bucket_lifecycle(self):
        lifecycle_id = 'eutester lifecycle test'
        lifecycle_prefix = 'eulifecycle'
        lifecycle_status = 'Enabled'
        lifecycle_expiration = 1
        bucket_name = self.bucket_prefix + "lifecycle-test0"
        self.buckets_used.add(bucket_name)
        bucket = self.tester.create_bucket(bucket_name)

        lifecycle = Lifecycle()
        lifecycle.add_rule(lifecycle_id, lifecycle_prefix, lifecycle_status,
                           lifecycle_expiration)
        bucket.configure_lifecycle(lifecycle)
        responses = bucket.get_lifecycle_config()
        assert (len(responses) == 1), 'expected exactly one lifecycle rule'
        lifecycle_response = responses[0]
        assert (
            lifecycle_response.id == lifecycle_id
        ), "Expected lifecycle Id to be: " + lifecycle_id + " found " + lifecycle_response.id
        assert (
            lifecycle_response.prefix == lifecycle_prefix
        ), "Expected lifecycle prefix to be: " + lifecycle_prefix + " found " + lifecycle_response.prefix
        assert (
            lifecycle_response.status == lifecycle_status
        ), "Expected lifecycle status to be: " + lifecycle_status + " found " + lifecycle_response.status
        assert (lifecycle_response.expiration.days == lifecycle_expiration
                ), "Expected lifecycle expiration days to be: " + str(
                    lifecycle_expiration) + " found " + str(
                        lifecycle_response.expiration.days)

        bucket.delete_lifecycle_configuration()
        assert (
            len(responses) == 1), "Expected no configuration, found " + str(
                len(responses)) + " configuration"

        # multiple rules
        bucket_name = self.bucket_prefix + "lifecycle-test1"
        bucket = self.tester.create_bucket(bucket_name)
        self.buckets_used.add(bucket_name)
        date = '2022-10-12T00:10:10.011Z'
        lifecycle = Lifecycle()
        lifecycle.add_rule("1", "1/", "Enabled", 1)
        lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
        lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
        lifecycle.add_rule("4", "4/", "Disabled", Expiration(date=date))
        bucket.configure_lifecycle(lifecycle)
        lifecycle_responses = bucket.get_lifecycle_config()
        if len(lifecycle_responses) == 0:
            self.fail("no lifecycle found!")

        for response in lifecycle_responses:
            if response.id == "1":
                assert (
                    response.prefix == "1/"
                ), "Expected lifecycle prefix to be: " + "1/" + " found: " + response.prefix
                assert (
                    response.status == "Enabled"
                ), "Expected lifecycle status to be: " + "Enabled" + " found " + response.status
                assert (response.expiration.days == 1
                        ), "Expected lifecycle expiration days to be: " + str(
                            1) + " found " + str(response.expiration.days)
            elif response.id == "2":
                assert (
                    response.prefix == "2/"
                ), "Expected lifecycle prefix to be: " + "2/" + " found: " + response.prefix
                assert (
                    response.status == "Enabled"
                ), "Expected lifecycle status to be: " + "Enabled" + " found: " + response.status
                assert (response.expiration.days == 2
                        ), "Expected lifecycle expiration days to be: " + str(
                            2) + " found " + str(response.expiration.days)
            elif response.id == "3":
                assert (
                    response.prefix == "3/"
                ), "Expected lifecycle prefix to be: " + "3/" + " found: " + response.prefix
                assert (
                    response.status == "Enabled"
                ), "Expected lifecycle status to be: " + "Enabled" + " found " + response.status
                assert (
                    response.expiration.date == date
                ), "Expected lifecycle expiration days to be: " + date + " found " + str(
                    response.expiration.date)
            elif response.id == "4":
                assert (
                    response.prefix == "4/"
                ), "Expected lifecycle prefix to be: " + "4/" + " found: " + response.prefix
                assert (
                    response.status == "Disabled"
                ), "Expected lifecycle status to be: " + "Disabled" + " found " + response.status
                assert (
                    response.expiration.date == date
                ), "Expected lifecycle expiration days to be: " + date + " found " + str(
                    response.expiration.date)
            else:
                self.fail("no response found")

        self.debug("Cleaning up used buckets")
        for bucket in self.buckets_used:
            self.tester.clear_bucket(bucket)
Example #34
    def test_bucket_lifecycle(self):
        # TODO add lifecycle operation in s3ops
        lifecycle_id = 'nephoria lifecycle test'
        lifecycle_prefix = 'eulifecycle'
        lifecycle_status = 'Enabled'
        lifecycle_expiration = 1
        bucket_name = self.bucket_prefix + "lifecycle-test0"
        self.buckets_used.add(bucket_name)
        bucket = self.tester.s3.create_bucket(bucket_name)

        lifecycle = Lifecycle()
        lifecycle.add_rule(lifecycle_id, lifecycle_prefix, lifecycle_status, lifecycle_expiration)
        bucket.configure_lifecycle(lifecycle)
        responses = bucket.get_lifecycle_config()
        assert (len(responses) == 1), 'expected exactly one lifecycle rule'
        lifecycle_response = responses[0]
        assert (lifecycle_response.id == lifecycle_id), "Expected lifecycle Id to be: " + lifecycle_id + " found " + lifecycle_response.id
        assert (lifecycle_response.prefix == lifecycle_prefix), "Expected lifecycle prefix to be: " + lifecycle_prefix + " found " + lifecycle_response.prefix
        assert (lifecycle_response.status == lifecycle_status), "Expected lifecycle status to be: " + lifecycle_status + " found " + lifecycle_response.status
        assert (lifecycle_response.expiration.days == lifecycle_expiration), "Expected lifecycle expiration days to be: " + str(lifecycle_expiration) + " found " + str(lifecycle_response.expiration.days)

        bucket.delete_lifecycle_configuration()
        assert (len(responses) == 1), "Expected no configuration, found " + len(responses) + " configuration"

        # multiple rules
        bucket_name = self.bucket_prefix + "lifecycle-test1"
        bucket = self.tester.s3.create_bucket(bucket_name)
        self.buckets_used.add(bucket_name)
        date = '2022-10-12T00:10:10.011Z'
        lifecycle = Lifecycle()
        lifecycle.add_rule("1", "1/", "Enabled", 1)
        lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
        lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
        lifecycle.add_rule("4", "4/", "Disabled", Expiration(date=date))
        bucket.configure_lifecycle(lifecycle)
        lifecycle_responses = bucket.get_lifecycle_config()
        if len(lifecycle_responses) == 0:
            self.fail("no lifecycle found!")

        for response in lifecycle_responses:
            if response.id == "1":
                assert (response.prefix == "1/"), "Expected lifecycle prefix to be: " + "1/" + " found: " + response.prefix
                assert (response.status == "Enabled"), "Expected lifecycle status to be: " + "Enabled" + " found " + response.status
                assert (response.expiration.days == 1), "Expected lifecycle expiration days to be: " + str(1) + " found " + str(response.expiration.days)
            elif response.id == "2":
                assert (response.prefix == "2/"), "Expected lifecycle prefix to be: " + "2/" + " found: " + response.prefix
                assert (response.status == "Enabled"), "Expected lifecycle status to be: " + "Enabled" + " found: " + response.status
                assert (response.expiration.days == 2), "Expected lifecycle expiration days to be: " + str(2) + " found " + str(response.expiration.days)
            elif response.id == "3":
                assert (response.prefix == "3/"), "Expected lifecycle prefix to be: " + "3/" + " found: " + response.prefix
                assert (response.status == "Enabled"), "Expected lifecycle status to be: " + "Enabled" + " found " + response.status
                assert (response.expiration.date == date), "Expected lifecycle expiration days to be: " + date + " found " + str(response.expiration.date)
            elif response.id == "4":
                assert (response.prefix == "4/"), "Expected lifecycle prefix to be: " + "4/" + " found: " + response.prefix
                assert (response.status == "Disabled"), "Expected lifecycle status to be: " + "Disabled" + " found " + response.status
                assert (response.expiration.date == date), "Expected lifecycle expiration days to be: " + date + " found " + str(response.expiration.date)
            else:
                self.fail("no response found")

        self.debug("Cleaning up used buckets")
        for bucket in self.buckets_used:
            self.tester.s3.clear_bucket(bucket)
Example #35
    "-c", "--create-bucket", dest="new_bucket", help="Creates a bucket, if it doesn't exist", metavar="BUCKET_NAME"
)
parser.add_option("-e", "--expiration", dest="life", help="Expiration in number of days", metavar="LIFE", type="int")
parser.add_option("-l", "--list", dest="list_bucket", help="List the contents of the bucket", metavar="LIST")
(options, args) = parser.parse_args()

conn = boto.connect_s3(id, key)
if options.new_bucket:
    bucket = conn.lookup(options.new_bucket)

    if bucket is None:
        bucket = conn.create_bucket(options.new_bucket)

        if options.life:
            life = Lifecycle()
            life.add_rule("s3push_expiration_rule", "", "Enabled", options.life)
            bucket.configure_lifecycle(life)

elif options.list_bucket:
    bucket = conn.lookup(options.list_bucket)
    for key in bucket:
        last_modified = datetime.datetime.strptime(key.last_modified, "%Y-%m-%dT%H:%M:%S.000Z")
        print key.name.ljust(70) + "\t" + last_modified.strftime("%d %B %Y, %I:%M%p").ljust(24) + "\t" + str(
            key.size / 1024
        ) + "KB"
    sys.exit(0)

else:
    bucket = conn.lookup(def_bucket)

if options.dir_name is None:
Example #36
    bucket = conn.create_bucket(bucket_name)
    bucket.configure_website('index.html')

    print "Adding CORS settings..."

    cors_cfg = CORSConfiguration()
    cors_cfg.add_rule(['PUT', 'POST', 'DELETE'], 'http://' + bucket_name + '.' + region, allowed_header='*', max_age_seconds=3000, expose_header='x-amz-server-side-encryption')
    cors_cfg.add_rule(['PUT', 'POST', 'DELETE'], 'http://localhost', allowed_header='*', max_age_seconds=3000, expose_header='x-amz-server-side-encryption')
    cors_cfg.add_rule('GET', '*')
    bucket.set_cors(cors_cfg)

    print "Adding Lifecycle settings..."

    lifecycle_cfg = Lifecycle()
    lifecycle_cfg.add_rule('d1', 'u/d1/', 'Enabled', 1)
    lifecycle_cfg.add_rule('d2', 'u/d2/', 'Enabled', 2)
    lifecycle_cfg.add_rule('d3', 'u/d3/', 'Enabled', 3)
    lifecycle_cfg.add_rule('d4', 'u/d4/', 'Enabled', 4)
    lifecycle_cfg.add_rule('d5', 'u/d5/', 'Enabled', 5)
    lifecycle_cfg.add_rule('d6', 'u/d6/', 'Enabled', 6)
    lifecycle_cfg.add_rule('d7', 'u/d7/', 'Enabled', 7)
    lifecycle_cfg.add_rule('d14', 'u/d14/', 'Enabled', 14)
    lifecycle_cfg.add_rule('d30', 'u/d30/', 'Enabled', 30)
    bucket.configure_lifecycle(lifecycle_cfg)

print "Uploading site files..."

# Only upload the files we need
files = [
['./', 'index.html'],
Example #37
def main():
    raw_input(
        "I am about to create a bucket called 'test_bucket1' and a\n text file called 'HelloWorld.txt'. Press enter to continue."
    )
    print
    with open("HelloWorld.txt", "w") as f:
        f.writelines("I hope you can read this file!")
    s3 = boto.connect_s3()
    bucket1 = s3.create_bucket('test_bucket1')
    #creates an s3 bucket.
    print "'test_bucket1' should be created. GO CHECK! Press enter to continue."
    raw_input()
    #I am going to create two new keys
    raw_input(
        "I am going to add a textfile and picture to S3. Press enter to continue."
    )
    k = Key(bucket1)
    picture = Key(bucket1)
    picture.key = "picture"
    picture.set_contents_from_filename("bearandi.jpg")
    k.key = "helloWorld"
    k.set_contents_from_filename("helloWorld.txt")
    print
    raw_input(
        "Look at the files on S3. The Files will now be downloaded. Enter to continue."
    )
    print
    #This line and the next download the files from S3
    picture.get_contents_to_filename("newBear.jpg")
    k.get_contents_to_filename("newHelloWorld.txt")
    #delete a key
    raw_input(
        "File downloads 100% I am now going to delete the text file. Enter to continue."
    )
    print
    #delete the text file.
    bucket1.delete_key("helloWorld")
    raw_input(
        "The text file should now be deleted. I am now going to create 3 more buckets \nand delete one. Press enter to continue."
    )
    print
    #create more buckets
    bucket2 = s3.create_bucket("lab1_bucket2")
    bucket3 = s3.create_bucket("lab1_bucket3")
    bucket4 = s3.create_bucket("lab1_bucket4")
    raw_input("The buckets were created. I will now delete lab1_bucket4.")
    print
    bucket4.delete()
    raw_input(
        "lab1_bucket4 deleted. I will now querry to see if buckets exist and if I have permision."
    )
    print
    #find buckets
    print "I am going to try the bucket names 'test_bucket1', which exists, and 'lab1_bucket4', which does not."
    print
    print "Here is a list of all buckets:"
    print s3.get_all_buckets()
    print
    try:
        print "test_bucket1:",
        print bucket1.get_acl()
    except NameError:
        print "The bucket 'bucket1' name does not exist."
    try:
        print "lab1_bucket4:",
        print bucket4.get_acl()
    except:
        print "That bucket 'lab1_bucket4' does not exist. Invalid name."
    print
    raw_input(
        "I am now going to copy the picture from test_bucket1 to lab1_bucket2."
    )
    #move object
    print
    #kill object in 5 days
    picture.copy("lab1_bucket2", "Bucket2Bear.jpg")
    raw_input(
        "There should now be a copied picture in lab1_bucket2.\nI will now add a new photo with a 5 day expiration and with reduced redundancy in bucket 3."
    )
    print
    cycle = Lifecycle()
    k3 = Key(bucket3)
    cycle.add_rule("Five Days", "My Second picture", "Enabled", 5)
    bucket3.configure_lifecycle(cycle)
    k3.key = "My Second picture"
    k3.set_contents_from_filename("GW2.jpg", reduced_redundancy=True)
    raw_input(
        "Check bucket3 for the new object with redundancy and an expiration.\nThe last bucket with versioning is going to be made."
    )
    print
    #create last bucket
    lastBucket = s3.create_bucket("last_bucket")
    lastBucket.configure_versioning(True, False, None)
    print "Version Status: ",  #print versioning status
    print lastBucket.get_versioning_status()
    print
    lastK = Key(lastBucket)
    lastK.name = "MyFile"
    lastK.set_contents_from_filename("helloWorld.txt")
    #add original hello world
    print "Added a hello world containing the string: '",
    print lastK.get_contents_as_string()
    print
    #edited the same hello world
    with open("helloWorld.txt", "a") as f:
        f.writelines("\nI added some lines.\nLast Line.")
    lastK.name = "MyFile"
    lastK.set_contents_from_filename("helloWorld.txt")
    print "Added a hello world containing the string: '",
    print lastK.get_contents_as_string()
    print
    print "'.\nObject details: "
    for version in lastBucket.list_versions():
        print version.name
        print version.version_id
        print
        print
    toDelete = raw_input(
        "There should now be two different versions. Type the version of the file you would like to delete: "
    )
    try:
        print lastBucket.delete_key("MyFile", version_id=toDelete)
    except:
        print
    raw_input("Version of the file you entered should be deleted.")
    lastK.set_metadata("My meta data", "This is the meta data")
    print
    lastK.get_metadata("My meta data")
Example #38
import boto
from amazon.dynamo import User
from app import user_keys

s3 = boto.connect_s3()

from boto.s3.lifecycle import (
    Lifecycle,
    Expiration,
)

for u in User().get_sites():
    bucket = s3.get_bucket(u[user_keys.user_role])
    bucket.delete_lifecycle_configuration()
    lifecycle = Lifecycle()
    for l in u[user_keys.user_site_leagues]:
        lifecycle.add_rule(l + ' tweets expire',
                           prefix=l + '/tweet',
                           status='Enabled',
                           expiration=Expiration(days=7))

    bucket.configure_lifecycle(lifecycle)
Example #39
 def test_expiration_with_no_transition(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule('myid', 'prefix', 'Enabled', 30)
     xml = lifecycle.to_xml()
     self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
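As this test shows, add_rule() only builds an in-memory model; to_xml() renders the document that configure_lifecycle() would send to the bucket, so a rule set can be inspected without touching S3. For example (rule id and prefix are hypothetical):

from boto.s3.lifecycle import Lifecycle, Transition

lifecycle = Lifecycle()
lifecycle.add_rule('archive', 'logs/', 'Enabled', None,
                   Transition(days=30, storage_class='GLACIER'))
print lifecycle.to_xml()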
Example #40
                      'http://' + bucket_name + '.' + region,
                      allowed_header='*',
                      max_age_seconds=3000,
                      expose_header='x-amz-server-side-encryption')
    cors_cfg.add_rule(['PUT', 'POST', 'DELETE'],
                      'http://localhost',
                      allowed_header='*',
                      max_age_seconds=3000,
                      expose_header='x-amz-server-side-encryption')
    cors_cfg.add_rule('GET', '*')
    bucket.set_cors(cors_cfg)

    print("Adding Lifecycle settings...")

    lifecycle_cfg = Lifecycle()
    lifecycle_cfg.add_rule('d1', 'u/d1/', 'Enabled', 1)
    lifecycle_cfg.add_rule('d2', 'u/d2/', 'Enabled', 2)
    lifecycle_cfg.add_rule('d3', 'u/d3/', 'Enabled', 3)
    lifecycle_cfg.add_rule('d4', 'u/d4/', 'Enabled', 4)
    lifecycle_cfg.add_rule('d5', 'u/d5/', 'Enabled', 5)
    lifecycle_cfg.add_rule('d6', 'u/d6/', 'Enabled', 6)
    lifecycle_cfg.add_rule('d7', 'u/d7/', 'Enabled', 7)
    lifecycle_cfg.add_rule('d14', 'u/d14/', 'Enabled', 14)
    lifecycle_cfg.add_rule('d30', 'u/d30/', 'Enabled', 30)
    bucket.configure_lifecycle(lifecycle_cfg)

print("Uploading site files...")

# Only upload the files we need
files = [
    ['./', 'index.html'],
Example #41
    def site_config(self, site):
        with hook('site config %s' % self.name, self, site):
            setup_aws_access_key(site)

            from boto import connect_s3
            from boto.s3.bucket import Bucket
            from boto.s3.key import Key

            for bucket_config in self.settings['buckets']:
                # Connect and make sure the bucket exists
                print bold(u'Configuring bucket %s...' % bucket_config['name'])
                connection = connect_s3()
                try:
                    bucket = connection.get_bucket(bucket_config['name'])
                except:
                    bucket = connection.create_bucket(bucket_config['name'])
                # Set the bucket policy
                if bucket_config.has_key('policy'):
                    bucket.set_policy(bucket_config['policy'])
                # Setup CORS, array of rules
                # http://boto.readthedocs.org/en/latest/ref/s3.html#boto.s3.cors.CORSConfiguration
                if bucket_config.has_key('cors') and bucket_config['cors'] is None:
                    # If explicity set to None, then remove the cors policy
                    bucket.delete_cors()
                else:
                    if not bucket_config.has_key('cors'):
                        # If not specified, use the default GET policy
                        bucket_config['cors'] = (DEFAULT_CORS_RULE,)
                    from boto.s3.cors import CORSConfiguration
                    cors_config = CORSConfiguration()
                    for rule in bucket_config['cors']:
                        cors_config.add_rule(**rule)
                    bucket.set_cors(cors_config)
                # Setup the lifecycle, array of rules
                # http://boto.readthedocs.org/en/latest/ref/s3.html#boto.s3.lifecycle.Lifecycle
                if bucket_config.has_key('lifecycle'):
                    from boto.s3.lifecycle import Lifecycle
                    lifecycle_config = Lifecycle()
                    for rule in bucket_config['lifecycle']:
                        lifecycle_config.add_rule(**rule)
                    bucket.configure_lifecycle(lifecycle_config)
                else:
                    bucket.delete_lifecycle_configuration()
                # Setup the bucket website hosting {suffix, error_key, routing_rules, redirect_all_requests_to}
                # http://boto.readthedocs.org/en/latest/ref/s3.html
                # https://github.com/boto/boto/blob/develop/boto/s3/website.py
                if bucket_config.has_key('website'):
                    # Expand the routing rules, array of {condition, redirect}
                    if bucket_config['website'].has_key('routing_rules'):
                        from boto.s3.website import RoutingRules, RoutingRule
                        routing_rules = RoutingRules()
                        for rule in bucket_config['website']['routing_rules']:
                            routing_rules.add_rule(RoutingRule(**rule))
                        bucket_config['website']['routing_rules'] = routing_rules
                    # Expand the redirect, redirect_all_requests_to is {hostname, protocol}
                    if bucket_config['website'].has_key('redirect_all_requests_to'):
                        from boto.s3.website import RedirectLocation
                        bucket_config['website']['redirect_all_requests_to'] = RedirectLocation(**bucket_config['website']['redirect_all_requests_to'])
                    bucket.configure_website(**bucket_config['website'])
                else:
                    bucket.delete_website_configuration()
Example #42
 def test_expiration_with_no_transition(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule('myid', 'prefix', 'Enabled', 30)
     xml = lifecycle.to_xml()
     self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
Example #43
    def site_config(self, site):
        with hook('site config %s' % self.name, self, site):
            setup_aws_access_key(site)

            from boto import connect_s3
            from boto.s3.bucket import Bucket
            from boto.s3.key import Key

            for bucket_config in self.settings['buckets']:
                # Connect and make sure the bucket exists
                print bold(u'Configuring bucket %s...' % bucket_config['name'])
                connection = connect_s3()
                try:
                    bucket = connection.get_bucket(bucket_config['name'])
                except:
                    bucket = connection.create_bucket(bucket_config['name'])
                # Set the bucket policy
                if bucket_config.has_key('policy'):
                    bucket.set_policy(bucket_config['policy'])
                # Setup CORS, array of rules
                # http://boto.readthedocs.org/en/latest/ref/s3.html#boto.s3.cors.CORSConfiguration
                if bucket_config.has_key(
                        'cors') and bucket_config['cors'] is None:
                    # If explicity set to None, then remove the cors policy
                    bucket.delete_cors()
                else:
                    if not bucket_config.has_key('cors'):
                        # If not specified, use the default GET policy
                        bucket_config['cors'] = (DEFAULT_CORS_RULE, )
                    from boto.s3.cors import CORSConfiguration
                    cors_config = CORSConfiguration()
                    for rule in bucket_config['cors']:
                        cors_config.add_rule(**rule)
                    bucket.set_cors(cors_config)
                # Setup the lifecycle, array of rules
                # http://boto.readthedocs.org/en/latest/ref/s3.html#boto.s3.lifecycle.Lifecycle
                if bucket_config.has_key('lifecycle'):
                    from boto.s3.lifecycle import Lifecycle
                    lifecycle_config = Lifecycle()
                    for rule in bucket_config['lifecycle']:
                        lifecycle_config.add_rule(**rule)
                    bucket.configure_lifecycle(lifecycle_config)
                else:
                    bucket.delete_lifecycle_configuration()
                # Setup the bucket website hosting {suffix, error_key, routing_rules, redirect_all_requests_to}
                # http://boto.readthedocs.org/en/latest/ref/s3.html
                # https://github.com/boto/boto/blob/develop/boto/s3/website.py
                if bucket_config.has_key('website'):
                    # Expand the routing rules, array of {condition, redirect}
                    if bucket_config['website'].has_key('routing_rules'):
                        from boto.s3.website import RoutingRules, RoutingRule
                        routing_rules = RoutingRules()
                        for rule in bucket_config['website']['routing_rules']:
                            routing_rules.add_rule(RoutingRule(**rule))
                        bucket_config['website'][
                            'routing_rules'] = routing_rules
                    # Expand the redirect, redirect_all_requests_to is {hostname, protocol}
                    if bucket_config['website'].has_key(
                            'redirect_all_requests_to'):
                        from boto.s3.website import RedirectLocation
                        bucket_config['website'][
                            'redirect_all_requests_to'] = RedirectLocation(
                                **bucket_config['website']
                                ['redirect_all_requests_to'])
                    bucket.configure_website(**bucket_config['website'])
                else:
                    bucket.delete_website_configuration()