Example #1
def setup_bucket(s3, dirname, bucket_name):
    """Ensures the given bucket exists and prepares it for a duplicity run
	"""
    if not s3.lookup(bucket_name):
        s3.create_bucket(bucket_name)
        time.sleep(5)
    bucket = s3.get_bucket(bucket_name)

    # tag this bucket with the directory so we know what it
    # is when we retrieve it after the terrible fire or burglary
    tags = Tags()
    tagset = TagSet()
    tagset.add_tag('path', dirname)
    tags.add_tag_set(tagset)
    bucket.set_tags(tags)

    # turn off any lifecycle rotations while we are in the middle of a backup
    to_glacier = Transition(days=1, storage_class='GLACIER')
    rule = Rule('movetoglacier',
                'duplicity',
                'Disabled',
                transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)

    # rename the manifest files from their glacier-safe versions
    keys = bucket.list(prefix='_duplicity')
    for key in keys:
        key.copy(bucket_name, key.name.replace("_duplicity", "duplicity"))
        key.delete()

    return bucket
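
All of the snippets on this page use the boto 2 (pre-boto3) S3 API and omit their imports. A minimal, self-contained sketch of the setup they assume (the bucket name here is hypothetical):

import boto
from boto.s3.lifecycle import Lifecycle, Rule, Transition, Expiration
from boto.s3.tagging import Tags, TagSet

# Rule/Transition drive the Glacier examples below; Tags/TagSet only the bucket-tagging ones.
# Credentials come from the environment or the boto config file.
conn = boto.connect_s3()
bucket = conn.get_bucket('example-bucket')  # hypothetical bucket name

# expire everything under logs/ after 30 days
lifecycle = Lifecycle()
lifecycle.add_rule('expire-logs', prefix='logs/', status='Enabled',
                   expiration=Expiration(days=30))
bucket.configure_lifecycle(lifecycle)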
Example #2
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3(aws_secret_access_key=self.ec2_secret_key,
                                   aws_access_key_id=self.ec2_access_key)

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(self.bucket_name, location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
            k.set_acl('public-read-write')

            return "https://s3.amazonaws.com/{bucket}/{filename}".format(bucket=self.bucket_name, filename=filename)

        except Exception as e:
            logging.error("S3StorageAgent failed with exception:\n{0}".format(str(e)))
            logging.error(traceback.format_exc())
            raise e
Example #3
 def __init__(self, bucket_name, s3_to_glacier_after_days=None):
     # create s3 connection
     # create bucket if does not exist
     # create S3 connection if archive_S3_bucket name is specified
     self.__bucket_name = bucket_name
     self.__s3_conn = boto.connect_s3()
     self.__bucket = self.__s3_conn.lookup(self.__bucket_name)
     if not self.__bucket:
         try:
             self.__bucket = self.__s3_conn.create_bucket(
                 self.__bucket_name)
             if s3_to_glacier_after_days is not None:
                 to_glacier = Transition(days=s3_to_glacier_after_days,
                                         storage_class='GLACIER')
                 rule = Rule(id='archive-rule1',
                             status='Enabled',
                             transition=to_glacier)
                 lifecycle = Lifecycle()
                 lifecycle.append(rule)
                 self.__bucket.configure_lifecycle(lifecycle)
         except S3CreateError:
             logger.error('failed to create S3 bucket[' +
                          self.__bucket_name +
                          ']. please check your AWS policy.')
             raise
Example #4
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3()

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(self.bucket_name, location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
            k.set_acl('public-read-write')

        except Exception as e:
            sys.stdout.write("AmazonS3Agent failed with exception:\n{0}".format(str(e)))
            sys.stdout.flush()
            raise e
Example #5
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename',
                               prefix='logs/',
                               status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3()

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(
                    self.bucket_name,
                    location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename,
                                         cb=self.percent_cb,
                                         num_cb=10)
            k.set_acl('public-read-write')

        except Exception as e:
            sys.stdout.write(
                "AmazonS3Agent failed with exception:\n{0}".format(str(e)))
            sys.stdout.flush()
            raise e
Example #6
def glacier(name):
   bucket = conn.get_bucket(name)
   to_glacier = boto.s3.lifecycle.Transition(days=30, storage_class='GLACIER')
   rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier)
   lifecycle = Lifecycle()
   lifecycle.append(rule)
   bucket.configure_lifecycle(lifecycle)
Example #7
def setup_bucket(s3, dirname, bucket_name):
	"""Ensures the given bucket exists and prepares it for a duplicity run
	"""
	if not s3.lookup(bucket_name):
		s3.create_bucket(bucket_name)
		time.sleep(5)
	bucket = s3.get_bucket(bucket_name)
	
	# tag this bucket with the directory so we know what it 
	# is when we retrieve it after the terrible fire or burglary
	tags = Tags()
	tagset = TagSet()
	tagset.add_tag('path', dirname)
	tags.add_tag_set(tagset)
	bucket.set_tags(tags)

	# turn off any lifecycle rotations while we are in the middle of a backup
	to_glacier = Transition(days=1, storage_class='GLACIER')
	rule = Rule('movetoglacier', 'duplicity', 'Disabled', transition=to_glacier)
	lifecycle = Lifecycle()
	lifecycle.append(rule)
	bucket.configure_lifecycle(lifecycle)

	# rename the manifest files from their glacier-safe versions
	keys = bucket.list(prefix = '_duplicity')
	for key in keys:
		key.copy(bucket_name, key.name.replace("_duplicity", "duplicity"))
		key.delete()

	return bucket
Example #8
	def expire(self, days, transition):
		self.days = int(days)
		self.transition = transition
		#using nargs in ArgumentParser leads to passing lists, use the robustness principle
		if type(self.transition) == list: self.transition = str(self.transition[0])

		if self.transition == 'delete':
			pass

		if self.transition == 'glacier':
			lifecycle = Lifecycle()
			lifecycle_action = Transition(days=self.days, storage_class='GLACIER')

			rule = Rule('ruleid', 'logs/', 'Enabled', transition=lifecycle_action)
			lifecycle.append(rule)


		for key in self.key_list:
			content_type, unused = mimetypes.guess_type(key.name)
			if not content_type:
				content_type = 'text/plain'
			expire_time =  datetime.utcnow() + timedelta(days=(self.days))
			expire_time = expire_time.strftime(("%a, %d %b %Y %H:%M:%S GMT"))
			metadata = {'Expires': expire_time, 'Content-Type': content_type}

			if self.debug: debug_message(key.name, metadata)

			self.key_list_content.append(key)
			key.copy(self.bucket_name, key, metadata=metadata, preserve_acl=True)

		if self.debug: print debug_message('debug', "bucket: {x.bucket_name}, policy: {x.command}, days: {x.days}".format(x=self))
Example #9
def setDeletionPolicy(bucket):
	"""
	Creates a lifecycle policy that automatically deletes all the files in the subfolder after one day.
	"""
	lifecycle = Lifecycle()
	lifecycle.add_rule("Audo-delete objects in %s after 1 day" % aws_common.S3_RESPONSE_PREFIX, aws_common.S3_RESPONSE_PREFIX, "Enabled", 1)
	print "Added deletion policy"
	bucket.configure_lifecycle(lifecycle)
Example #10
 def test_lifecycle_with_defaults(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule(expiration=30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertNotEqual(len(actual_lifecycle.id), 0)
     self.assertEqual(actual_lifecycle.prefix, '')
Example #11
def get_lifecycle(expiration_path, days_to_expiration):
    if days_to_expiration is not None and expiration_path is not None:
        lifecycle = Lifecycle()
        print "Adding expiration rule of %s days for S3 path %s" % (days_to_expiration, expiration_path)
        lifecycle.add_rule('expirationrule', prefix=expiration_path, status='Enabled', expiration=Expiration(days=int(days_to_expiration)))
        return lifecycle
    else:
        print "No expiration rule added"
        return None
Example #12
 def test_lifecycle_with_defaults(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule(expiration=30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertNotEqual(len(actual_lifecycle.id), 0)
     self.assertEqual(actual_lifecycle.prefix, '')
Example #13
def backup_bucket(bucketname):
    connect()

    bucket = s3.get_bucket(bucketname)
    to_glacier = Transition(days=1, storage_class='GLACIER')
    rule = Rule('ruleid', '/', 'Enabled', transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)

    return True
Example #14
 def test_lifecycle(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule('myid', '', 'Enabled', 30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertEqual(actual_lifecycle.id, 'myid')
     self.assertEqual(actual_lifecycle.prefix, '')
     self.assertEqual(actual_lifecycle.status, 'Enabled')
     self.assertEqual(actual_lifecycle.transition, None)
Example #15
def upload(bucket_name, image_name, image):
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)

    lifecycle = Lifecycle()
    lifecycle.add_rule('s3-image-uploader', prefix=FILE_PREFIX, status='Enabled', expiration=Expiration(days=EXPIRATION))
    bucket.configure_lifecycle(lifecycle)

    k = Key(bucket)
    k.key = image_name
    k.set_contents_from_string(image)
Example #16
 def test_lifecycle(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule('myid', '', 'Enabled', 30)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     self.assertEqual(len(response), 1)
     actual_lifecycle = response[0]
     self.assertEqual(actual_lifecycle.id, 'myid')
     self.assertEqual(actual_lifecycle.prefix, '')
     self.assertEqual(actual_lifecycle.status, 'Enabled')
     self.assertEqual(actual_lifecycle.transition, None)
Example #17
def destroy_lifecycle_rule(connection, module):

    name = module.params.get("name")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    changed = False

    if prefix is None:
        prefix = ""

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            module.exit_json(changed=changed)
        else:
            module.fail_json(msg=e.message)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    # Check if rule exists
    # If an ID exists, use it; otherwise compare based on prefix
    if rule_id is not None:
        for existing_rule in current_lifecycle_obj:
            if rule_id == existing_rule.id:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)
    else:
        for existing_rule in current_lifecycle_obj:
            if prefix == existing_rule.prefix:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)

    # Write lifecycle to bucket or, if there are no rules left, delete the lifecycle configuration
    try:
        if lifecycle_obj:
            bucket.configure_lifecycle(lifecycle_obj)
        else:
            bucket.delete_lifecycle_configuration()
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
Example #18
def backup_bucket(bucketname):
    connect()

    bucket = s3.get_bucket(bucketname)
    to_glacier = Transition(days=1, storage_class='GLACIER')
    rule = Rule('ruleid', '/', 'Enabled', transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)

    return True
Example #19
 def test_lifecycle_with_glacier_transition(self):
     lifecycle = Lifecycle()
     transition = Transition(days=30, storage_class='GLACIER')
     rule = Rule('myid', prefix='', status='Enabled', expiration=None,
                 transition=transition)
     lifecycle.append(rule)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     transition = response[0].transition
     self.assertEqual(transition.days, 30)
     self.assertEqual(transition.storage_class, 'GLACIER')
     self.assertEqual(transition.date, None)
Example #20
def test_lifecycle_delete():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    lifecycle.add_rule(expiration=30)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    response.should.have.length_of(1)

    bucket.delete_lifecycle_configuration()
    bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError)
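
The connect_to_region-style tests on this page (this one and the similar ones below) read as if they run against a mocked S3 service rather than the real one. A rough sketch of how such a test is typically wrapped, assuming an older moto release whose mock_s3 decorator still patches boto 2, plus the sure package for the .should assertions:

import boto
import sure  # noqa: F401 -- importing sure enables the .should assertion style
from boto.s3.lifecycle import Lifecycle
from moto import mock_s3


@mock_s3
def test_lifecycle_expiration_days():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    # one rule that expires everything after 30 days
    lifecycle = Lifecycle()
    lifecycle.add_rule('expire-30-days', prefix='', status='Enabled', expiration=30)
    bucket.configure_lifecycle(lifecycle)

    bucket.get_lifecycle_config().should.have.length_of(1)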
Example #21
 def test_lifecycle_with_glacier_transition(self):
     lifecycle = Lifecycle()
     transition = Transition(days=30, storage_class='GLACIER')
     rule = Rule('myid', prefix='', status='Enabled', expiration=None,
                 transition=transition)
     lifecycle.append(rule)
     self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
     response = self.bucket.get_lifecycle_config()
     transition = response[0].transition
     self.assertEqual(transition.days, 30)
     self.assertEqual(transition.storage_class, 'GLACIER')
     self.assertEqual(transition.date, None)
Example #22
def get_lifecycle(expiration_path, days_to_expiration):
    if days_to_expiration is not None and expiration_path is not None:
        lifecycle = Lifecycle()
        print "Adding expiration rule of %s days for S3 path %s" % (
            days_to_expiration, expiration_path)
        lifecycle.add_rule('expirationrule',
                           prefix=expiration_path,
                           status='Enabled',
                           expiration=Expiration(days=int(days_to_expiration)))
        return lifecycle
    else:
        print "No expiration rule added"
        return None
Example #23
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            bucket         = dict(required=True),
            s3_url         = dict(aliases=['S3_URL']),
            lifecycle      = dict(type='list', required=True),
        ),
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    bucket = module.params.get('bucket')
    s3_url = module.params.get('s3_url')
    lifecycle = module.params.get('lifecycle')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    s3 = get_s3_connection(module, aws_connect_kwargs, region, s3_url)

    validate_lifecycle(module, bucket, lifecycle)

    bucket_obj = bucket_lookup(module, s3, bucket)
    if not bucket_obj:
        module.fail_json(msg='Bucket %s does not exist' % bucket)

    lifecycle_config = None
    new_lifecycle = False
    try:
        lifecycle_config = bucket_obj.get_lifecycle_config()
    except boto.exception.S3ResponseError:
        new_lifecycle = True

    results = calculate_net_rules(lifecycle_config, lifecycle)

    if results.changed:
        try:
            if results.rules.__len__() > 0:
                lifecycle_config = Lifecycle()
                for rule in results.rules:
                    lifecycle_config.append(rule)
                bucket_obj.configure_lifecycle(lifecycle_config)
            else:
                bucket_obj.delete_lifecycle_configuration()
                module.exit_json(bucket=bucket, changed=True,
                                 msg='Lifecycle Configuration deleted')
        except boto.exception.S3ResponseError as e:
            module.fail_json(bucket=bucket, changed=results.changed,
                             msg="Error %s: %s" % (e.error_code, e.message),
                             lifecycle_rules=map(lambda x: x.to_xml(), results.rules))
Example #24
def upload(bucket_name, image_name, image):
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)

    lifecycle = Lifecycle()
    lifecycle.add_rule('s3-image-uploader',
                       prefix=FILE_PREFIX,
                       status='Enabled',
                       expiration=Expiration(days=EXPIRATION))
    bucket.configure_lifecycle(lifecycle)

    k = Key(bucket)
    k.key = image_name
    k.set_contents_from_string(image)
Example #25
 def test_lifecycle_jp(self):
     # test lifecycle with Japanese prefix
     name = "Japanese files"
     prefix = "日本語/"
     days = 30
     lifecycle = Lifecycle()
     lifecycle.add_rule(name, prefix, "Enabled", days)
     # set the lifecycle
     self.bucket.configure_lifecycle(lifecycle)
     # read the lifecycle back
     readlifecycle = self.bucket.get_lifecycle_config()
     for rule in readlifecycle:
         self.assertEqual(rule.id, name)
         self.assertEqual(rule.expiration.days, days)
Example #26
def test_lifecycle_create():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    lifecycle.add_rule('myid', '', 'Enabled', 30)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    len(response).should.equal(1)
    lifecycle = response[0]
    lifecycle.id.should.equal('myid')
    lifecycle.prefix.should.equal('')
    lifecycle.status.should.equal('Enabled')
    list(lifecycle.transition).should.equal([])
Example #27
def test_lifecycle_create():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    lifecycle.add_rule("myid", "", "Enabled", 30)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    len(response).should.equal(1)
    lifecycle = response[0]
    lifecycle.id.should.equal("myid")
    lifecycle.prefix.should.equal("")
    lifecycle.status.should.equal("Enabled")
    list(lifecycle.transition).should.equal([])
Example #28
 def test_lifecycle_jp(self):
     # test lifecycle with Japanese prefix
     name = "Japanese files"
     prefix = u"日本語/"
     days = 30
     lifecycle = Lifecycle()
     lifecycle.add_rule(name, prefix, "Enabled", days)
     # set the lifecycle
     self.bucket.configure_lifecycle(lifecycle)
     # read the lifecycle back
     readlifecycle = self.bucket.get_lifecycle_config()
     for rule in readlifecycle:
         self.assertEqual(rule.id, name)
         self.assertEqual(rule.expiration.days, days)
Example #29
    def update_with_config(self, config):
        lifecycle = Lifecycle()
        got_rule = False
        for x_rule in config.findall("Rule"):
            got_rule = True
            lifecycle.add_rule(**self.get_rule_kwargs_from_xml(x_rule))

        if got_rule:
            success = self.bucket.configure_lifecycle(lifecycle)
        else:
            success = self.bucket.delete_lifecycle_configuration()
        if not success:
            print "Failed to update rules"
            sys.exit(1)
        print "Successfully updated rule"
Example #30
def test_lifecycle_with_glacier_transition():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    lifecycle = Lifecycle()
    transition = Transition(days=30, storage_class='GLACIER')
    rule = Rule('myid', prefix='', status='Enabled', expiration=None,
                transition=transition)
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
    response = bucket.get_lifecycle_config()
    transition = response[0].transition
    transition.days.should.equal(30)
    transition.storage_class.should.equal('GLACIER')
    transition.date.should.equal(None)
Example #31
def set_bucket_lifetime(bucket_name, days=14, aws_access={}, conn=None):
    '''
    Set an expiration on a bucket in S3.
    '''

    conn = return_s3_connection(aws_access) if conn is None else conn

    bucket = conn.get_bucket(bucket_name)
    expiration = Expiration(days=days)
    rule = Rule(id='ruleid', prefix='', status='Enabled',
                expiration=expiration)
    lifecycle = Lifecycle()
    lifecycle.append(rule)

    return bucket.configure_lifecycle(lifecycle)
Example #32
    def freeze(self, time=0):
        if time and isinstance(time, timedelta):
            time += timedelta(hours=12)
            transition = Transition(days=time.days, storage_class='GLACIER')
        elif time and isinstance(time, datetime):
            transition = Transition(days=(time-datetime.now()).days, storage_class='GLACIER')
        else:
            transition = Transition(days=time, storage_class='GLACIER')

        key = self.key.key
        rule = Rule(key, key, 'Enabled', transition=transition)

        lifecycle = Lifecycle()
        lifecycle.append(rule)
        self.bucket.configure_lifecycle(lifecycle)
Example #33
def create_s3():
    """
    Create the S3 buckets

    All the buckets use the galaxy name as the 'folder'
    :return:
    """
    # Create the bucket for the images
    s3 = boto.connect_s3()
    images_bucket = 'icrar.{0}.galaxy-images'.format(env.project_name)
    bucket = s3.create_bucket(images_bucket)
    bucket.set_acl('public-read')
    bucket.configure_website(suffix='index.html')
    bucket.set_policy('''{
  "Statement":[
    {
        "Sid":"PublicReadForGetBucketObjects",
        "Effect":"Allow",
        "Principal": {
                "AWS": "*"
        },
        "Action":["s3:GetObject"],
        "Resource":["arn:aws:s3:::%s/*"]
    }
  ]
}
''' % images_bucket)

    # Create the bucket for the output files
    file_bucket = 'icrar.{0}.files'.format(env.project_name)
    s3.create_bucket(file_bucket)

    # Create the bucket for the stats files
    file_bucket = 'icrar.{0}.archive'.format(env.project_name)
    bucket = s3.create_bucket(file_bucket)
    to_glacier = Transition(days=10, storage_class='GLACIER')
    rule1 = Rule('rule01',
                 status='Enabled',
                 prefix='stats/',
                 transition=to_glacier)
    rule2 = Rule('rule02',
                 status='Enabled',
                 prefix='logs/',
                 expiration=Expiration(days=20))
    lifecycle = Lifecycle()
    lifecycle.append(rule1)
    lifecycle.append(rule2)
    bucket.configure_lifecycle(lifecycle)
Example #34
    def configure_s3rule(self, bucket, rules=None):
        """
        :type bucket: object
        :param bucket: the boto object of S3 bucket

        :type rules: dict
        :param rules: describes the lifecycle rules

        :rtype: list
        :return: a list of results
        """
        self.lifecycle = Lifecycle()
        if rules and isinstance(rules, dict):
            for id, rule in rules.iteritems():
                self.add_rule(id=id, **rule)
            try:
                old = bucket.get_lifecycle_config()
            except S3ResponseError as e:
                if "nosuchlifecycleconfiguration" in str(e).lower():
                    old = None
                else:
                    raise
            if old:
                log.info("old s3 rules found, need to delete first")
                bucket.delete_lifecycle_configuration()
                log.info("old s3 rules has been deleted: %s" % old)
            log.info("now add new s3 rules")
            return bucket.configure_lifecycle(self.lifecycle)
Example #35
def set_bucket_lifetime(bucket_name, days=14, aws_access={}, conn=None):
    '''
    Set an expiration on a bucket in S3.
    '''

    conn = return_s3_connection(aws_access) if conn is None else conn

    bucket = conn.get_bucket(bucket_name)
    expiration = Expiration(days=days)
    rule = Rule(id='ruleid',
                prefix='',
                status='Enabled',
                expiration=expiration)
    lifecycle = Lifecycle()
    lifecycle.append(rule)

    return bucket.configure_lifecycle(lifecycle)
Example #36
def create_s3():
    """
    Create the S3 buckets

    All the buckets use the galaxy name as the 'folder'
    :return:
    """
    # Create the bucket for the images
    s3 = boto.connect_s3()
    images_bucket = "icrar.{0}.galaxy-images".format(env.project_name)
    bucket = s3.create_bucket(images_bucket)
    bucket.set_acl("public-read")
    bucket.configure_website(suffix="index.html")
    bucket.set_policy(
        """{
  "Statement":[
    {
        "Sid":"PublicReadForGetBucketObjects",
        "Effect":"Allow",
        "Principal": {
                "AWS": "*"
        },
        "Action":["s3:GetObject"],
        "Resource":["arn:aws:s3:::%s/*"]
    }
  ]
}
"""
        % images_bucket
    )

    # Create the bucket for the output files
    file_bucket = "icrar.{0}.files".format(env.project_name)
    s3.create_bucket(file_bucket)

    # Create the bucket for the stats files
    file_bucket = "icrar.{0}.archive".format(env.project_name)
    bucket = s3.create_bucket(file_bucket)
    to_glacier = Transition(days=10, storage_class="GLACIER")
    rule1 = Rule("rule01", status="Enabled", prefix="stats/", transition=to_glacier)
    rule2 = Rule("rule02", status="Enabled", prefix="logs/", expiration=Expiration(days=20))
    lifecycle = Lifecycle()
    lifecycle.append(rule1)
    lifecycle.append(rule2)
    bucket.configure_lifecycle(lifecycle)
Example #37
def test_set_lifecycle_policy():
    """
     PUTs an arbitrary lifecycle_policy and checks that the GET lifecycle_policy API call returns 200
     and that the other lifecycle_policy metadata matches what was set in the PUT call
    """
    bucket = helpers.get_bucket()
    transitions = Transitions()
    transitions.add_transition(days=30, storage_class='STANDARD_IA')
    transitions.add_transition(days=90, storage_class='GLACIER')
    expiration = Expiration(days=120)
    rule = Rule(id='ruleid',
                prefix='logs/',
                status='Enabled',
                expiration=expiration,
                transition=transitions)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    assert bucket.configure_lifecycle(lifecycle) == True
Example #38
 def _cleanup_s3(self, bucket_name):
     """
     Adds lifecycle rule (DEL_LIFECYCLE_PATTERN % bucket_name)
     to bucket_name that marks all objects in this
     bucket as expiring(delete) in 1 day
     """
     conn = boto.connect_s3()
     b = conn.get_bucket( bucket_name )
     del_all_pattern = DEL_LIFECYCLE_PATTERN 
     msg =  "Setting deletion lifecycle rule for %s" 
     msg = msg % bucket_name 
     self.logger.info(msg)
     lf = Lifecycle()
     lf.add_rule( id=del_all_pattern % b.name,
             expiration=Expiration(days=1),
             prefix='', status='Enabled',
             transition=None  )
     b.configure_lifecycle(lf)
Example #39
def lifecycle():
    #transitions = Transitions()
    exp = Expiration(date="2018-06-13 07:05:00")
    #exp = Expiration(days=1)
    rule = Rule(id='rule-1', prefix='', status='Enabled', expiration=exp)
    lifecycle = Lifecycle()
    lifecycle.append(rule)

    bucket = conn.get_bucket(bucket_name)
    ret = bucket.configure_lifecycle(lifecycle)
    print "Bucket Lifecycle Set:", ret
    print "========================="

    current = bucket.get_lifecycle_config()
    print "Bucket Lifecycle Conf:", current
    print "Tran:", current[0].transition
    print "Expi:", current[0].expiration
    print "========================="
Example #40
def backup_transition(bucket_name, bucket_folder):
    # Engaging to backup transition
    S3_BUCKET = bucket_name
    bucket = conn.get_bucket(S3_BUCKET)

    ruleid = "rule_id_"+str(bucket_folder)

    to_glacier = Transition(days=0, storage_class='GLACIER')
    rule = Rule(ruleid, bucket_folder, 'Enabled', transition=to_glacier)

    lifecycle = Lifecycle()
    lifecycle.append(rule)

    # ready to backup
    # configuring the bucket with this lifecycle policy
    bucket.configure_lifecycle(lifecycle) 
    # retrieve the current lifecycle policy of the bucket.
    current = bucket.get_lifecycle_config() 
    print current[0].transition
Example #41
 def __init__(self, bucket_name, s3_to_glacier_after_days=None):
     # create s3 connection
     # create bucket if does not exist
     # create S3 connection if archive_S3_bucket name is specified
     self.__bucket_name = bucket_name
     self.__s3_conn = boto.connect_s3()
     self.__bucket = self.__s3_conn.lookup(self.__bucket_name)
     if not self.__bucket:
         try:
             self.__bucket = self.__s3_conn.create_bucket(self.__bucket_name)
             if s3_to_glacier_after_days is not None:
                 to_glacier = Transition(days=s3_to_glacier_after_days, storage_class='GLACIER')
                 rule = Rule(id='archive-rule1', status='Enabled', transition=to_glacier)
                 lifecycle = Lifecycle()
                 lifecycle.append(rule)
                 self.__bucket.configure_lifecycle(lifecycle)
         except S3CreateError:
             logger.error('failed to create S3 bucket[' + self.__bucket_name + ']. please check your AWS policy.')
             raise
Example #42
 def set_transition_to_glacier(self, days, prefix=''):
     """
     Set rules when the files should be moved
     to Amazon Glacier for archiving
     This method must be called before write/upload methods
     Not used at the moment, but could be used for archiving s3 broker files
     :param prefix: str, prefix
     :param days: int, num of days
     :return: None
     """
     try:
         to_glacier = Transition(days=days, storage_class='GLACIER')
         rule = Rule(id='ruleid', prefix=prefix, status='Enabled', transition=to_glacier)
         lifecycle = Lifecycle()
         lifecycle.append(rule)
         self.bucket.configure_lifecycle(lifecycle)
     except Exception as e:
         logging.exception("S3Client.set_transition_to_glacier failed for bucket {}, error {}"
                           "".format(self.bucket_name, e))
Example #43
def create_folder_and_lifecycle(bucket_name, directory, expiration):
    ''' creates or modifies an existing folder and modifies
        the expiration lifecycle '''
    # Connect to s3 and get the bucket object
    try:
        ak, sk = get_env_creds()
        s3 = boto.connect_s3(aws_access_key_id=ak,
                             aws_secret_access_key=sk)
        bucket = s3.get_bucket(bucket_name)
    except:
        print 'Could not connect to AWS/Bucket: %s' % str(sys.exc_info())
    # if there are no files in this folder yet, create a placeholder lifecycle file
    try:
        count = 0
        files = bucket.list(prefix=directory)
        for f in files:
            count += 1
        if count <= 1:  # insert a dummy file; otherwise the policy won't apply
            k = boto.s3.key.Key(bucket)
            k.key = directory + '/.lifecycle_policy.txt'
            utc_now = datetime.utcnow()
            exp_time = utc_now + timedelta(days=expiration)
            content = ('This file was created by the upload portal. The '
                       'expiration policy for this folder was created on %s.'
                       ' These file(s) will automatically expire %s days'
                       ' later, on %s.') % (utc_now.ctime(),
                                            str(expiration),
                                            exp_time.ctime())
            k.set_contents_from_string(content)
    except:
        pass
    # Create and apply the life cycle object to the prefix
    try:
        directory = directory.encode('ascii')
        lifecycle = Lifecycle()
        lifecycle.add_rule(id=directory,
                           prefix=directory,
                           status='Enabled',
                           expiration=expiration)
        bucket.configure_lifecycle(lifecycle)
    except:
        return 'Error creating lifecycle: %s' % str(sys.exc_info())
Example #44
def s3_uploader(db_backup_bucket, gpg_file_path, update_seq, checksum):
    if db_backup_bucket not in [b.name for b in con_s3.get_all_buckets()]:  # compare by name; get_all_buckets() returns Bucket objects
        print 'Backup bucket is missing, creating new bucket ', db_backup_bucket
        con_s3.create_bucket(db_backup_bucket)
        bucket = con_s3.get_bucket(db_backup_bucket)

    else:
        bucket = con_s3.get_bucket(db_backup_bucket)
        lifecycle = Lifecycle()
        lifecycle.add_rule('14 Days CouchDB Expiration', os.path.basename(gpg_file_path), 'Enabled', 14)
        bucket.configure_lifecycle(lifecycle)

    key = Key(bucket)
    key.key = os.path.basename(gpg_file_path)
    key.set_acl('authenticated-read')
    key.set_metadata('UpdateSeq', update_seq)
    key.set_metadata('Checksum', checksum)
    key.set_contents_from_filename(gpg_file_path)
    key.close()

    print 'Finished uploading backup to S3'
Example #45
def create_folder_and_lifecycle(bucket_name, directory, expiration):
    ''' creates or modifies an existing folder and modifies
        the expiration lifecycle '''
    # Connect to s3 and get the bucket object
    try:
        ak, sk = get_env_creds()
        s3 = boto.connect_s3(aws_access_key_id=ak, aws_secret_access_key=sk)
        bucket = s3.get_bucket(bucket_name)
    except:
        print 'Could not connect to AWS/Bucket: %s' % str(sys.exc_info())
    # if there are no files in this folder yet, create a placeholder lifecycle file
    try:
        count = 0
        files = bucket.list_versions(prefix=directory)
        for f in files:
            count += 1
        if count <= 1:  # insert a dummy file; otherwise the policy won't apply
            k = boto.s3.key.Key(bucket)
            k.key = directory + '/.lifecycle_policy.txt'
            utc_now = datetime.utcnow()
            exp_time = utc_now + timedelta(days=expiration)
            content = ('This file was created by the upload portal. The '
                       'expiration policy for this folder was created on %s.'
                       ' These file(s) will automatically expire %s days'
                       ' later, on %s.') % (utc_now.ctime(), str(expiration),
                                            exp_time.ctime())
            k.set_contents_from_string(content)
    except:
        pass
    # Create and apply the life cycle object to the prefix
    try:
        directory = directory.encode('ascii')
        lifecycle = Lifecycle()
        lifecycle.add_rule(id=directory,
                           prefix=directory,
                           status='Enabled',
                           expiration=expiration)
        bucket.configure_lifecycle(lifecycle)
    except:
        return 'Error creating lifecycle: %s' % str(sys.exc_info())
Example #46
def s3_uploader(db_backup_bucket, gpg_file_path, update_seq, checksum):
    if db_backup_bucket not in [b.name for b in con_s3.get_all_buckets()]:  # compare by name; get_all_buckets() returns Bucket objects
        print 'Backup bucket is missing, creating new bucket ', db_backup_bucket
        con_s3.create_bucket(db_backup_bucket)
        bucket = con_s3.get_bucket(db_backup_bucket)

    else:
        bucket = con_s3.get_bucket(db_backup_bucket)
        lifecycle = Lifecycle()
        lifecycle.add_rule('14 Days CouchDB Expiration',
                           os.path.basename(gpg_file_path), 'Enabled', 14)
        bucket.configure_lifecycle(lifecycle)

    key = Key(bucket)
    key.key = os.path.basename(gpg_file_path)
    key.set_acl('authenticated-read')
    key.set_metadata('UpdateSeq', update_seq)
    key.set_metadata('Checksum', checksum)
    key.set_contents_from_filename(gpg_file_path)
    key.close()

    print 'Finished uploading backup to S3'
Example #47
def push_code_to_Aws(dest):
    s3_connection = boto.connect_s3(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                                    aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    try:
        bucket = s3_connection.get_bucket('calljson')
    except:
        bucket = s3_connection.create_bucket('calljson')
    expiration = Expiration(days=1)
    rule = Rule(id='ruleid', status='Enabled', expiration=expiration)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
    # create new key in s3
    key = bucket.new_key(dest)
    key.content_type = 'text/plain'
    f = open(dest, 'r')
    mystring = f.read()
    key.set_contents_from_string(mystring, policy='public-read')
    time.sleep(2)
    url = key.generate_url(160)
    o = urlparse(url)
    return o.scheme + "://" + o.netloc + o.path
Example #48
    def uploadfile(self, file, bucketname):
        try:
            uploadfile = file
            bucketname = bucketname
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                   expiration=Expiration(days=10))
            conn = boto.connect_s3()

            if conn.lookup(bucketname):  # bucket exists
                bucket = conn.get_bucket(bucketname)
            else:
                #create a bucket
                bucket = conn.create_bucket(bucketname, location=boto.s3.connection.Location.DEFAULT)
            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key
            k = Key(bucket)
            k.key = uploadfile
            k.set_contents_from_filename(uploadfile, cb=self.percent_cb, num_cb=10)
            k.set_acl('public-read-write')
        except Exception as e:
            print 'failed {0}'.format(str(e))
Example #49
def cleanup_bucket(s3, bucket):
	"""Glacier-proofs the bucket by renaming the .manifest files to not get moved 
	to glacier via our lifecycle rule
	"""

	# this isn't proof against eventual consistency, but it helps
	time.sleep(10)

	keys = bucket.list()
	
	# rename all the manifest and signature files so they don't get moved to glacier
	for key in keys:
		if not key.name.startswith("_") and \
		key.name.endswith(".manifest"):  # or key.name.endswith(".sigtar.gz")):
			key.copy(bucket.name, "_" + key.name)
			key.delete()

	# re-establish our lifecycle rules
	to_glacier = Transition(days=1, storage_class='GLACIER')
	rule = Rule('movetoglacier', 'duplicity', 'Enabled', transition=to_glacier)
	lifecycle = Lifecycle()
	lifecycle.append(rule)
	bucket.configure_lifecycle(lifecycle)
Example #50
def cleanup_bucket(s3, bucket):
    """Glacier-proofs the bucket by renaming the .manifest files to not get moved 
	to glacier via our lifecycle rule
	"""

    # this isn't proof against eventual consistency, but it helps
    time.sleep(10)

    keys = bucket.list()

    # rename all the manifest and signature files so they don't get moved to glacier
    for key in keys:
        if not key.name.startswith("_") and \
        key.name.endswith(".manifest"):  # or key.name.endswith(".sigtar.gz")):
            key.copy(bucket.name, "_" + key.name)
            key.delete()

    # re-establish our lifecycle rules
    to_glacier = Transition(days=1, storage_class='GLACIER')
    rule = Rule('movetoglacier', 'duplicity', 'Enabled', transition=to_glacier)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
Example #51
    def upload_file(self, filename):
        try:
            lifecycle = Lifecycle()
            lifecycle.add_rule('rulename',
                               prefix='logs/',
                               status='Enabled',
                               expiration=Expiration(days=10))
            conn = boto.connect_s3(aws_secret_access_key=self.ec2_secret_key,
                                   aws_access_key_id=self.ec2_access_key)

            if conn.lookup(self.bucket_name):  # bucket exists
                bucket = conn.get_bucket(self.bucket_name)
            else:
                # create a bucket
                bucket = conn.create_bucket(
                    self.bucket_name,
                    location=boto.s3.connection.Location.DEFAULT)

            bucket.configure_lifecycle(lifecycle)
            from boto.s3.key import Key

            k = Key(bucket)
            k.key = filename
            k.set_contents_from_filename(filename,
                                         cb=self.percent_cb,
                                         num_cb=10)
            k.set_acl('public-read-write')

            return "https://s3.amazonaws.com/{bucket}/{filename}".format(
                bucket=self.bucket_name, filename=filename)

        except Exception as e:
            logging.error("S3StorageAgent failed with exception:\n{0}".format(
                str(e)))
            logging.error(traceback.format_exc())
            raise e
Example #52
def destroy_lifecycle_rule(connection, module):

    name = module.params.get("name")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    changed = False

    if prefix is None:
        prefix = ""

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            module.exit_json(changed=changed)
        else:
            module.fail_json(msg=e.message)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    # Check if rule exists
    # If an ID exists, use it; otherwise compare based on prefix
    if rule_id is not None:
        for existing_rule in current_lifecycle_obj:
            if rule_id == existing_rule.id:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)
    else:
        for existing_rule in current_lifecycle_obj:
            if prefix == existing_rule.prefix:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)

    # Write lifecycle to bucket or, if there are no rules left, delete the lifecycle configuration
    try:
        if lifecycle_obj:
            bucket.configure_lifecycle(lifecycle_obj)
        else:
            bucket.delete_lifecycle_configuration()
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
Example #53
 def test_lifecycle_multi(self):
     date = '2022-10-12T00:00:00.000Z'
     sc = 'GLACIER'
     lifecycle = Lifecycle()
     lifecycle.add_rule("1", "1/", "Enabled", 1)
     lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
     lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
     lifecycle.add_rule("4", "4/", "Enabled", None,
                        Transition(days=4, storage_class=sc))
     lifecycle.add_rule("5", "5/", "Enabled", None,
                        Transition(date=date, storage_class=sc))
     # set the lifecycle
     self.bucket.configure_lifecycle(lifecycle)
     # read the lifecycle back
     readlifecycle = self.bucket.get_lifecycle_config()
     for rule in readlifecycle:
         if rule.id == "1":
             self.assertEqual(rule.prefix, "1/")
             self.assertEqual(rule.expiration.days, 1)
         elif rule.id == "2":
             self.assertEqual(rule.prefix, "2/")
             self.assertEqual(rule.expiration.days, 2)
         elif rule.id == "3":
             self.assertEqual(rule.prefix, "3/")
             self.assertEqual(rule.expiration.date, date)
         elif rule.id == "4":
             self.assertEqual(rule.prefix, "4/")
             self.assertEqual(rule.transition.days, 4)
             self.assertEqual(rule.transition.storage_class, sc)
         elif rule.id == "5":
             self.assertEqual(rule.prefix, "5/")
             self.assertEqual(rule.transition.date, date)
             self.assertEqual(rule.transition.storage_class, sc)
         else:
             self.fail("unexpected id %s" % rule.id)
Example #54
 def test_expiration_with_no_transition(self):
     lifecycle = Lifecycle()
     lifecycle.add_rule('myid', 'prefix', 'Enabled', 30)
     xml = lifecycle.to_xml()
     self.assertIn('<Expiration><Days>30</Days></Expiration>', xml)
Example #55
def create_lifecycle_rule(connection, module):

    name = module.params.get("name")
    expiration_date = module.params.get("expiration_date")
    expiration_days = module.params.get("expiration_days")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    status = module.params.get("status")
    storage_class = module.params.get("storage_class")
    transition_date = module.params.get("transition_date")
    transition_days = module.params.get("transition_days")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition
    if transition_days is not None:
        transition_obj = Transition(days=transition_days,
                                    storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date,
                                    storage_class=storage_class.upper())
    else:
        transition_obj = None

    # Create rule
    rule = Rule(rule_id, prefix, status.title(), expiration_obj,
                transition_obj)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    appended = False
    # If current_lifecycle_obj is not None then we have rules to compare; otherwise just add the rule
    if current_lifecycle_obj:
        # If a rule ID exists, use that for comparison; otherwise compare based on prefix
        for existing_rule in current_lifecycle_obj:
            if rule.id == existing_rule.id:
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            elif rule.prefix == existing_rule.prefix:
                existing_rule.id = None
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            else:
                lifecycle_obj.append(existing_rule)
        # If nothing appended then append now as the rule must not exist
        if not appended:
            lifecycle_obj.append(rule)
            changed = True
    else:
        lifecycle_obj.append(rule)
        changed = True

    # Write lifecycle to bucket
    try:
        bucket.configure_lifecycle(lifecycle_obj)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
Example #56
def test_lifecycle_multi():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    date = "2022-10-12T00:00:00.000Z"
    sc = "GLACIER"
    lifecycle = Lifecycle()
    lifecycle.add_rule("1", "1/", "Enabled", 1)
    lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
    lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
    lifecycle.add_rule("4", "4/", "Enabled", None,
                       Transition(days=4, storage_class=sc))
    lifecycle.add_rule("5", "5/", "Enabled", None,
                       Transition(date=date, storage_class=sc))

    bucket.configure_lifecycle(lifecycle)
    # read the lifecycle back
    rules = bucket.get_lifecycle_config()

    for rule in rules:
        if rule.id == "1":
            rule.prefix.should.equal("1/")
            rule.expiration.days.should.equal(1)
        elif rule.id == "2":
            rule.prefix.should.equal("2/")
            rule.expiration.days.should.equal(2)
        elif rule.id == "3":
            rule.prefix.should.equal("3/")
            rule.expiration.date.should.equal(date)
        elif rule.id == "4":
            rule.prefix.should.equal("4/")
            rule.transition.days.should.equal(4)
            rule.transition.storage_class.should.equal(sc)
        elif rule.id == "5":
            rule.prefix.should.equal("5/")
            rule.transition.date.should.equal(date)
            rule.transition.storage_class.should.equal(sc)
        else:
            assert False, "Invalid rule id"
Example #57
def test_lifecycle_multi():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    date = '2022-10-12T00:00:00.000Z'
    sc = 'GLACIER'
    lifecycle = Lifecycle()
    lifecycle.add_rule("1", "1/", "Enabled", 1)
    lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
    lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
    lifecycle.add_rule("4", "4/", "Enabled", None,
        Transition(days=4, storage_class=sc))
    lifecycle.add_rule("5", "5/", "Enabled", None,
        Transition(date=date, storage_class=sc))

    bucket.configure_lifecycle(lifecycle)
    # read the lifecycle back
    rules = bucket.get_lifecycle_config()

    for rule in rules:
        if rule.id == "1":
            rule.prefix.should.equal("1/")
            rule.expiration.days.should.equal(1)
        elif rule.id == "2":
            rule.prefix.should.equal("2/")
            rule.expiration.days.should.equal(2)
        elif rule.id == "3":
            rule.prefix.should.equal("3/")
            rule.expiration.date.should.equal(date)
        elif rule.id == "4":
            rule.prefix.should.equal("4/")
            rule.transition.days.should.equal(4)
            rule.transition.storage_class.should.equal(sc)
        elif rule.id == "5":
            rule.prefix.should.equal("5/")
            rule.transition.date.should.equal(date)
            rule.transition.storage_class.should.equal(sc)
        else:
            assert False, "Invalid rule id"