def test_lifecycle_multi(self):
    date = '2022-10-12T00:00:00.000Z'
    sc = 'GLACIER'
    lifecycle = Lifecycle()
    lifecycle.add_rule("1", "1/", "Enabled", 1)
    lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
    lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
    lifecycle.add_rule("4", "4/", "Enabled", None,
                       Transition(days=4, storage_class=sc))
    lifecycle.add_rule("5", "5/", "Enabled", None,
                       Transition(date=date, storage_class=sc))
    # set the lifecycle
    self.bucket.configure_lifecycle(lifecycle)
    # read the lifecycle back
    readlifecycle = self.bucket.get_lifecycle_config()
    for rule in readlifecycle:
        if rule.id == "1":
            self.assertEqual(rule.prefix, "1/")
            self.assertEqual(rule.expiration.days, 1)
        elif rule.id == "2":
            self.assertEqual(rule.prefix, "2/")
            self.assertEqual(rule.expiration.days, 2)
        elif rule.id == "3":
            self.assertEqual(rule.prefix, "3/")
            self.assertEqual(rule.expiration.date, date)
        elif rule.id == "4":
            self.assertEqual(rule.prefix, "4/")
            self.assertEqual(rule.transition.days, 4)
            self.assertEqual(rule.transition.storage_class, sc)
        elif rule.id == "5":
            self.assertEqual(rule.prefix, "5/")
            self.assertEqual(rule.transition.date, date)
            self.assertEqual(rule.transition.storage_class, sc)
        else:
            self.fail("unexpected id %s" % rule.id)
def upload_file(self, filename):
    try:
        lifecycle = Lifecycle()
        lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                           expiration=Expiration(days=10))
        conn = boto.connect_s3()
        if conn.lookup(self.bucket_name):
            # bucket exists
            bucket = conn.get_bucket(self.bucket_name)
        else:
            # create a bucket
            bucket = conn.create_bucket(
                self.bucket_name,
                location=boto.s3.connection.Location.DEFAULT)
        bucket.configure_lifecycle(lifecycle)
        from boto.s3.key import Key
        k = Key(bucket)
        k.key = filename
        k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
        k.set_acl('public-read-write')
    except Exception as e:
        sys.stdout.write(
            "AmazonS3Agent failed with exception:\n{0}".format(str(e)))
        sys.stdout.flush()
        raise e
def compare_rule(rule_a, rule_b):
    # Copy objects
    rule1 = copy.deepcopy(rule_a)
    rule2 = copy.deepcopy(rule_b)

    # Delete Rule from Rule
    try:
        del rule1.Rule
    except AttributeError:
        pass
    try:
        del rule2.Rule
    except AttributeError:
        pass

    # Extract Expiration and Transition objects
    rule1_expiration = rule1.expiration
    rule1_transition = rule1.transition
    rule2_expiration = rule2.expiration
    rule2_transition = rule2.transition

    # Delete the Expiration and Transition objects from the Rule objects
    del rule1.expiration
    del rule1.transition
    del rule2.expiration
    del rule2.transition

    # Compare
    if rule1_transition is None:
        rule1_transition = Transition()
    if rule2_transition is None:
        rule2_transition = Transition()
    if rule1_expiration is None:
        rule1_expiration = Expiration()
    if rule2_expiration is None:
        rule2_expiration = Expiration()

    if (rule1.__dict__ == rule2.__dict__) and \
            (rule1_expiration.__dict__ == rule2_expiration.__dict__) and \
            (rule1_transition.__dict__ == rule2_transition.__dict__):
        return True
    else:
        return False
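# A minimal usage sketch for compare_rule above (not from the original
# source): it assumes boto 2's Rule/Expiration/Transition classes and that
# `copy` is importable at module level; the rule ids, prefixes, and day
# counts below are illustrative placeholders only.
import copy
from boto.s3.lifecycle import Rule, Expiration, Transition

rule_x = Rule('logs', 'logs/', 'Enabled', expiration=Expiration(days=30))
rule_y = Rule('logs', 'logs/', 'Enabled', expiration=Expiration(days=30))
rule_z = Rule('logs', 'logs/', 'Enabled', expiration=Expiration(days=60))
print compare_rule(rule_x, rule_y)  # True: same prefix, status and expiration
print compare_rule(rule_x, rule_z)  # False: expiration days differ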
def lifecycle(name):
    text = 'Thu, 16 Dec 2013'
    date = parser.parse(text)
    d = date.isoformat()
    bucket = conn.get_bucket(name)
    lifecycle_config = boto.s3.lifecycle.Lifecycle()
    lifecycle_config.add_rule("lc1", "/", "Enabled",
                              expiration=Expiration(date=d))
    bucket.configure_lifecycle(lifecycle_config)
def test_lifecycle_multi():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("foobar")

    date = "2022-10-12T00:00:00.000Z"
    sc = "GLACIER"
    lifecycle = Lifecycle()
    lifecycle.add_rule("1", "1/", "Enabled", 1)
    lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
    lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
    lifecycle.add_rule("4", "4/", "Enabled", None,
                       Transition(days=4, storage_class=sc))
    lifecycle.add_rule("5", "5/", "Enabled", None,
                       Transition(date=date, storage_class=sc))
    bucket.configure_lifecycle(lifecycle)

    # read the lifecycle back
    rules = bucket.get_lifecycle_config()
    for rule in rules:
        if rule.id == "1":
            rule.prefix.should.equal("1/")
            rule.expiration.days.should.equal(1)
        elif rule.id == "2":
            rule.prefix.should.equal("2/")
            rule.expiration.days.should.equal(2)
        elif rule.id == "3":
            rule.prefix.should.equal("3/")
            rule.expiration.date.should.equal(date)
        elif rule.id == "4":
            rule.prefix.should.equal("4/")
            rule.transition.days.should.equal(4)
            rule.transition.storage_class.should.equal(sc)
        elif rule.id == "5":
            rule.prefix.should.equal("5/")
            rule.transition.date.should.equal(date)
            rule.transition.storage_class.should.equal(sc)
        else:
            assert False, "Invalid rule id"
def get_lifecycle(expiration_path, days_to_expiration):
    if days_to_expiration is not None and expiration_path is not None:
        lifecycle = Lifecycle()
        print "Adding expiration rule of %s days for S3 path %s" % (
            days_to_expiration, expiration_path)
        lifecycle.add_rule('expirationrule', prefix=expiration_path,
                           status='Enabled',
                           expiration=Expiration(days=int(days_to_expiration)))
        return lifecycle
    else:
        print "No expiration rule added"
        return None
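# A minimal sketch (not from the original source) showing how the helper
# above might be wired up: it assumes boto credentials are configured in the
# environment, and 'my-log-bucket' plus the 'logs/' prefix are illustrative
# placeholders.
import boto

lifecycle = get_lifecycle('logs/', 30)
if lifecycle is not None:
    conn = boto.connect_s3()
    bucket = conn.get_bucket('my-log-bucket')
    bucket.configure_lifecycle(lifecycle)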
def upload(bucket_name, image_name, image):
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    lifecycle = Lifecycle()
    lifecycle.add_rule('s3-image-uploader', prefix=FILE_PREFIX,
                       status='Enabled',
                       expiration=Expiration(days=EXPIRATION))
    bucket.configure_lifecycle(lifecycle)
    k = Key(bucket)
    k.key = image_name
    k.set_contents_from_string(image)
def create_s3():
    """
    Create the S3 buckets

    All the buckets use the galaxy name as the 'folder'
    :return:
    """
    # Create the bucket for the images
    s3 = boto.connect_s3()
    images_bucket = 'icrar.{0}.galaxy-images'.format(env.project_name)
    bucket = s3.create_bucket(images_bucket)
    bucket.set_acl('public-read')
    bucket.configure_website(suffix='index.html')
    bucket.set_policy('''{
  "Statement":[
    {
      "Sid":"PublicReadForGetBucketObjects",
      "Effect":"Allow",
      "Principal": { "AWS": "*" },
      "Action":["s3:GetObject"],
      "Resource":["arn:aws:s3:::%s/*"]
    }
  ]
}
''' % images_bucket)

    # Create the bucket for the output files
    file_bucket = 'icrar.{0}.files'.format(env.project_name)
    s3.create_bucket(file_bucket)

    # Create the bucket for the stats files
    file_bucket = 'icrar.{0}.archive'.format(env.project_name)
    bucket = s3.create_bucket(file_bucket)
    to_glacier = Transition(days=10, storage_class='GLACIER')
    rule1 = Rule('rule01', status='Enabled', prefix='stats/',
                 transition=to_glacier)
    rule2 = Rule('rule02', status='Enabled', prefix='logs/',
                 expiration=Expiration(days=20))
    lifecycle = Lifecycle()
    lifecycle.append(rule1)
    lifecycle.append(rule2)
    bucket.configure_lifecycle(lifecycle)
def set_bucket_lifetime(bucket_name, days=14, aws_access={}, conn=None):
    '''
    Set an expiration on a bucket in S3.
    '''
    conn = return_s3_connection(aws_access) if conn is None else conn
    bucket = conn.get_bucket(bucket_name)
    expiration = Expiration(days=days)
    rule = Rule(id='ruleid', prefix='', status='Enabled',
                expiration=expiration)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    return bucket.configure_lifecycle(lifecycle)
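# Hypothetical invocation of set_bucket_lifetime (not from the original
# source): it assumes AWS credentials are available to boto and passes an
# explicit connection so return_s3_connection is not needed; the bucket name
# is a placeholder.
import boto

conn = boto.connect_s3()
set_bucket_lifetime('my-archive-bucket', days=30, conn=conn)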
def test_set_lifecycle_policy():
    """
    PUTs an arbitrary lifecycle_policy and checks whether the GET
    lifecycle_policy API call returns 200 and the other lifecycle_policy
    metadata is as set in the PUT call
    """
    bucket = helpers.get_bucket()
    transitions = Transitions()
    transitions.add_transition(days=30, storage_class='STANDARD_IA')
    transitions.add_transition(days=90, storage_class='GLACIER')
    expiration = Expiration(days=120)
    rule = Rule(id='ruleid', prefix='logs/', status='Enabled',
                expiration=expiration, transition=transitions)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    assert bucket.configure_lifecycle(lifecycle) == True
def lifecycle():
    # transitions = Transitions()
    exp = Expiration(date="2018-06-13 07:05:00")
    # exp = Expiration(days=1)
    rule = Rule(id='rule-1', prefix='', status='Enabled', expiration=exp)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket = conn.get_bucket(bucket_name)
    ret = bucket.configure_lifecycle(lifecycle)
    print "Bucket Lifecycle Set:", ret
    print "========================="
    current = bucket.get_lifecycle_config()
    print "Bucket Lifecycle Conf:", current
    print "Tran:", current[0].transition
    print "Expi:", current[0].expiration
    print "========================="
def push_code_to_Aws(dest):
    s3_connection = boto.connect_s3(
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    try:
        bucket = s3_connection.get_bucket('calljson')
    except:
        bucket = s3_connection.create_bucket('calljson')
    expiration = Expiration(days=1)
    rule = Rule(id='ruleid', status='Enabled', expiration=expiration)
    lifecycle = Lifecycle()
    lifecycle.append(rule)
    bucket.configure_lifecycle(lifecycle)
    # create new key in s3
    key = bucket.new_key(dest)
    key.content_type = 'text/plain'
    f = open(dest, 'r')
    mystring = f.read()
    key.set_contents_from_string(mystring, policy='public-read')
    time.sleep(2)
    url = key.generate_url(160)
    o = urlparse(url)
    return o.scheme + "://" + o.netloc + o.path
def upload_file(self, filename):
    try:
        lifecycle = Lifecycle()
        lifecycle.add_rule('rulename', prefix='logs/', status='Enabled',
                           expiration=Expiration(days=10))
        conn = boto.connect_s3(aws_secret_access_key=self.ec2_secret_key,
                               aws_access_key_id=self.ec2_access_key)
        if conn.lookup(self.bucket_name):
            # bucket exists
            bucket = conn.get_bucket(self.bucket_name)
        else:
            # create a bucket
            bucket = conn.create_bucket(
                self.bucket_name,
                location=boto.s3.connection.Location.DEFAULT)
        bucket.configure_lifecycle(lifecycle)
        from boto.s3.key import Key
        k = Key(bucket)
        k.key = filename
        k.set_contents_from_filename(filename, cb=self.percent_cb, num_cb=10)
        k.set_acl('public-read-write')
        return "https://s3.amazonaws.com/{bucket}/{filename}".format(
            bucket=self.bucket_name, filename=filename)
    except Exception as e:
        logging.error("S3StorageAgent failed with exception:\n{0}".format(
            str(e)))
        logging.error(traceback.format_exc())
        raise e
def create_lifecycle_rule(connection, module):
    name = module.params.get("name")
    expiration_date = module.params.get("expiration_date")
    expiration_days = module.params.get("expiration_days")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    status = module.params.get("status")
    storage_class = module.params.get("storage_class")
    transition_date = module.params.get("transition_date")
    transition_days = module.params.get("transition_days")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition
    if transition_days is not None:
        transition_obj = Transition(days=transition_days,
                                    storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date,
                                    storage_class=storage_class.upper())
    else:
        transition_obj = None

    # Create rule
    rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    appended = False
    # If current_lifecycle_obj is not None then we have rules to compare,
    # otherwise just add the rule
    if current_lifecycle_obj:
        # If rule ID exists, use that for comparison otherwise compare based
        # on prefix
        for existing_rule in current_lifecycle_obj:
            if rule.id == existing_rule.id:
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            elif rule.prefix == existing_rule.prefix:
                existing_rule.id = None
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            else:
                lifecycle_obj.append(existing_rule)
        # If nothing appended then append now as the rule must not exist
        if not appended:
            lifecycle_obj.append(rule)
            changed = True
    else:
        lifecycle_obj.append(rule)
        changed = True

    # Write lifecycle to bucket
    try:
        bucket.configure_lifecycle(lifecycle_obj)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
import boto
from boto.s3.lifecycle import (
    Lifecycle,
    Expiration,
)

from amazon.dynamo import User
from app import user_keys

s3 = boto.connect_s3()

for u in User().get_sites():
    bucket = s3.get_bucket(u[user_keys.user_role])
    bucket.delete_lifecycle_configuration()
    lifecycle = Lifecycle()
    for l in u[user_keys.user_site_leagues]:
        lifecycle.add_rule(l + ' tweets expire', prefix=l + '/tweet',
                           status='Enabled', expiration=Expiration(days=7))
    bucket.configure_lifecycle(lifecycle)
def test_bucket_lifecycle(self):
    lifecycle_id = 'eutester lifecycle test'
    lifecycle_prefix = 'eulifecycle'
    lifecycle_status = 'Enabled'
    lifecycle_expiration = 1
    bucket_name = self.bucket_prefix + "lifecycle-test0"
    self.buckets_used.add(bucket_name)
    bucket = self.tester.create_bucket(bucket_name)
    lifecycle = Lifecycle()
    lifecycle.add_rule(lifecycle_id, lifecycle_prefix, lifecycle_status,
                       lifecycle_expiration)
    bucket.configure_lifecycle(lifecycle)
    responses = bucket.get_lifecycle_config()
    assert (len(responses) == 1), \
        "Expected exactly one lifecycle rule, found " + str(len(responses))
    lifecycle_response = responses[0]
    assert (lifecycle_response.id == lifecycle_id), \
        "Expected lifecycle Id to be: " + lifecycle_id + \
        " found " + lifecycle_response.id
    assert (lifecycle_response.prefix == lifecycle_prefix), \
        "Expected lifecycle prefix to be: " + lifecycle_prefix + \
        " found " + lifecycle_response.prefix
    assert (lifecycle_response.status == lifecycle_status), \
        "Expected lifecycle status to be: " + lifecycle_status + \
        " found " + lifecycle_response.status
    assert (lifecycle_response.expiration.days == lifecycle_expiration), \
        "Expected lifecycle expiration days to be: " + \
        str(lifecycle_expiration) + " found " + \
        str(lifecycle_response.expiration.days)
    bucket.delete_lifecycle_configuration()
    assert (len(responses) == 1), \
        "Expected no configuration, found " + str(len(responses)) + \
        " configuration"

    # multiple rules
    bucket_name = self.bucket_prefix + "lifecycle-test1"
    bucket = self.tester.create_bucket(bucket_name)
    self.buckets_used.add(bucket_name)
    date = '2022-10-12T00:10:10.011Z'
    lifecycle = Lifecycle()
    lifecycle.add_rule("1", "1/", "Enabled", 1)
    lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
    lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
    lifecycle.add_rule("4", "4/", "Disabled", Expiration(date=date))
    bucket.configure_lifecycle(lifecycle)
    lifecycle_responses = bucket.get_lifecycle_config()
    if len(lifecycle_responses) <= 0:
        self.fail("no lifecycle found!")

    for response in lifecycle_responses:
        if response.id == "1":
            assert (response.prefix == "1/"), \
                "Expected lifecycle prefix to be: 1/ found: " + response.prefix
            assert (response.status == "Enabled"), \
                "Expected lifecycle status to be: Enabled found " + \
                response.status
            assert (response.expiration.days == 1), \
                "Expected lifecycle expiration days to be: 1 found " + \
                str(response.expiration.days)
        elif response.id == "2":
            assert (response.prefix == "2/"), \
                "Expected lifecycle prefix to be: 2/ found: " + response.prefix
            assert (response.status == "Enabled"), \
                "Expected lifecycle status to be: Enabled found: " + \
                response.status
            assert (response.expiration.days == 2), \
                "Expected lifecycle expiration days to be: 2 found " + \
                str(response.expiration.days)
        elif response.id == "3":
            assert (response.prefix == "3/"), \
                "Expected lifecycle prefix to be: 3/ found: " + response.prefix
            assert (response.status == "Enabled"), \
                "Expected lifecycle status to be: Enabled found " + \
                response.status
            assert (response.expiration.date == date), \
                "Expected lifecycle expiration date to be: " + date + \
                " found " + str(response.expiration.date)
        elif response.id == "4":
            assert (response.prefix == "4/"), \
                "Expected lifecycle prefix to be: 4/ found: " + response.prefix
            assert (response.status == "Disabled"), \
                "Expected lifecycle status to be: Disabled found " + \
                response.status
            assert (response.expiration.date == date), \
                "Expected lifecycle expiration date to be: " + date + \
                " found " + str(response.expiration.date)
        else:
            self.fail("no response found")

    self.debug("Cleaning up used buckets")
    for bucket in self.buckets_used:
        self.tester.clear_bucket(bucket)
    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition
    if transition_days is not None:
        transition_obj = Transition(days=transition_days,
                                    storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date,
                                    storage_class=storage_class.upper())
    else:
        transition_obj = None
def lifecycle(name):
    bucket = conn.get_bucket(name)
    lifecycle_config = boto.s3.lifecycle.Lifecycle()
    lifecycle_config.add_rule("lc1", "/", "Enabled",
                              expiration=Expiration(days=30))
    bucket.configure_lifecycle(lifecycle_config)