def get_session(self):
    """Return a boto3 Session for self.region (and self.profile when set).

    Prints a message and returns None when no region is configured.
    """
    if not self.region:
        print("Unable to create boto3 session, please define region")
        return None
    # boto3.session is a module and is not callable; sessions are created
    # via boto3.Session, and its kwargs are profile_name / region_name.
    if self.profile:
        return boto3.Session(profile_name=self.profile,
                             region_name=self.region)
    return boto3.Session(region_name=self.region)
def setUp(self):
    """Create AWS clients and a presigned template URL for the test run.

    NOTE(review): self.bucket / self.key are empty strings here, so the
    get_template_summary call presumably only works once they are filled
    in — confirm against the test that uses this fixture.
    """
    self.bucket = ''
    self.key = ''
    self.session = boto3.session.Session()
    # boto3.session is a module and is not callable; service clients come
    # from the session's client() factory.
    self.cfn_client = self.session.client('cloudformation')
    self.s3_client = self.session.client('s3')
    self.url = self.s3_client.generate_presigned_url(
        'get_object',
        Params={'Bucket': self.bucket, 'Key': self.key},
        ExpiresIn=60)
    self.template_details = self.cfn_client.get_template_summary(
        TemplateURL=self.url)
    self.job_identifier = ''
    self.parameters = []
    self.tags = []
def determine_key_type(bucket, key, profile):
    """Classify an S3 key.

    Return values: 'File', 'Folder' or 'Does not exist'.

    Raises:
        Exception: when bucket or key is missing, or S3 returns a
            response without a KeyCount field.
    """
    if profile:
        # boto3.session is a module and is not callable; the session
        # class is boto3.Session.
        s3 = boto3.Session(profile_name=profile).client('s3')
    else:
        s3 = boto3.client('s3')
    if not key:
        raise Exception("Cannot determine key type - no key is specified")
    if not bucket:
        raise Exception("Cannot determine key type - no bucket is specified")
    # Normalize away any trailing slash so the prefix probes are uniform.
    key = key.rstrip('/')
    res = s3.list_objects_v2(Bucket=bucket, Prefix=key + '/')
    if 'KeyCount' not in res:
        raise Exception("Cannot determine key type - no response from S3")
    if res['KeyCount'] == 0:
        # Nothing under "key/" — check whether the key exists as an object.
        res2 = s3.list_objects_v2(Bucket=bucket, Prefix=key)
        if 'KeyCount' not in res2:
            raise Exception("Cannot determine key type - no response from S3")
        if res2['KeyCount'] == 0:
            return 'Does not exist'  # key does not exist
        # The file itself may be a prefix of another file
        # (e.g. abc.vcf.gz vs abc.vcf.gz.tbi) but it doesn't matter.
        return 'File'
    # Objects exist under "key/", so the key is a folder.
    return 'Folder'
def lambda_handler(event, context):
    """Lambda entry point: run the report and upload it to S3.

    Uploads /tmp/cost_explorer_report.xlsx to the bucket named by the
    REPORT_STORAGE environment variable, keyed by the account id.
    """
    global session
    session = boto3.Session()
    print(json.dumps(event, sort_keys=True))
    main_handler()
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(os.getenv("REPORT_STORAGE"))
    # boto3.session is a module and is not callable; an STS client comes
    # from boto3.client('sts').
    account_id = boto3.client('sts').get_caller_identity()['Account']
    bucket.upload_file('/tmp/cost_explorer_report.xlsx',
                       account_id + '.xlsx')
import boto3

# NOTE(review): never hard-code credentials in source. These constants are
# unused anyway — the session below is built from the named profile.
AWS_ACCESS_KEY_ID = 'xxxxxxxxxxxxxxxxxxxxxx'
AWS_SECRET_ACCESS_KEY = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'

# boto3.session is a module and is not callable; the class is boto3.Session.
session = boto3.Session(profile_name="aws_ec2_iam_user",
                        region_name='us-east-1')
ec2_re_ob = session.resource(service_name="ec2")

# Print every EC2 instance visible to the profile.
for each_instance in ec2_re_ob.instances.all():
    print(each_instance)
# coding: utf-8
import boto3

# boto3.session is a module and is not callable (the original transcript's
# first attempt raised TypeError); the session class is boto3.Session.
session = boto3.Session(profile_name='pythonAutomation')
s3 = session.resource('s3')

# List the existing buckets.  (The transcript's first loop printed the
# undefined name `buckets` — NameError — fixed to the loop variable.)
for bucket in s3.buckets.all():
    print(bucket)

new_bucket = s3.create_bucket(Bucket='automatingaws-vuk-ipython')

# List again so the newly created bucket shows up.
for bucket in s3.buckets.all():
    print(bucket)
import boto3

if __name__ == '__main__':
    # boto3.session is a module and is not callable; use boto3.Session.
    session = boto3.Session(profile_name='sanjay')
    ec2 = session.resource('ec2')
    # Print every EC2 instance visible to the profile.
    for instance in ec2.instances.all():
        print(instance)
# coding: utf-8
import boto3

# Cleaned-up transcript: the original contained typo'd first attempts
# ("inmport boto3", "session.client{'autoscaling')",
# "decribe_auto-scaling_groups()") followed by corrected retries; only
# the working statements are kept.
session = boto3.Session(profile_name='automate')
as_client = session.client('autoscaling')

as_client.describe_auto_scaling_groups()
as_client.describe_policies()

# Exercise the scaling policies on the example group (scale up was
# executed twice in the original session; preserved).
as_client.execute_policy(AutoScalingGroupName='notifon example group',
                         PolicyName='scale down')
as_client.execute_policy(AutoScalingGroupName='notifon example group',
                         PolicyName='scale up')
as_client.execute_policy(AutoScalingGroupName='notifon example group',
                         PolicyName='scale up')
import decimal
from unittest.mock import patch

import boto3
from boto3.dynamodb import types


# NOTE(review): the serializer methods call super(FloatSerializer, self),
# so they must live inside a class deriving from the boto3 TypeSerializer;
# the class header was missing and has been reconstructed — confirm the
# intended base class.
class FloatSerializer(types.TypeSerializer):
    """TypeSerializer that also accepts Python floats and string map keys."""

    # Add float-specific serialization code.
    def _serialize_n(self, value):
        if isinstance(value, float):
            with decimal.localcontext(types.DYNAMODB_CONTEXT) as context:
                # Allow the lossy float -> Decimal conversion instead of
                # trapping on Inexact/Rounded.
                context.traps[decimal.Inexact] = 0
                context.traps[decimal.Rounded] = 0
                return str(context.create_decimal_from_float(value))
        return super(FloatSerializer, self)._serialize_n(value)

    # By the way, you can not write dictionaries with int/float/whatever
    # keys as is, boto3 does not convert them to strings automatically.
    # And DynamoDB does not support numerical keys anyway, so this crude
    # workaround seems reasonable.
    def _serialize_m(self, value):
        return {str(k): self.serialize(v) for k, v in value.items()}


# boto3.session is a module and is not callable; boto3.Session() creates
# a session.
session = boto3.Session()
# TypeSerializers are created on resource creation, so we need to patch
# before building the resource.
with patch("boto3.dynamodb.types.TypeSerializer", new=FloatSerializer):
    db = session.resource("dynamodb")
import boto3

# boto3.session is a module and is not callable; the class is boto3.Session.
session = boto3.Session(profile_name='shotty')
ec2 = session.resource('ec2')


def list_instances():
    """Print every EC2 instance visible to the 'shotty' profile."""
    for instance in ec2.instances.all():
        print(instance)


if __name__ == '__main__':
    list_instances()
'AttributeName': 'water_temp', 'KeyType': 'RANGE' # Sort key } ], AttributeDefinitions=[ { 'AttributeName': 'log_id', 'AttributeType': 'N' }, { 'AttributeName': 'water_temp', 'AttributeType': 'S' }, ], ProvisionedThroughput={ 'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5 }) return table if __name__ == '__main__': session = boto3.session( region_name='eu-west-2', aws_access_key_id=AKIA3SOP7ROQ3BZJJ6UI, aws_secret_access_key=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) movie_table = create_movie_table() print("Table status:", movie_table.table_status)
def create_aws_session(aws_target: str = "ec2") -> object:
    """
    Instantiate an AWS client for *aws_target*.

    NOTE(review): the original called boto3.session(aws_target), which
    always raises TypeError because boto3.session is a module, not a
    callable — and boto3.Session() takes no service name.  Given that
    the parameter is a service name ("ec2"), a service client is
    presumably what was intended; verify against callers.
    """
    return boto3.Session().client(aws_target)