def test_create_client_with_args(self):
    bc_session = self.bc_session_cls.return_value
    session = Session(region_name='us-east-1')
    session.client('sqs', region_name='us-west-2')
    bc_session.create_client.assert_called_with(
        'sqs', aws_secret_access_key=None, aws_access_key_id=None,
        endpoint_url=None, use_ssl=True, aws_session_token=None,
        verify=None, region_name='us-west-2', api_version=None)
def get_machine_ip():
    session = Session(region_name='us-east-1')
    asg_client = session.client('autoscaling')
    name = auto_scaling_group_name()
    asg_response = asg_client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[name])
    instance_id = asg_response['AutoScalingGroups'][0]['Instances'][0]['InstanceId']
    ec2_client = session.client('ec2')
    instances = ec2_client.describe_instances(InstanceIds=[instance_id])
    return instances['Reservations'][0]['Instances'][0]['PublicIpAddress']
class AwsConnection(object):
    # Home directory of the user; expanduser keeps this portable across platforms.
    homedir = expanduser("~")
    # Credentials file location.
    awscred = homedir + '/.aws/credentials'
    __session_token = None
    aws_region = None
    elbclient = None
    ec2client = None
    ec2resource = None
    r53client = None
    cfclient = None

    def __init__(self, awsaccno=None):
        self.awsaccno = awsaccno
        if self.awsaccno is None:
            self.awsaccno = 'default'
        self.__config = ConfigParser()
        self.__config.read(self.awscred)
        if self.__config.has_option(self.awsaccno, "aws_session_token"):
            self.__session_token = self.__config.get(self.awsaccno, "aws_session_token")
        if self.__config.has_option(self.awsaccno, "region_name"):
            self.aws_region = self.__config.get(self.awsaccno, "region_name")
        else:
            self.aws_region = 'us-west-2'
        self.session = Session(
            aws_access_key_id=self.__config.get(self.awsaccno, "aws_access_key_id"),
            aws_secret_access_key=self.__config.get(self.awsaccno, "aws_secret_access_key"),
            aws_session_token=self.__session_token,
            region_name=self.aws_region)
        self.elbclient = self.session.client('elb')
        self.ec2client = self.session.client('ec2')
        self.ec2resource = self.session.resource('ec2')
        self.r53client = self.session.client('route53')
        self.cfclient = self.session.client('cloudformation')
        self.rdsclient = self.session.client("rds")
def deploy(prepared_file, timeout=300):
    """Top-level deploy driver; idempotent."""
    with open(prepared_file, 'rt') as fh:
        opts = yaml.safe_load(fh)
    if '__PREPARED__' not in opts:
        raise Exception("using 'unprepared' file to deploy."
                        " First run prepare on it")
    session = Session(profile_name=opts.get('profile_name'),
                      region_name=opts['region'])
    ec2 = session.resource("ec2")
    cff = session.resource("cloudformation")
    route53 = session.client("route53")
    elb = session.client("elb")
    stack = create_stack(opts, ec2, cff)
    # Ensure that the stack is ready.
    wait_for_stack_ready(stack, timeout)
    # e.g. 'arn:aws:cloudformation:us-east-1:375783000519:stack/mjog-pcf-42f062-OpsManStack-13R2QRZJCIPTB/ec4e3010-ef99-11e5-9206-500c28604c82'
    opsman_stack_arn = next(
        ll for ll in stack.resource_summaries.iterator()
        if ll.logical_id == 'OpsManStack').physical_resource_id
    opsman_stack = get_stack(opsman_stack_arn.split('/')[1], cff)
    stack_vars = get_stack_outputvars(opsman_stack, ec2)
    stack_vars.update(get_stack_outputvars(stack, ec2))
    ops_manager_inst = launch_ops_manager(opts, stack_vars, ec2)
    # Ensure that Ops Manager is ready to receive requests.
    wait_for_opsman_ready(ops_manager_inst, timeout)
    dnsmapping.map_ert_domain(stackname=opts['stack-name'],
                              domain=opts['domain'],
                              route53=route53, elb=elb)
    ops = configure_ops_manager(opts, stack_vars, ops_manager_inst)
    ops.create_ert_databases(opts)
    print("Ops manager is now available at", ops.url)
    if not hasattr(ops, 'install_elastic_runtime'):
        print(ops, "does not support deploying elastic runtime on < 1.7")
        return 0
    ops.wait_for_deployed('p-bosh', timeout=timeout)
    ops.bosh("status")
    ops.install_elastic_runtime(opts, timeout)
    ops.configure_elastic_runtime(opts, timeout)
    ops.bosh("vms", ignore_error='No deployments')
    ops.wait_for_deployed('cf', timeout=timeout)
    ops.wait_while_install_running(timeout=timeout)
def connect(profile="default", region="default"):
    # If using the default profile or a role, we don't need to pass creds.
    if region != "default":
        session = Session(region_name=region)
        kms = session.client('kms')
    elif profile == "default":
        kms = client('kms')
    else:
        profile = get_profile(profile)
        session = Session(aws_access_key_id=profile['aws_access_key_id'],
                          aws_secret_access_key=profile['aws_secret_access_key'],
                          region_name=profile['region'])
        kms = session.client('kms')
    return kms
def locate_ami(aws_config):
    def contains(x, ys):
        return all(y in x for y in ys)

    with open(aws_config) as fh:
        cred = json.load(fh)
    session = Session(aws_access_key_id=cred["aws_access_key"],
                      aws_secret_access_key=cred["aws_secret_key"],
                      region_name='us-east-1')
    client = session.client('ec2')
    response = client.describe_images(Filters=[
        {"Name": "owner-id", "Values": ["099720109477"]},
        {"Name": "virtualization-type", "Values": ["hvm"]},
        {"Name": "root-device-type", "Values": ["ebs"]},
        {"Name": "architecture", "Values": ["x86_64"]},
        # {"Name": "platform", "Values": ["Ubuntu"]},
        # {"Name": "name", "Values": ["hvm-ssd"]},
        # {"Name": "name", "Values": ["14.04"]},
    ])
    images = [i for i in response['Images']
              if contains(i['Name'], ('hvm-ssd', '14.04', 'server'))]
    images.sort(key=lambda x: x["CreationDate"], reverse=True)
    if not images:
        print("Error: could not locate base AMI, exiting ....")
        sys.exit(1)
    print("Using {}".format(images[0]['Name']))
    return images[0]['ImageId']
def _send_mail(to, subject, body, email_format='Text'):
    if settings.DEBUG:
        print((to, subject, body))
    session = Session(aws_access_key_id=settings.SES_ACCESS_ID,
                      aws_secret_access_key=settings.SES_SECRET_KEY,
                      region_name='us-east-1')
    conn = session.client('ses')
    resp = conn.send_email(
        Source=settings.SENDER_EMAIL,
        Destination={'ToAddresses': [to]},
        Message={
            'Subject': {'Data': subject},
            'Body': {email_format: {'Data': body}},
        },
        ReplyToAddresses=[settings.SUPPORT_EMAIL],
        ReturnPath=settings.ADMINS[0][1],
    )
    if not resp.get('MessageId'):
        rollbar.report_message('Got bad response from SES: %s' % repr(resp),
                               'error')
def __init__(self, config_area):
    self.aws_region = aws_config.get(config_area, "aws_region")
    self.aws_access_key_id = aws_config.get(config_area, "aws_access_key_id")
    self.aws_secret_access_key = aws_config.get(config_area, "aws_secret_access_key")
    session = Session(aws_access_key_id=self.aws_access_key_id,
                      aws_secret_access_key=self.aws_secret_access_key,
                      region_name=self.aws_region)
    self.ec2 = session.resource('ec2', config=Config(signature_version='s3v4'))
    self.ec2_client = session.client('ec2', config=Config(signature_version='s3v4'))
def main():
    """Main method for setup.

    Set up command line parsing and the AWS connection, then call the
    functions containing the actual logic.
    """
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=logging.INFO)
    logging.getLogger('botocore').setLevel(logging.CRITICAL)
    parser = argparse.ArgumentParser(
        description='Script to automate snapshotting of EBS volumes')
    parser.add_argument('--aws-access-key-id', dest='aws_access_key_id',
                        help='Specify a value here if you want to use a '
                             'different AWS_ACCESS_KEY_ID than configured in '
                             'the AWS CLI.')
    parser.add_argument('--aws-secret-access-key', dest='aws_secret_access_key',
                        help='Specify a value here if you want to use a '
                             'different AWS_SECRET_ACCESS_KEY than configured '
                             'in the AWS CLI.')
    parser.add_argument('--profile', dest='profile_name',
                        help='The AWS CLI profile to use. Defaults to the '
                             'default profile.')
    parser.add_argument('--region', dest='region_name', default='us-east-1',
                        help='The AWS region to connect to. Defaults to '
                             'us-east-1.')
    parser.add_argument('-n', '--num-backups', dest='num_backups', type=int,
                        default=14,
                        help='The number of backups for each volume to keep')
    parser.add_argument('-t', '--tag', dest='tag', default='Lifecycle:legacy',
                        help='Key and value (separated by a colon) of a tag '
                             'attached to instances whose EBS volumes should '
                             'be backed up')
    args = parser.parse_args()
    session_args = {key: value for key, value in vars(args).items()
                    if key in ['aws_access_key_id', 'aws_secret_access_key',
                               'profile_name', 'region_name']}
    try:
        session = Session(**session_args)
        client = session.client('ec2')
    except BotoCoreError as exc:
        logging.error("Connecting to the EC2 API failed: %s", exc)
        sys.exit(1)
    tag_key_value = args.tag.split(':')
    if len(tag_key_value) != 2:
        logging.error('Given tag key value: "%s" is invalid.', args.tag)
        sys.exit(1)
    tag_key, tag_value = tag_key_value
    make_snapshots(client, tag_key, tag_value)
    delete_old_snapshots(client, tag_key, tag_value, args.num_backups)
def __init__(self, access_key, access_secret, region, bucket='ypanbucket'):
    session = Session(aws_access_key_id=access_key,
                      aws_secret_access_key=access_secret,
                      region_name=region)
    self.client = session.client('s3')
    self.bucket = bucket
def main():
    if len(sys.argv) != 3:
        sys_exit(1)
    accepted_param = ['start', 'history', 'show', 'stop']
    valid_param = False
    for arg in sys.argv:
        if arg in accepted_param:
            valid_param = True
            break
    if not valid_param:
        sys_exit(1)
    default_profile = 'aater-flux7'
    spot_fleet_id = sys.argv[2]
    session = Session(profile_name=default_profile)
    client = session.client('ec2')
    resource = DemoEc2(client)
    ret = False
    if sys.argv[1] == 'start':
        ret = resource.create_aws_spot_fleet()
    elif sys.argv[1] == 'show':
        ret = resource.describe_aws_spot_fleet(spot_fleet_id)
    elif sys.argv[1] == 'history':
        ret = resource.history_aws_spot_fleet(spot_fleet_id)
    elif sys.argv[1] == 'stop':
        ret = resource.terminate_aws_spot_fleet(spot_fleet_id)
    print(ret)
def __init__(self, pipeline_id, region=None, access_key_id=None,
             secret_access_key=None):
    self.pipeline_id = pipeline_id
    if not region:
        region = getattr(settings, 'AWS_REGION', None)
    self.aws_region = region
    if not access_key_id:
        access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
    self.aws_access_key_id = access_key_id
    if not secret_access_key:
        secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
    self.aws_secret_access_key = secret_access_key
    # Raise rather than `assert False` so the checks survive `python -O`.
    if self.aws_access_key_id is None:
        raise ValueError('Please provide AWS_ACCESS_KEY_ID')
    if self.aws_secret_access_key is None:
        raise ValueError('Please provide AWS_SECRET_ACCESS_KEY')
    if self.aws_region is None:
        raise ValueError('Please provide AWS_REGION')
    boto_session = Session(
        aws_access_key_id=self.aws_access_key_id,
        aws_secret_access_key=self.aws_secret_access_key,
        region_name=self.aws_region,
    )
    self.client = boto_session.client('elastictranscoder')
def __init__(self, cfg):
    self.cfg = cfg
    aws = Session(aws_access_key_id=self.cfg.aws_key,
                  aws_secret_access_key=self.cfg.aws_secret,
                  region_name=self.cfg.aws_region)
    self.s3_client = aws.client('s3')
    self.bucket = aws.resource('s3').create_bucket(Bucket=self.cfg.s3_bucket)
def put_from_manifest(
        s3_bucket, s3_connection_host, s3_ssenc, s3_base_path,
        aws_access_key_id, aws_secret_access_key, manifest,
        bufsize, compress_data, concurrency=None, incremental_backups=False):
    """Upload files listed in a manifest to Amazon S3.

    To support larger-than-5GB files, multipart upload is used (chunks of
    60MB); files are uploaded compressed with lzop, with the .lzo suffix
    appended.
    """
    bucket = get_bucket(s3_bucket, aws_access_key_id,
                        aws_secret_access_key, s3_connection_host)
    # Create a boto3 session.
    session = Session(aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name='us-east-1')
    client = session.client('s3')
    event_system = client.meta.events
    config = TransferConfig(multipart_threshold=MULTI_PART_UPLOAD_THRESHOLD,
                            max_concurrency=4)
    transfer = S3Transfer(client, config)
    boto3.set_stream_logger('botocore', logging.INFO)
    with open(manifest, 'r') as manifest_fp:
        files = manifest_fp.read().splitlines()
    for f in files:
        file_path = s3_base_path + f
        print("boto3, upload file {0} to {1}: {2}".format(f, s3_bucket, file_path))
        transfer.upload_file(f, s3_bucket, file_path)
def __init__(self, config_dict):
    # Create a boto3 session for getting CloudWatch metrics.
    session = Session(
        aws_access_key_id=config_dict['redshift_connection']['aws_access_key_id'],
        aws_secret_access_key=config_dict['redshift_connection']['aws_secret_access_key'],
        region_name=config_dict['redshift_connection']['region_name'])
    self.cw = session.client('cloudwatch')
    self.name_space = 'AWS/Redshift'
    self.metric_name = ['CPUUtilization', 'NetworkReceiveThroughput',
                        'NetworkTransmitThroughput', 'PercentageDiskSpaceUsed',
                        'ReadIOPS', 'ReadLatency', 'ReadThroughput',
                        'WriteIOPS', 'WriteLatency', 'WriteThroughput']
    self.period = 60
    self.statistics = ['Average']
    self.unit = ['Percent', 'Bytes/Second', 'Bytes/Second', 'Percent',
                 'Count/Second', 'Seconds', 'Bytes/Second', 'Count/Second',
                 'Seconds', 'Bytes/Second']
    self.log_identifier = 'cw_metrics'
    self.cluster_name = config_dict['redshift_connection']['cluster_name']
    self.num_nodes = config_dict['redshift_connection']['num_nodes_cluster']
    self.post_db = PostDB(db_queue=None, database_config=config_dict['database'])
def __init__(self):
    session = Session(
        aws_access_key_id=app.config.get("S3_AWS_ACCESS_KEY_ID"),
        aws_secret_access_key=app.config.get("S3_AWS_SECRET_ACCESS_KEY"),
        region_name=app.config.get("S3_REGION"),
    )
    self._client = session.client("s3")
def handle(self, *args, **options):
    session = Session(
        aws_access_key_id=settings.S3_ACCESS_ID,
        aws_secret_access_key=settings.S3_SECRET_KEY,
        region_name=options["region"],
    )
    to_reprocess = []
    self.stdout.write("Processing bucket: %s" % settings.S3_LOGS_BUCKET)
    self.stdout.write("Downloading S3 manifest...")
    bucket = session.resource("s3").Bucket(name=settings.S3_LOGS_BUCKET)
    self.stdout.write("Analyzing bucket contents...")
    start_date = datetime.datetime.strptime(options["start"], "%Y-%m-%d")
    end_date = datetime.datetime.strptime(options["end"], "%Y-%m-%d")
    hits = 0
    for f in bucket.objects.all():
        hits += 1
        if hits % 500 == 0:
            self.stdout.write(" - Processed %d log listings..." % hits)
        filename = f.key.split("/")[-1]
        if not filename:
            continue
        # Ignore CloudFront files for now.
        if filename.endswith(".gz"):
            continue
        datestamp = "-".join(filename.split("-")[:-1])
        parsed_ds = datetime.datetime.strptime(datestamp, "%Y-%m-%d-%H-%M-%S")
        if parsed_ds < start_date or parsed_ds > end_date:
            continue
        to_reprocess.append(f.key)
    self.stdout.write("Finished analysis")
    self.stdout.write("%s logs need to be reprocessed" % len(to_reprocess))
    if not to_reprocess:
        return
    if options["run"]:
        lambda_client = session.client("lambda")
        for f in to_reprocess:
            blob = json.dumps({
                "Records": [{"s3": {"bucket": {"name": settings.S3_LOGS_BUCKET},
                                    "object": {"key": f}}}]
            })
            lambda_client.invoke(FunctionName=options["function"],
                                 InvocationType="Event", Payload=blob)
        self.stdout.write("Lambda invoked for each log file. "
                          "See CloudWatch for output")
    else:
        self.stdout.write("No additional action was performed. "
                          "Use --run to actually reprocess")
def get_connection(as_resource=True, use_cache=True):
    """Return a DynamoDB connection even if credentials are invalid."""
    global _cached_client
    global _cached_resource
    if use_cache:
        if as_resource and _cached_resource:
            return _cached_resource
        if not as_resource and _cached_client:
            return _cached_client
    config = get_config()
    session = Session(
        aws_access_key_id=config.aws_access_key_id,
        aws_secret_access_key=config.aws_secret_access_key,
        region_name='us-west-2',
    )
    if config.host:
        # Endpoint where DynamoDB Local resides (8000 is the default port);
        # secure connections are disabled for DynamoDB Local below.
        endpoint_url = '%s://%s:%s' % (
            'https' if config.is_secure else 'http',
            config.host,
            config.port,
        )
        if not as_resource:
            _cached_client = session.client('dynamodb',
                                            endpoint_url=endpoint_url,
                                            verify=False)
            return _cached_client
        _cached_resource = session.resource('dynamodb',
                                            endpoint_url=endpoint_url,
                                            verify=False)
        return _cached_resource
    if not as_resource:
        _cached_client = session.client('dynamodb', verify=False)
        return _cached_client
    _cached_resource = session.resource('dynamodb', verify=False)
    return _cached_resource
def setup_s3_client(job_data):
    key_id = job_data['artifactCredentials']['accessKeyId']
    key_secret = job_data['artifactCredentials']['secretAccessKey']
    session_token = job_data['artifactCredentials']['sessionToken']
    session = Session(aws_access_key_id=key_id,
                      aws_secret_access_key=key_secret,
                      aws_session_token=session_token)
    return session.client(
        's3', config=botocore.client.Config(signature_version='s3v4'))
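# Illustrative shape of the job_data consumed by setup_s3_client above; the
# field names follow the CodePipeline Lambda job event, but the values here
# are placeholders, not real credentials.
example_job_data = {
    'artifactCredentials': {
        'accessKeyId': 'AKIA...',        # placeholder
        'secretAccessKey': 'SECRET...',  # placeholder
        'sessionToken': 'TOKEN...',      # placeholder
    },
}
# s3 = setup_s3_client(example_job_data)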
class Context(object):
    def __init__(self, profile=None):
        self.session = Session(profile_name=profile)

    def getR53Client(self):
        credential = self.session.get_credentials()
        if not credential:
            raise RuntimeError("failed to get aws credential")
        return self.session.client('route53')
def test_bad_resource_name(self, list_mock, exist_mock, dir_mock):
    session = Session()
    session.client = mock.Mock()
    load_mock = mock.Mock()
    session.resource_factory.load_from_definition = load_mock
    self.loader.get_search_paths.return_value = ['search-path']
    with self.assertRaises(NoVersionFound):
        # S3 is defined but not SQS!
        session.resource('sqs')
def create_federation_token(self,
                            session: Session,
                            user_name: str,
                            user_policy: str,
                            duration_sec: int = 129600,
                            ) -> dict:
    sts = session.client('sts')
    token_resp = sts.get_federation_token(Name=user_name[:32],
                                          Policy=user_policy,
                                          DurationSeconds=duration_sec)
    return token_resp
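# A minimal usage sketch for create_federation_token above; the user name and
# inline policy are illustrative assumptions, not part of the original code.
# (STS requires Name to be at most 32 characters, which the [:32] slice
# enforces.)
example_policy = json.dumps({
    "Version": "2012-10-17",
    "Statement": [
        {"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"},
    ],
})
# resp = obj.create_federation_token(Session(), 'example-user', example_policy)
# resp['Credentials'] holds AccessKeyId / SecretAccessKey / SessionToken.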
def push_batch(bus, payloads):
    session = Session(aws_access_key_id=settings.SQS_ACCESS_ID,
                      aws_secret_access_key=settings.SQS_SECRET_KEY,
                      region_name='us-east-1')  # TODO: make this an env variable?
    sns = session.client('sns')
    count = 0
    for p in payloads:
        sns.publish(TopicArn=bus, Message=json.dumps(p))
        count += 1
    # The original accumulated count without using it; return it so callers
    # can see how many payloads were published.
    return count
def LogToKinesisFromWorker(KINESIS_LOGSTREAM, subject, message, *argv):
    session = Session(aws_access_key_id=ACCESS_KEY,
                      aws_secret_access_key=SECRET_KEY,
                      region_name='eu-west-1')
    kinesisClient = session.client('kinesis')
    # *argv is always a tuple (never None), so just join its parts.
    parts = [str(arg) for arg in argv]
    data = ", ".join(parts) if parts else ""
    kinesisBlob = json.dumps({
        "topic": "Log",
        "frequency": 0,
        "values": {
            "subject": subject,
            "time": datetime.datetime.now(
                timezone('Europe/Oslo')).strftime('%Y-%m-%d %H:%M:%S'),
            "message": message,
            "data": data,
        },
    })
    # Use the client created above; the original built it but then wrote
    # through an unrelated module-level kinesisClientWrapper.
    kinesisClient.put_record(StreamName=KINESIS_LOGSTREAM,
                             Data=kinesisBlob,
                             PartitionKey="LogOutput")
def test_create_resource_latest_version(self, list_mock, exist_mock, dir_mock):
    session = Session()
    session.client = mock.Mock()
    load_mock = mock.Mock()
    session.resource_factory.load_from_definition = load_mock
    self.loader.get_search_paths.return_value = ['search-path']
    session.resource('sqs')
    self.loader.load_data.assert_called_with('sqs-2014-11-05.resources')
def test_no_search_path_resources(self, exist_mock, dir_mock):
    session = Session()
    session.client = mock.Mock()
    load_mock = mock.Mock()
    session.resource_factory.load_from_definition = load_mock
    self.loader.get_search_paths.return_value = [
        'search-path1', 'search-path2']
    with self.assertRaises(NoVersionFound):
        # No resources are defined anywhere
        session.resource('sqs')
def main(arn, session_name):
    """aws sts assume-role --role-arn arn:aws:iam::00000000000000:role/example-role --role-session-name example-role"""
    client = boto3.client('sts')
    account_id = client.get_caller_identity()["Account"]
    response = client.assume_role(RoleArn=arn, RoleSessionName=session_name)
    session = Session(
        aws_access_key_id=response['Credentials']['AccessKeyId'],
        aws_secret_access_key=response['Credentials']['SecretAccessKey'],
        aws_session_token=response['Credentials']['SessionToken'])
    # Switch to the slave account.
    client = session.client('sts')
    account_id = client.get_caller_identity()["Account"]
    # TODO: do we need to make it loop over our accounts?
    slaveResponse = client.assume_role(
        RoleArn='arn:aws:iam::XXXXXXXXXXXXXX:role/SlaveCentralAdmin',
        RoleSessionName='SlaveCentralAdmin')
    slaveSession = Session(
        aws_access_key_id=slaveResponse['Credentials']['AccessKeyId'],
        aws_secret_access_key=slaveResponse['Credentials']['SecretAccessKey'],
        aws_session_token=slaveResponse['Credentials']['SessionToken'])
    os.environ["AWS_ACCESS_KEY_ID"] = slaveResponse['Credentials']['AccessKeyId']
    os.environ["AWS_SECRET_ACCESS_KEY"] = slaveResponse['Credentials']['SecretAccessKey']
    os.environ["AWS_SESSION_TOKEN"] = slaveResponse['Credentials']['SessionToken']
    print("slave account: ", account_id)
    client = slaveSession.client('ec2')
    regions = [region['RegionName']
               for region in client.describe_regions()['Regions']]
    print("starting cleanup...")
    listOfPolicies = ' '.join(policies_list())
    for region in regions:
        print("python mugc.py -r %s -c %s" % (region, listOfPolicies))
        os.system("python mugc.py -r %s -c %s" % (region, listOfPolicies))
class TestDynamoDB(unittest.TestCase):
    def setUp(self):
        self.http_response = AWSResponse(None, 200, {}, None)
        self.parsed_response = {}
        self.make_request_patch = mock.patch(
            'botocore.endpoint.Endpoint.make_request')
        self.make_request_mock = self.make_request_patch.start()
        self.make_request_mock.return_value = (
            self.http_response,
            self.parsed_response,
        )
        self.session = Session(
            aws_access_key_id='dummy',
            aws_secret_access_key='dummy',
            region_name='us-east-1',
        )

    def tearDown(self):
        self.make_request_patch.stop()

    def test_resource(self):
        dynamodb = self.session.resource('dynamodb')
        table = dynamodb.Table('MyTable')
        # Make sure it uses the high level interface
        table.scan(FilterExpression=Attr('mykey').eq('myvalue'))
        request = self.make_request_mock.call_args_list[0][0][1]
        request_params = json.loads(request['body'].decode('utf-8'))
        assert request_params == {
            'TableName': 'MyTable',
            'FilterExpression': '#n0 = :v0',
            'ExpressionAttributeNames': {'#n0': 'mykey'},
            'ExpressionAttributeValues': {':v0': {'S': 'myvalue'}},
        }

    def test_client(self):
        dynamodb = self.session.client('dynamodb')
        # Make sure the client still uses the botocore level interface
        dynamodb.scan(
            TableName='MyTable',
            FilterExpression='#n0 = :v0',
            ExpressionAttributeNames={'#n0': 'mykey'},
            ExpressionAttributeValues={':v0': {'S': 'myvalue'}},
        )
        request = self.make_request_mock.call_args_list[0][0][1]
        request_params = json.loads(request['body'].decode('utf-8'))
        assert request_params == {
            'TableName': 'MyTable',
            'FilterExpression': '#n0 = :v0',
            'ExpressionAttributeNames': {'#n0': 'mykey'},
            'ExpressionAttributeValues': {':v0': {'S': 'myvalue'}},
        }
def post(self, request):
    data = json.loads(request.data['data'])
    data['name2'] = unidecode(data['name'])
    data['city_name'] = unidecode(
        City.objects.filter(id=data['city']).first().name)
    if data['phone'] == "":
        data['phone'] = None
    if len(request.data) == 1:
        return Response("Image Required", status=status.HTTP_400_BAD_REQUEST)
    serializer = HallSerializer(data=data)
    if serializer.is_valid():
        hall = serializer.save()
        hall.user = request.user
        images = iter(request.data)
        next(images)  # skip the 'data' key; the remaining keys are images
        session = Session(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                          region_name="us-east-2")
        s3_client = session.client('s3', region_name="us-east-2",
                                   config=Config(signature_version='s3v4'))
        FILE_PATH = 'images/' + str(hall.id) + '/'
        responses = []
        for image in images:
            try:
                name = uuid.uuid4()
                hall_image = HallImage()
                hall.photo_number = hall.photo_number + 1
                hall_image.hall = hall
                hall_image.name = name
                hall_image.save()
                s3_object_name = FILE_PATH + str(name) + "/" + "image.jpg"
                response = s3_client.generate_presigned_post(
                    Bucket=settings.AWS_STORAGE_BUCKET_NAME,
                    Key=s3_object_name,
                    ExpiresIn=3600,
                )
                responses.append(response)
            except ClientError as e:
                logging.error(e)
                return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
        hall.save()
        return Response(responses, status=status.HTTP_201_CREATED)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def __get_kinesis(self):
    client = boto3.client('cognito-identity', self.__region)
    resp = client.get_id(IdentityPoolId=self.__identity_id)
    resp = client.get_credentials_for_identity(IdentityId=resp['IdentityId'])
    secretKey = resp['Credentials']['SecretKey']
    accessKey = resp['Credentials']['AccessKeyId']
    token = resp['Credentials']['SessionToken']
    session = Session(aws_access_key_id=accessKey,
                      aws_secret_access_key=secretKey,
                      aws_session_token=token,
                      region_name=self.__region)
    return session.client('kinesis')
def launch_lambda(queue, session_args, token, urls):
    session = Session(**session_args)
    config = Config(read_timeout=60 * 5)
    client = session.client('lambda', config=config)
    lambda_args = {'token': token, 'queue': queue, 'urls': urls}
    resp = client.invoke(FunctionName='AutoScaleTest',
                         # InvocationType='Event',
                         # LogType='None',
                         Payload=json.dumps(lambda_args).encode('utf-8'))
    # print(resp)
    # print(resp['Payload'].read())
    return resp['Payload'].read()
def get_boto3_client(session: Session, service_name: ServiceName) -> BaseClient:
    """
    Get boto3 client from `session`.

    Arguments:
        session -- boto3 session.
        service_name -- ServiceName instance.

    Returns:
        Boto3 client.
    """
    return session.client(service_name.boto3_name)  # type: ignore
def client(self):
    """Return the connection to SQS.

    Returns:
        boto3.SQS.Client: A connection to the SQS service.
    """
    session = Session(
        aws_access_key_id=self.app.settings['AWS_ACCESS_KEY'],
        aws_secret_access_key=self.app.settings['AWS_ACCESS_SECRET'],
        region_name=self.app.settings['AWS_REGION_NAME'],
    )
    return session.client('sqs')
def main(argv):
    args = get_args().parse_args(argv)
    if args.prepared_cfg is None and args.system_domain is None:
        print("Either --prepared-cfg or "
              "(--system-domain and --stack-name) are required")
        return -1
    fix_args(args)
    session = Session(profile_name=args.profile, region_name=args.region)
    elb = session.client("elb")
    genallhosts(elb, args)
    return 0
def test_create_resource_with_args(self, list_mock, exist_mock, dir_mock):
    session = Session()
    session.client = mock.Mock()
    session.resource_factory.load_from_definition = mock.Mock()
    self.loader.get_search_paths.return_value = ['search-path']
    session.resource('sqs', verify=False)
    session.client.assert_called_with(
        'sqs', aws_secret_access_key=None, aws_access_key_id=None,
        endpoint_url=None, use_ssl=True, aws_session_token=None,
        verify=False, region_name=None, api_version=None)
def changeMonitoringState(self, key, secret, instanceId, region, to):
    try:
        session = Session(aws_access_key_id=key, aws_secret_access_key=secret)
        client = session.client('ec2', region_name=region)
        if to == 'enable':
            response = client.monitor_instances(InstanceIds=[instanceId])
        else:
            response = client.unmonitor_instances(InstanceIds=[instanceId])
    except Exception as e:
        print(e)
        return e
    return jsonify({'result': 'success', 'data': response})
def __init__(self, __config_path):
    self.__config_path = __config_path
    config = ConfigParser()
    config.read(__config_path)
    session = Session(
        aws_access_key_id=config.get('Credentials', 'aws_access_key_id'),
        aws_secret_access_key=config.get('Credentials', 'aws_secret_access_key'))
    self.s3_client = session.client('s3')
    self.s3_resource = session.resource('s3')
@classmethod
def get_s3_client(cls, conn: S3Connection):
    """
    :param conn: an S3Connection object
    :return: a boto3 s3 client
    """
    assert conn.is_valid
    assert conn.is_active
    session = Session(aws_access_key_id=conn.access_key,
                      aws_secret_access_key=conn.secret_key,
                      region_name=conn.region_name)
    s3 = session.client('s3')
    return s3
class S3Client(object):
    def __init__(self, profile: str, bucket: str):
        # Use the given profile, falling back to the original hard-coded
        # default (the original ignored the parameter entirely).
        self._profile = profile or 'vueflasksample'
        self._session = Session(profile_name=self._profile)
        self._client = self._session.client('s3')
        self._bucket_name = bucket

    def getList(self, prefix: str):
        response = self._client.list_objects(Bucket=self._bucket_name,
                                             Prefix=prefix)
        # If no keys match, the response does not include 'Contents'.
        if 'Contents' in response:
            keys = [content['Key'] for content in response['Contents']]
            return keys
        return []
def _setup_session(self):
    '''Set up the Boto3 session.'''
    session = Session(aws_access_key_id=self.access_key_id,
                      aws_secret_access_key=self.secret_access_key)
    if not self.account_id:
        self.account_id = session.client('sts').get_caller_identity().get('Account')
    self.credentials[self.account_id] = (self.access_key_id,
                                         self.secret_access_key, None)
    self.sessions[self.account_id] = session
    self.sqs_res.pop(self.account_id, None)
def get_s3_client():
    # type: () -> botocore.client.BaseClient
    with session_lock:
        global __aws_session
        if __aws_session is None:
            __aws_session = Session()
        return __aws_session.client(
            service_name='s3',
            config=Config(
                retries={'max_attempts': MAX_RETRY},
                read_timeout=DEFAULT_READ_TIMEOUT,
            ),
        )
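# Module-level context assumed by get_s3_client above; the original module is
# not shown, so these definitions (and the constant values) are illustrative
# guesses, not the original configuration.
import threading

from boto3.session import Session
from botocore.config import Config

session_lock = threading.Lock()   # guards lazy creation of the session
__aws_session = None              # created on first use by get_s3_client
MAX_RETRY = 5                     # illustrative value
DEFAULT_READ_TIMEOUT = 60         # seconds; illustrative value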
def assume_role(arn, session_name):
    client = boto3.client('sts')
    account_id = client.get_caller_identity()["Account"]
    user_arn = client.get_caller_identity()["Arn"]
    # The original referenced an undefined role_arn here; use the arn argument.
    print(f'{arn} passed as argument. Assuming role...')
    response = client.assume_role(RoleArn=arn, RoleSessionName=session_name)
    session = Session(
        aws_access_key_id=response['Credentials']['AccessKeyId'],
        aws_secret_access_key=response['Credentials']['SecretAccessKey'],
        aws_session_token=response['Credentials']['SessionToken'])
    client = session.client('sts')
    account_id = client.get_caller_identity()["Account"]
    user_arn = client.get_caller_identity()["Arn"]
    print(f'Role "{user_arn}" assumed, commencing keypair activities...')
def get_image_from_ssm_parameter(ssm_parameter: Parameter,
                                 session: Session = None) -> Union[str, None]:
    session = get_session(session)
    client = session.client("ssm")
    try:
        return client.get_parameter(
            Name=ssm_parameter.Default)["Parameter"]["Value"]
    except (client.exceptions.InvalidKeyId,
            client.exceptions.ParameterNotFound):
        pass
    except ClientError:
        pass
    return None
def get_ssm_client(region_name, role_arn=None, session_name=None):
    if role_arn:
        sts_client = boto3.client('sts')
        response = sts_client.assume_role(RoleArn=role_arn,
                                          RoleSessionName=session_name)
        session = Session(
            aws_access_key_id=response['Credentials']['AccessKeyId'],
            aws_secret_access_key=response['Credentials']['SecretAccessKey'],
            aws_session_token=response['Credentials']['SessionToken'])
        client = session.client("ssm", region_name=region_name)
    else:
        client = boto3.client("ssm", region_name=region_name)
    return client
def createSecurityGroup(self, key, secret, region, name, description):
    try:
        session = Session(aws_access_key_id=key, aws_secret_access_key=secret)
        client = session.client('ec2', region_name=region)
        securityGroup = client.create_security_group(
            Description=description,
            GroupName=name,
        )
    except Exception as e:
        print(e)
        return e
    return jsonify({'result': 'success', 'data': securityGroup})
def sessions(self, profile):
    for profile_name in [profile] if profile else self.read_aws_profile_names():
        try:
            session = Session(profile_name=profile_name)
            account_id = session.client("sts").get_caller_identity()["Account"]
            if account_id not in self._accounts_processed:
                self._accounts_processed.append(account_id)
                yield (session, account_id, profile_name)
        except ClientError as e:
            if e.response["Error"]["Code"] in AwsApiHelper.CRED_ERRORS:
                logging.warning(f'{profile_name} {e.response["Error"]["Code"]}. Skipped')
            else:
                raise
def delete(self, using=None, keep_parents=False):
    """Override the default delete method so the bucket is deleted
    when the model instance is deleted."""
    try:
        session = Session(aws_access_key_id=str(self.access_key),
                          aws_secret_access_key=str(self.secret_key),
                          region_name=str(self.region_name))
        s3 = session.client('s3')
        s3.delete_bucket(Bucket=self.connection_id)
    except Exception:
        pass
    super().delete()
def upload_to_s3(bucket, key, local_file):
    ########### MODIFY THIS ##########
    # key, secret [AmazonS3FullAccess]
    aws_access_key_id = ''
    aws_secret_access_key = ''
    region_name = 'ap-east-1'
    ##################################
    from boto3.session import Session
    # Python 3 replacement for the original Python 2 urllib.unquote_plus call.
    from urllib.parse import unquote_plus
    session = Session(aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key,
                      region_name=region_name)
    s3 = session.client("s3")
    # Upload to S3.
    clean_key = unquote_plus(key)
    s3.upload_file(local_file, bucket, clean_key)
def ec2_instance_types():
    """Return list of possible AWS EC2 instance types."""
    session = Session()
    ec2 = session.client("ec2")
    response = ec2.describe_instance_types()
    instance_types = response["InstanceTypes"]
    while "NextToken" in response:
        response = ec2.describe_instance_types(NextToken=response["NextToken"])
        instance_types.extend(response["InstanceTypes"])
    return sorted(instance_types, key=lambda i: i["InstanceType"])
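# The manual NextToken loop in ec2_instance_types above can also be written
# with botocore's built-in paginator; a sketch with identical output, assuming
# default credentials and region.
def ec2_instance_types_paginated():
    ec2 = Session().client("ec2")
    instance_types = []
    for page in ec2.get_paginator("describe_instance_types").paginate():
        instance_types.extend(page["InstanceTypes"])
    return sorted(instance_types, key=lambda i: i["InstanceType"])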
def get_credential():
    if os.environ.get('EXEC_ENV') == 'TEST':
        session = Session(profile_name='local-dynamodb-user')
        sts = session.client('sts')
    else:
        sts = boto3.client('sts')
    role_arn = os.environ['ROLE_ARN']
    role = sts.assume_role(
        RoleArn=role_arn,
        RoleSessionName='test',
    )
    return role['Credentials']
def _assume_role(self, profile_name, region_name, role_arn,
                 role_session_name, duration_seconds):
    # MFA is not supported. If you want to use MFA, create a configuration file.
    # http://boto3.readthedocs.io/en/latest/guide/configuration.html#assume-role-provider
    session = Session(profile_name=profile_name, **self._session_kwargs)
    client = session.client("sts", region_name=region_name,
                            **self._client_kwargs)
    response = client.assume_role(
        RoleArn=role_arn,
        RoleSessionName=role_session_name,
        DurationSeconds=duration_seconds,
    )
    return response["Credentials"]
def create(args, config):
    creds = get_aws_credentials()
    os.mkdir('.rollout')
    os.mkdir('.rollout/keys')
    session = Session(aws_access_key_id=creds.get('access_key'),
                      aws_secret_access_key=creds.get('secret_key'))
    region = ask('What region do you want to use?', 'us-west-1')
    ami = ask('What AMI do you want to use?', 'ami-09d2fb69')
    size = ask('What instance type do you want to use?', 't2.micro')
    assert size in EC2_INSTANCE_TYPES
    ec2 = session.resource('ec2', region_name=region)
    ec2_client = session.client('ec2', region_name=region)
    keyname = create_key(ec2_client, args.service, region)
    subnets = get_subnets(ec2_client)
    instance = ec2.create_instances(
        ImageId=ami,
        MinCount=1,
        MaxCount=1,
        KeyName=keyname,
        InstanceType=size,
        SubnetId=subnets[0]['SubnetId'],
    )
    service = args.service
    public_dns_name = wait_for_public_dns_name(ec2, instance[0].id)
    data = {
        'hosts': [{
            'username': '******',
            'instanceId': instance[0].id,
            'host': public_dns_name,
            'keyfile': get_keypath(keyname),
        }],
        'services': {
            service: {
                'default': 'true',
            }
        },
    }
    with open('.rollout/config.yaml', 'w') as f:
        yaml.dump(data, f, default_flow_style=False)
    print('Your instance is being created.')
    print('Instance Id: {}'.format(instance[0].id))
def lambda_handler(event, context):
    """Entry point actually invoked by Lambda.

    Triggered when a mail object file lands in S3; handles everything
    through to punching the clock.
    """
    # === Initialize the various keys ===
    # Load credentials.
    with open('./aws_credentials.json', 'r', encoding='utf-8') as f:
        credentials = json.load(f)
    session = Session(
        aws_access_key_id=credentials['aws_access_key_id'],
        aws_secret_access_key=credentials['aws_secret_access_key'],
        region_name=credentials['region_name'])
    s3 = session.client('s3')
    # Separate input/output buckets, to avoid accidental recursive triggering.
    INPUT_BUCKET_NAME = credentials['input_bucket_name']
    OUTPUT_BUCKET_NAME = credentials['output_bucket_name']
    # === Pull the S3 file info from the event, fetch it, and hand the bytes to the MIME parser ===
    # Extract this invocation's MIME file object key from the event.
    MIME_OBJECT_KEY = event['Records'][0]['s3']['object']['key']
    # Fetch from S3 and parse.
    fetch_response = fetch_s3_object(s3, INPUT_BUCKET_NAME, MIME_OBJECT_KEY)
    parser_response = MimeParser(
        bytes_data=fetch_response['Body'].read()).get_mail_body_content()
    print(parser_response)
    # === Based on the parsed mail, decide clock-in vs clock-out and punch ===
    if parser_response['from_address'] == credentials['mail_from']:
        # The sender matches.
        punch_type = seek_mail_body(parser_response['body'])
        if punch_type == 1:
            # Clock-in matched.
            return_status_text = work_punch(punch_type='work_in')
            notice_to_slack(credentials['webhook_url'], return_status_text)
        elif punch_type == 2:
            # Clock-out matched.
            return_status_text = work_punch(punch_type='work_out')
            notice_to_slack(credentials['webhook_url'], return_status_text)
        else:
            # Neither matched.
            notice_to_slack(credentials['webhook_url'],
                            'No match, No Punch. Confirm your mail.')
    else:
        # The sender did not match.
        notice_to_slack(credentials['webhook_url'],
                        'Different from address. Confirm your mail.')
class TestDynamoDB(unittest.TestCase):
    def setUp(self):
        self.http_response = requests.models.Response()
        self.http_response.status_code = 200
        self.parsed_response = {}
        self.make_request_patch = mock.patch(
            'botocore.endpoint.Endpoint.make_request')
        self.make_request_mock = self.make_request_patch.start()
        self.make_request_mock.return_value = (
            self.http_response, self.parsed_response)
        self.session = Session(
            aws_access_key_id='dummy',
            aws_secret_access_key='dummy',
            region_name='us-east-1')

    def tearDown(self):
        self.make_request_patch.stop()

    def test_resource(self):
        dynamodb = self.session.resource('dynamodb')
        table = dynamodb.Table('MyTable')
        # Make sure it uses the high level interface
        table.scan(FilterExpression=Attr('mykey').eq('myvalue'))
        request = self.make_request_mock.call_args_list[0][0][1]
        request_params = json.loads(request['body'])
        self.assertEqual(
            request_params,
            {'TableName': 'MyTable',
             'FilterExpression': '#n0 = :v0',
             'ExpressionAttributeNames': {'#n0': 'mykey'},
             'ExpressionAttributeValues': {':v0': {'S': 'myvalue'}}},
        )

    def test_client(self):
        dynamodb = self.session.client('dynamodb')
        # Make sure the client still uses the botocore level interface
        dynamodb.scan(
            TableName='MyTable',
            FilterExpression='#n0 = :v0',
            ExpressionAttributeNames={'#n0': 'mykey'},
            ExpressionAttributeValues={':v0': {'S': 'myvalue'}},
        )
        request = self.make_request_mock.call_args_list[0][0][1]
        request_params = json.loads(request['body'])
        self.assertEqual(
            request_params,
            {'TableName': 'MyTable',
             'FilterExpression': '#n0 = :v0',
             'ExpressionAttributeNames': {'#n0': 'mykey'},
             'ExpressionAttributeValues': {':v0': {'S': 'myvalue'}}},
        )
class S3(object):
    ENCODING = 'utf-8'

    def __init__(self, aws_access_key_id, aws_secret_access_key, region_name):
        self.session = Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=region_name,
        )

    def get(self, bucketname, keyname):
        obj = self._get_bucket_object(bucketname, keyname)
        response = obj.get()['Body']
        return response

    def list(self, bucketname, prefix):
        s3 = self.session.resource('s3')
        bucket = s3.Bucket(bucketname)
        for obj in bucket.objects.filter(Prefix=prefix):
            yield obj.key

    def put(self, bucketname, keyname, content, content_type):
        if isinstance(content, str):
            content = content.encode(self.ENCODING)
        if not isinstance(content, bytes):
            content = json.dumps(content).encode(self.ENCODING)
        obj = self._get_bucket_object(bucketname, keyname)
        response = obj.put(
            Body=content,
            ContentEncoding=self.ENCODING,
            ContentType=content_type,
        )
        return response

    def exists(self, bucketname, keyname):
        try:
            s3 = self.session.client('s3')
            s3.head_object(Bucket=bucketname, Key=keyname)
        except ClientError:
            return False
        else:
            return True

    def _get_bucket_object(self, bucketname, keyname):
        s3 = self.session.resource('s3')
        bucket = s3.Bucket(bucketname)
        obj = bucket.Object(keyname)
        return obj
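# Hedged usage sketch for the S3 wrapper above; the bucket, key, and
# credential values are placeholders, not from the original code.
# s3 = S3('AKIA...', 'SECRET...', 'us-east-1')
# if not s3.exists('my-bucket', 'reports/latest.json'):
#     s3.put('my-bucket', 'reports/latest.json', {'ok': True}, 'application/json')
# for key in s3.list('my-bucket', 'reports/'):
#     print(key)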
def main(profile, region):
    start = time()
    try:
        accounts_processed = []
        profile_names = [profile] if profile else read_aws_profile_names()
        for profile_name in profile_names:
            session = Session(profile_name=profile_name)
            account_id = session.client("sts").get_caller_identity()["Account"]
            if account_id in accounts_processed:
                continue
            accounts_processed.append(account_id)
            process_account(session, account_id, region)
    finally:
        logging.info(f"Total execution time: {time() - start}s")
def invoke_endpoint(data):
    data = {"data": data}
    session = Session()
    runtime = session.client("runtime.sagemaker")
    response = runtime.invoke_endpoint(
        EndpointName='textrank',
        ContentType="application/json",
        Body=json.dumps(data),
    )
    result = json.loads(response["Body"].read())
    # print("<<<< result: ", ''.join(result['res']['摘要列表']))
    return ''.join(result['res']['摘要列表'])