def __init__(
    self,
    sender: str,
    recipients: list,
    subject: str = None,
    access_key: str = None,
    secret_key: str = None,
    region: str = None,
):
    Handler.__init__(self)
    self.sender = sender
    self.recipients = recipients
    self.subject = subject
    self.access_key = access_key
    self.secret_key = secret_key
    self.region = region
    if access_key and secret_key and region:
        self.ses_client = boto3_client(
            'ses',
            region_name=self.region,
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key)
    else:
        self.ses_client = boto3_client('ses')
def __init__(
    self,
    aws_access_key_id: str = None,
    aws_secret_access_key: str = None,
    region_name: str = None,
    cluster: str = None,
    subnets: list = None,
    security_groups: list = None,
    repository_credentials: str = None,
    assign_public_ip: str = None,
    task_cpu: str = None,
    task_memory: str = None,
) -> None:
    super().__init__()

    from boto3 import client as boto3_client

    # Config used for boto3 client initialization
    aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
    aws_secret_access_key = aws_secret_access_key or os.getenv(
        "AWS_SECRET_ACCESS_KEY")
    region_name = region_name or os.getenv("REGION_NAME")

    # Agent task config
    self.cluster = cluster or os.getenv("CLUSTER", "default")
    self.subnets = subnets or []
    self.security_groups = security_groups or []
    self.repository_credentials = repository_credentials or os.getenv(
        "REPOSITORY_CREDENTIALS")
    self.assign_public_ip = assign_public_ip or os.getenv(
        "ASSIGN_PUBLIC_IP", "ENABLED")
    self.task_cpu = task_cpu or os.getenv("TASK_CPU", "256")
    self.task_memory = task_memory or os.getenv("TASK_MEMORY", "512")

    # Client initialization
    self.boto3_client = boto3_client(
        "ecs",
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        region_name=region_name,
    )

    # Look for default subnets with `MapPublicIpOnLaunch` disabled
    if not subnets:
        ec2 = boto3_client(
            "ec2",
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=region_name,
        )
        for subnet in ec2.describe_subnets()["Subnets"]:
            if not subnet.get("MapPublicIpOnLaunch"):
                self.subnets.append(subnet.get("SubnetId"))
def log_current_state(**execution_info):
    boto3_client('cloudwatch').put_metric_data(
        Namespace='LambdaWarmer',
        MetricData=[
            dict(MetricName='WarmStart'
                 if execution_info['is_warm'] else 'ColdStart',
                 Dimensions=[
                     dict(Name='By Function Name',
                          Value=execution_info['function_name'])
                 ],
                 Unit='None',
                 Value=1)
        ])
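# Hedged usage sketch (not from the original source): log_current_state above
# expects `is_warm` and `function_name` keys. A handler might report them like
# this; the module-level WARM flag is a hypothetical name used only here.
WARM = False

def handler(event, context):
    global WARM
    log_current_state(is_warm=WARM, function_name=context.function_name)
    WARM = True
    # ... normal request handling continues here ...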
def update_station(event, context):
    def playerfm(url):
        crawl_url = requests.get(url)
        soup = BeautifulSoup(crawl_url.content, 'html.parser')
        section = soup.find("section",
                            {"class": re.compile(r"series-episodes-list")})
        list_url = section['data-url']
        list_url = re.sub("&at=\\d+$", "", list_url)
        return list_url

    dynamodb = boto3.resource('dynamodb', os.environ['REGION'])
    table = dynamodb.Table(os.environ['STATIONS_TABLE'])
    stations = table.scan(FilterExpression=Attr('list_url').not_exists())
    for station in stations['Items']:
        if 'list_url' not in station:
            spider_name = station['spider']
            list_url = locals()[spider_name](station['url'])
            station['list_url'] = list_url
            table.put_item(Item=station)

    msg = {"key": "new_invocation"}
    lambda_client = boto3_client('lambda')
    invoke_response = lambda_client.invoke(
        FunctionName="td-podcast-update-episode",
        InvocationType='Event',
        Payload=json.dumps(msg))
    return 0
def login(cls, token_name, login_token, login_secret_token, duration=86400):
    login_result = OpenResource.invoke(
        '/session/login', {
            'token_name': token_name,
            'login_token': login_token,
            'login_secret_token': login_secret_token,
            'duration': duration
        })
    res = boto3_client("sts").assume_role_with_web_identity(
        RoleArn=ApiConfig.aws_role_arn(),
        RoleSessionName=ApiConfig.aws_api_gw_service_name(),
        WebIdentityToken=login_result['cognito_token'],
        DurationSeconds=43200)
    return Session(credentials=Credentials(
        res['Credentials']['AccessKeyId'],
        res['Credentials']['SecretAccessKey'],
        res['Credentials']['SessionToken']),
                   session_id=login_result['session_id'],
                   api_key=login_result['api_key'],
                   identity_id=login_result['cognito_identity_id'],
                   user=login_result['user'])
def setup(self, flow: "Flow") -> None:  # type: ignore
    """
    Register the task definition if it does not already exist.

    Args:
        - flow (Flow): the Flow object
    """
    from boto3 import client as boto3_client
    from botocore.exceptions import ClientError

    boto3_c = boto3_client(
        "ecs",
        aws_access_key_id=self.aws_access_key_id,
        aws_secret_access_key=self.aws_secret_access_key,
        aws_session_token=self.aws_session_token,
        region_name=self.region_name,
    )

    task_definition_kwargs = self._render_task_definition_kwargs(flow)
    try:
        existing_task_definition = boto3_c.describe_task_definition(
            taskDefinition=self.task_definition_kwargs.get(
                "family"))["taskDefinition"]
        self._validate_task_definition(existing_task_definition,
                                       task_definition_kwargs)
    except ClientError:
        boto3_c.register_task_definition(**task_definition_kwargs)
def request_audit(event, context, references={}):
    """
    Logs the receipt of a shipment event request.

    This builds an SNS message from the arguments and publishes it to the
    stage-specific received-shipment-requests topic.

    Args:
        references: The ids and other data points that we want to be searchable
    """
    logging.info(event)
    event = sanitize(event)
    receipt_log = {
        "request": context.function_name if context.function_name else "",
        "requestId": get_request_id(event),
        "body": json.dumps(event),
        "references": json.dumps(references),
    }
    aws_stage = os.environ["AWS_STAGE"]
    try:
        sns_client = boto3_client("sns")
        topic_name = f"{aws_stage}-received-requests"
        # Get the topic ARN by creating it if necessary.
        topic_arn = sns_client.create_topic(Name=topic_name)["TopicArn"]
        sns_client.publish(TopicArn=topic_arn, Message=json.dumps(receipt_log))
    except ClientError as ex:
        logging.error(f"Could not log received shipment: {ex}")
def new_file(self, *args, **kwargs):
    super().new_file(*args, **kwargs)
    extension = pathlib.Path(self.file_name).suffix
    time_stamp = f'{timezone.now().strftime("%Y%m%d%H%M%S")}'
    self.new_file_name = f"{S3_ROOT_DIRECTORY}{self.file_name.replace(extension, '')}_{time_stamp}{extension}"
    extra_kwargs = {}
    if AWS_S3_ENDPOINT_URL:
        extra_kwargs['endpoint_url'] = AWS_S3_ENDPOINT_URL
    self.s3_client = boto3_client(
        "s3",
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        region_name=AWS_REGION,
        **extra_kwargs,
    )
    self.parts = []
    self.part_number = 1
    self.s3_key = f"chunk_upload_{str(uuid.uuid4())}"
    self.multipart = self.s3_client.create_multipart_upload(
        Bucket=AWS_STORAGE_BUCKET_NAME,
        Key=self.s3_key,
        ContentType=self.content_type,
    )
    self.upload_id = self.multipart["UploadId"]
    self.executor = ThreadedS3ChunkUploader(
        self.s3_client,
        AWS_STORAGE_BUCKET_NAME,
        key=self.s3_key,
        upload_id=self.upload_id,
    )
def launch_lambda(function_name, msg):
    """
    Fire and forget invoking this lambda so we can respond to the user
    without waiting.
    """
    # func_name: context.function_name
    # The second positional argument to boto3_client is region_name.
    lambda_client = boto3_client('lambda', 'us-east-1')
    invoke_response = lambda_client.invoke(FunctionName=function_name,
                                           InvocationType='Event',
                                           Payload=json.dumps(msg))
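# Hedged usage sketch (not from the original source): launch_lambda above is
# fire-and-forget, so the caller does not wait on a result. The function name
# and message below are hypothetical.
launch_lambda('my-background-worker', {'action': 'reindex', 'user_id': 42})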
def get_permissions_by_user_id(user_id):
    sql = f'''
        select pt.name
        from roles.permission_type as pt
        inner join roles.permission as p on p.permission_type_id = pt.id
        inner join roles.permission_role as pr on pr.permission_id = p.id
        inner join roles.role as r on r.id = pr.role_id
        inner join users.users as u on u.role_id = r.id
        where u.id = '{user_id}'
    '''
    msg = {'body': {'action': 'run', 'queries': [sql]}}
    invoke_response = boto3_client(
        'lambda',
        aws_access_key_id=get_enviroment_var('USER_ACCESS'),
        aws_secret_access_key=get_enviroment_var('USER_SECRET')).invoke(
            FunctionName=DB_LAMBDA,
            InvocationType='RequestResponse',
            Payload=json.dumps(msg))
    response = json.loads(invoke_response['Payload'].read())
    if response['status_code'] == 200:
        body = json.loads(response['body'])
        return body[0]
    else:
        return None
def fetchGeoIPData(address):
    '''
    Fetch GeoIP data for an IP address.

    Args:
        address (string): IP address to be looked up in the database

    Returns:
        GeoIP response from the Lambda function, or None on failure
    '''
    lambda_client = boto3_client('lambda')
    response = lambda_client.invoke(
        FunctionName="CentralizedLogging_FunctionToFetchGeoIPData",
        InvocationType='RequestResponse',
        Payload=json.dumps(address))
    string_response = response["Payload"].read().decode('utf-8')
    parsed_response = json.loads(string_response)
    try:
        if parsed_response['statusCode'] == 200:
            geoipresponse = json.loads(parsed_response["body"])
            return geoipresponse
    except:
        pass
    return None
def publish_kinesis(stream, region, data, pk):
    print("Publishing payload to Kinesis")
    kinesis = boto3_client("kinesis", region_name=region)
    res = kinesis.put_record(StreamName=stream,
                             Data=json_dumps(data),
                             PartitionKey=pk)
    print(f"Kinesis response: {res}")
def db_upsert_event(game_id, event_period):
    current_ts = int(time.time())
    current_dt = datetime.datetime.fromtimestamp(current_ts)
    ttl_dt = current_dt + datetime.timedelta(days=90)
    ttl_ts = int(ttl_dt.timestamp())

    dynamo_client = boto3_client("dynamodb")
    response = dynamo_client.update_item(
        TableName='nhl-shotmaps-tracking',
        Key={'gamePk': {
            'N': game_id
        }},
        UpdateExpression=
        "SET lastPeriodProcessed = :period, #ts = :ts, tsPlusTTL = :ts_ttl",
        ExpressionAttributeNames={"#ts": "timestamp"},
        ExpressionAttributeValues={
            ':period': {
                'N': str(event_period)
            },
            ':ts': {
                'N': str(current_ts)
            },
            ':ts_ttl': {
                'N': str(ttl_ts)
            }
        },
        ReturnValues="ALL_NEW")
    logging.info("DynamoDB Record Updated: %s", response)
def get_aws_client(service_name,
                   region_name=None,
                   aws_access_key_id=None,
                   aws_secret_access_key=None,
                   aws_session_token=None,
                   endpoint_url=None,
                   config=None):
    """
    Utility method to get an AWS (boto3) client instance.

    :param service_name:
    :param region_name:
    :param aws_access_key_id:
    :param aws_secret_access_key:
    :param aws_session_token:
    :param endpoint_url:
    :param config:
    :return: client
    """
    assert util.is_not_empty(region_name), \
        "AWS Region cannot be empty."
    assert util.is_not_empty(service_name), \
        "service_name cannot be empty."

    from boto3 import client as boto3_client
    return boto3_client(service_name,
                        region_name=region_name,
                        aws_access_key_id=aws_access_key_id,
                        aws_secret_access_key=aws_secret_access_key,
                        aws_session_token=aws_session_token,
                        endpoint_url=endpoint_url,
                        config=config)
def __init__(  # type: ignore
        self,
        name: str = None,
        labels: Iterable[str] = None,
        aws_access_key_id: str = None,
        aws_secret_access_key: str = None,
        aws_session_token: str = None,
        region_name: str = None,
        **kwargs) -> None:
    super().__init__(name=name, labels=labels)

    from boto3 import client as boto3_client

    # Config used for boto3 client initialization
    aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
    aws_secret_access_key = aws_secret_access_key or os.getenv(
        "AWS_SECRET_ACCESS_KEY")
    aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
    region_name = region_name or os.getenv("REGION_NAME")

    # Parse accepted kwargs for definition and run
    self.task_definition_kwargs, self.task_run_kwargs = self._parse_kwargs(
        kwargs)

    # Client initialization
    self.boto3_client = boto3_client(
        "ecs",
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token,
        region_name=region_name,
    )
def setUp(self):
    self.s3 = boto3_client('s3')
    self.bucket_name = 'test2207'
    self.local_path = 'tests'
    if 'tmp' not in os.listdir():
        os.mkdir('tmp')
    with open('tmp/test1', 'w') as f:
        f.write('foo')
def _boto3_client(self):  # type: ignore
    from boto3 import client as boto3_client

    return boto3_client(
        "s3",
        aws_access_key_id=self.aws_access_key_id,
        aws_secret_access_key=self.aws_secret_access_key,
        aws_session_token=self.aws_session_token,
    )
def client(self, service_name, **kwargs):
    if service_name not in self._service_endpoint_mapping:
        raise Exception('%s is not supported by this mock session.' %
                        (service_name))
    return boto3_client(
        service_name,
        endpoint_url=self._service_endpoint_mapping[service_name],
        aws_access_key_id=self.aws_access_key_id,
        region_name=self.region_name,
        aws_secret_access_key=self.aws_secret_access_key,
        verify=False)
def invoke_lambda(
    name,
    headers=None,
    body=None,
    pathParameters=None,
    queryStringParameters=None,
    invoke_type="RequestResponse",
    full_payload=None,
    context={},
    timeout=300,
    retries=4,
):
    # Need to find out the stage we are running in
    # so we call the proper lambda in this stage
    aws_stage = os.environ["AWS_STAGE"]

    if full_payload:
        payload = full_payload
        if "headers" not in payload:
            payload["headers"] = {}
    else:
        payload = {}
        payload["body"] = body
        payload["headers"] = headers
        payload["pathParameters"] = pathParameters
        payload["queryStringParameters"] = queryStringParameters

    if context:
        context = json.dumps(context).encode("utf-8")
        context = base64.b64encode(context).decode("utf-8")

    config = Config(connect_timeout=timeout,
                    read_timeout=timeout,
                    retries={"max_attempts": retries})

    # Invoke the function
    lambda_client = boto3_client("lambda", config=config)
    response = (lambda_client.invoke(
        FunctionName=f"{aws_stage}-{name}",
        InvocationType=invoke_type,
        Payload=json.dumps(payload, default=datetime_convert),
        ClientContext=json.dumps(context),
    ) if context else lambda_client.invoke(
        FunctionName=f"{aws_stage}-{name}",
        InvocationType=invoke_type,
        Payload=json.dumps(payload, default=datetime_convert),
    ))

    if invoke_type == "RequestResponse":
        string_response = response["Payload"].read().decode("utf-8")
        parsed_response = json.loads(string_response)
        return parsed_response
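# Hedged usage sketch (not from the original source): invoke_lambda above
# prefixes the target with AWS_STAGE, so a synchronous call might look like
# this; "get-shipment" and the path parameter are hypothetical.
result = invoke_lambda(
    "get-shipment",
    headers={"Content-Type": "application/json"},
    pathParameters={"shipmentId": "12345"},
)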
def _perform_fan_out_warm_up_calls(config, correlation_id, concurrency):
    function_name = '{function_name}:{function_version}'.format(**LAMBDA_INFO)
    base_payload = {
        config['flag']: True,
        '__WARMER_CONCURRENCY__': concurrency,
        '__WARMER_CORRELATION_ID__': correlation_id
    }
    for i in range(1, concurrency):
        try:
            invocation_payload = json.dumps(dict(
                base_payload, __WARMER_INVOCATION__=(i + 1)),
                                            sort_keys=True)
            boto3_client('lambda').invoke(
                FunctionName=function_name,
                InvocationType='Event'
                if i < concurrency - 1 else 'RequestResponse',
                Payload=invocation_payload)
        except Exception as e:
            logger.error(
                'Failed to invoke "{}" with event "{}" during warm up fan out. '
                'Error: "{}"'.format(function_name, invocation_payload, str(e)))
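# Hedged sketch (not from the original source): on the receiving side, a
# handler would typically check the warmer flag from the payload above and
# short-circuit before doing real work. The key 'warmer' is hypothetical; the
# actual flag name comes from config['flag'] in _perform_fan_out_warm_up_calls.
def handler(event, context):
    if event.get('warmer'):
        return {'warmed': True,
                'invocation': event.get('__WARMER_INVOCATION__', 1)}
    # ... normal request handling continues here ...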
def execute(  # type: ignore
        self, storage: "Docker", flow_location: str, **kwargs: Any) -> None:
    """
    Run the Fargate task that was defined for this flow.

    Args:
        - storage (Storage): the Storage object that contains the flow
        - flow_location (str): the location of the Flow to execute
        - **kwargs (Any): additional keyword arguments to pass to the runner
    """
    from boto3 import client as boto3_client

    flow_run_id = prefect.context.get("flow_run_id", "unknown")

    container_overrides = [{
        "name": "flow-container",
        "environment": [
            {
                "name": "PREFECT__CLOUD__AUTH_TOKEN",
                "value": config.cloud.agent.auth_token or config.cloud.auth_token,
            },
            {
                "name": "PREFECT__CONTEXT__FLOW_RUN_ID",
                "value": flow_run_id
            },
            {
                "name": "PREFECT__CONTEXT__IMAGE",
                "value": storage.name
            },
            {
                "name": "PREFECT__CONTEXT__FLOW_FILE_PATH",
                "value": flow_location,
            },
        ],
    }]

    boto3_c = boto3_client(
        "ecs",
        aws_access_key_id=self.aws_access_key_id,
        aws_secret_access_key=self.aws_secret_access_key,
        aws_session_token=self.aws_session_token,
        region_name=self.region_name,
    )

    boto3_c.run_task(overrides={"containerOverrides": container_overrides},
                     launchType="FARGATE",
                     **self.task_run_kwargs)
def _upload_file(file_path: str, uploader: _Uploader):
    """
    Upload a file to S3.

    Parameters
    ----------
    file_path: str
        The path to the file on the local computer.
    uploader: _Uploader
        Holds the parameters returned by the upload request.

    Returns
    -------
    _Uploader
        The input uploader object with its s3_version field now populated.

    """
    additional_s3_opts = {
        'use_ssl': uploader.s3_use_ssl,
        'config': Config(s3={'addressing_style': uploader.s3_addressing_style})
    }

    if uploader.s3_endpoint_url is not None:
        additional_s3_opts['endpoint_url'] = uploader.s3_endpoint_url

    s3_client = boto3_client('s3',
                             region_name=uploader.region_name,
                             aws_access_key_id=uploader.aws_access_key_id,
                             aws_secret_access_key=uploader.aws_secret_access_key,
                             aws_session_token=uploader.aws_session_token,
                             **additional_s3_opts)
    with open(file_path, 'rb') as f:
        try:
            # NOTE: This is only using the simple PUT logic, not the more
            # sophisticated multipart upload approach that is also available
            # (providing parallel uploads, etc).
            upload_response = s3_client.put_object(
                Bucket=uploader.bucket,
                Key=uploader.object_key,
                Body=f,
                Metadata={"X-Citrine-Upload-Id": uploader.upload_id})
        except ClientError as e:
            raise RuntimeError("Upload of file {} failed with the following "
                               "exception: {}".format(file_path, e))
    uploader.s3_version = upload_response['VersionId']
    return uploader
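# Hedged sketch (not from the original source): the NOTE above mentions the
# multipart alternative. boto3's managed transfer layer performs multipart and
# parallel part uploads automatically once a file crosses the threshold, so a
# swap-in might look like this. Metadata is passed through ExtraArgs here, but
# upload_file does not return the object's VersionId, so populating
# uploader.s3_version would need a separate head_object call.
from boto3.s3.transfer import TransferConfig

transfer_config = TransferConfig(multipart_threshold=8 * 1024 * 1024,
                                 max_concurrency=4)
s3_client.upload_file(
    file_path,
    uploader.bucket,
    uploader.object_key,
    Config=transfer_config,
    ExtraArgs={"Metadata": {"X-Citrine-Upload-Id": uploader.upload_id}})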
def check_db_for_last_period(game_id):
    dynamo_client = boto3_client("dynamodb")
    response = dynamo_client.get_item(TableName='nhl-shotmaps-tracking',
                                      Key={'gamePk': {
                                          'N': game_id
                                      }})

    try:
        item = response['Item']
        last_period_processed = int(item['lastPeriodProcessed']['N'])
    except KeyError:
        logging.info("NEW Game Detected - record does not exist yet.")
        last_period_processed = 0

    return last_period_processed
def execute(  # type: ignore
        self, flow: "Flow", **kwargs: Any) -> None:
    """
    Run the Fargate task that was defined for this flow.

    Args:
        - flow (Flow): the Flow object
        - **kwargs (Any): additional keyword arguments to pass to the runner
    """
    from boto3 import client as boto3_client

    flow_run_id = prefect.context.get("flow_run_id", "unknown")

    container_overrides = [{
        "name": "flow-container",
        "environment": [
            {
                "name": "PREFECT__CLOUD__AUTH_TOKEN",
                "value": config.cloud.agent.auth_token or config.cloud.auth_token,
            },
            {
                "name": "PREFECT__CONTEXT__FLOW_RUN_ID",
                "value": flow_run_id
            },
            {
                "name": "PREFECT__CONTEXT__IMAGE",
                "value": get_flow_image(flow)
            },
        ],
    }]

    boto3_c = boto3_client(
        "ecs",
        aws_access_key_id=self.aws_access_key_id,
        aws_secret_access_key=self.aws_secret_access_key,
        aws_session_token=self.aws_session_token,
        region_name=self.region_name,
    )

    boto3_c.run_task(
        overrides={"containerOverrides": container_overrides},
        launchType=self.launch_type,
        **self.task_run_kwargs,
    )
def upload_to_s3(
        file: Path,
        bucket: str = f"arr-packit-{getenv('DEPLOYMENT', 'dev')}") -> None:
    """Upload a file to an S3 bucket.

    Args:
        file: File to upload.
        bucket: Bucket to upload to.
    """
    s3_client = boto3_client("s3")
    try:
        logger.info(f"Uploading {file} to S3 ({bucket})")
        s3_client.upload_file(str(file), bucket, file.name)
    except ClientError as e:
        logger.error(e)
        raise
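# Hedged usage sketch (not from the original source): the path and bucket
# below are hypothetical; omitting `bucket` falls back to the
# DEPLOYMENT-derived default defined in upload_to_s3 above.
upload_to_s3(Path("dist/example-1.0.0.tar.gz"), bucket="arr-packit-prod")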
def upload(lambda_function='lambda_function.py', dependencies=[]):
    lambda_client = boto3_client('lambda')
    import zipfile
    zf = FUNCTION_NAME + ".zip"
    logger.info('Zipping lambda project files')
    with zipfile.ZipFile(zf, 'w') as code_archive:
        code_archive.write(lambda_function)
        for filename in dependencies:
            zip_path(filename, code_archive)
    try:
        logger.info('Uploading code')
        with open(zf, 'rb') as code_archive:
            data = code_archive.read()
            lambda_client.update_function_code(FunctionName=FUNCTION_NAME,
                                               ZipFile=data)
    except Exception as e:
        logger.error(e)
def client(self, service_name, **kwargs):
    """
    Mock boto3 client.

    If **kwargs are provided, they will be passed through to boto3.client
    unless they are already contained within protected_kwargs, which are set
    with priority.

    Returns a boto3.resources.factory.s3.ServiceClient object.
    """
    if service_name not in self._service_endpoint_mapping:
        raise Exception('%s is not supported by this mock session.' %
                        (service_name))

    protected_kwargs = {
        **self.common_protected_kwargs,
        'service_name': service_name,
        'endpoint_url': self._service_endpoint_mapping[service_name]
    }

    return boto3_client(**{**kwargs, **protected_kwargs})
def lambda_handler(content, is_json=True):
    lambda_client = boto3_client('lambda')
    print('in lambda_handler')
    print(json.dumps(content))
    if is_json:
        content = json.dumps(content)
    invoke_response = lambda_client.invoke(
        FunctionName="dev-save_to_ipfs",
        # InvocationType='Event',
        InvocationType='RequestResponse',
        Payload=content)
    # Payload=json.dumps(msg))
    print(invoke_response)
    if invoke_response['Payload'] is not None:
        data = invoke_response['Payload'].read().decode()
        return data
    else:
        return invoke_response
def get_owner_from_cloud_trail(self):
    try:
        client = boto3_client(
            'cloudtrail',
            region_name=self._instance["Placement"]["AvailabilityZone"][:-1])
        result = client.lookup_events(LookupAttributes=[{
            'AttributeKey': 'ResourceName',
            'AttributeValue': self._instance['InstanceId']
        }])
        for event in result["Events"]:
            if event['EventName'] == 'RunInstances':
                return event["Username"]
    except Exception as exc:  # pylint: disable=broad-except
        LOGGER.warning(
            "Error occurred when trying to find an owner for '%s' in CloudTrail: %s",
            self._instance['InstanceId'], exc)
    return None
def lambda_handler(event, context):
    lambda_client = boto3_client('lambda')
    dynamodb = boto3.resource('dynamodb', region_name="eu-west-1")
    table = dynamodb.Table("sam_table")
    items = table.get_item(Key={"ID": "1"})
    response = table.query(KeyConditionExpression=Key('ID').eq("1"))
    for key, value in response['Items'][0].items():
        if key == 'Visit':
            Visits = int(value)
            Hit = Visits + 1
    response = table.update_item(
        Key={
            'ID': "1",
        },
        UpdateExpression="set Visit = :r",
        ExpressionAttributeValues={':r': Hit},
        ReturnValues="UPDATED_NEW")
    # msg = {"key": "new_invocation", "at": "square"}
    # invoke_response = lambda_client.invoke(FunctionName="dynamoGet",
    #                                        InvocationType='RequestResponse',
    #                                        Payload=json.dumps(msg))
    # print(invoke_response)
    # return {"statusCode": 200, "body": Hit}
    return {
        'statusCode': 200,
        'headers': {
            'Access-Control-Allow-Headers':
                'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token',
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
        },
        'body': Hit
    }
def send_email_ses(config, sender, subject, message, recipients, image_png):
    """
    Sends notification through AWS SES.

    Does not handle access keys. Use either
    1/ configuration file
    2/ EC2 instance profile

    See also http://boto3.readthedocs.org/en/latest/guide/configuration.html.
    """
    from boto3 import client as boto3_client
    client = boto3_client('ses')

    msg_root = generate_email(sender, subject, message, recipients, image_png)
    response = client.send_raw_email(Source=sender,
                                     Destinations=recipients,
                                     RawMessage={'Data': msg_root.as_string()})

    logger.debug(("Message sent to SES.\nMessageId: {},\nRequestId: {},\n"
                  "HTTPSStatusCode: {}").format(
                      response['MessageId'],
                      response['ResponseMetadata']['RequestId'],
                      response['ResponseMetadata']['HTTPStatusCode']))
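# Hedged illustration (not from the original source) of the two credential
# options the docstring above refers to; boto3 picks these up automatically,
# so no keys are passed to boto3_client('ses').
#
# 1/ configuration file (~/.aws/credentials):
#     [default]
#     aws_access_key_id = <your key id>
#     aws_secret_access_key = <your secret key>
#
# 2/ EC2 instance profile: attach an IAM role that allows ses:SendRawEmail to
#    the instance; credentials are then served from the instance metadata
#    endpoint without any local configuration.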
from __future__ import print_function

import json
import logging
import sys, os
from urlparse import parse_qs  # Python 2 module; on Python 3 use urllib.parse
from boto3 import client as boto3_client
from base64 import b64decode

log = logging.getLogger()
log.setLevel(logging.DEBUG)

lambda_client = boto3_client('lambda')

with open("lib/token.secret") as secretfile:
    ENCRYPTED_EXPECTED_TOKEN = secretfile.read().strip()

kms = boto3_client('kms')
expected_token = kms.decrypt(
    CiphertextBlob=b64decode(ENCRYPTED_EXPECTED_TOKEN))['Plaintext']


def handler(event, context):
    req_body = event['body']
    params = parse_qs(req_body)
    token = params['token'][0]
    if token != expected_token:
        log.error("Request token (%s) does not match expected token", token)
        return "Received invalid request token"

    author = params['text'][0] if 'text' in params else ""
    response_url = params['response_url'][0]