def function(self, name, func, **kwargs):
    """Create an AWS Lambda function backed by this stack's S3 artifact.

    :param name: Pulumi resource name for the function.
    :param func: Handler reference; a ``module:handler`` form is
        normalized to ``module.handler``.
    :param kwargs: Extra arguments forwarded to ``lambda_.Function``.
    :return: The created ``lambda_.Function`` resource.
    """
    # Accept "module:handler" style references by normalizing the separator.
    handler_ref = func.replace(':', '.')
    return lambda_.Function(
        f'{name}',
        handler=handler_ref,
        s3_bucket=self.bucket.bucket,
        s3_key=self.object.key,
        s3_object_version=self.object.version_id,
        runtime='python3.7',
        role=self.role.arn,
        **kwargs,
    )
def add_lambda(self, archive_path: str, sns_topic: sns.Topic):
    """ Create lambda function with sns invoke permission """
    # Execution role that the Lambda service is allowed to assume.
    lambda_role = iam.Role(
        resource_name=format_resource_name("lambda-role"),
        assume_role_policy="""{ "Version": "2012-10-17", "Statement": [ { "Action": "sts:AssumeRole", "Principal": { "Service": "lambda.amazonaws.com" }, "Effect": "Allow", "Sid": "" } ] }""")
    # Inline policy granting only CloudWatch Logs write access.
    lambda_role_policy = iam.RolePolicy(
        resource_name=format_resource_name("lambda-policy"),
        role=lambda_role.id,
        policy="""{ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": [ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], "Resource": "arn:aws:logs:*:*:*" }] }""")
    # The mail-processing function itself; the source hash forces a
    # redeploy whenever the archive contents change.
    mail_processor_function = lambda_.Function(
        resource_name=format_resource_name("function"),
        role=lambda_role.arn,
        runtime="python3.7",
        handler="handler.lambda_handler",
        code=archive_path,
        source_code_hash=filebase64sha256(archive_path))
    # Allow the given SNS topic to invoke the function.
    allow_sns = lambda_.Permission(
        resource_name=format_resource_name("permissions"),
        action="lambda:InvokeFunction",
        function=mail_processor_function.name,
        principal="sns.amazonaws.com",
        source_arn=sns_topic.arn)
    return mail_processor_function
def main():
    """Provision the hello-world Lambda, front it with API Gateway, and
    grant the gateway permission to invoke it."""
    code_archive = pulumi.AssetArchive({".": pulumi.FileArchive("./lambda")})
    fn = lambda_.Function(
        "hello_world",
        runtime="python3.7",
        role=iam.lambda_role.arn,
        description="pulumi lambda hello world",
        handler="main.handler",
        code=code_archive,
    )
    api = apigw.APIGateway("hello_world", fn).build()
    # Permit invocation from any stage/method of this API.
    lambda_.Permission(
        "hello_world",
        function=fn.name,
        action="lambda:InvokeFunction",
        principal="apigateway.amazonaws.com",
        source_arn=api.execution_arn.apply(lambda arn: f"{arn}/*/*"),
    )
import pulumi from pulumi_aws import lambda_, sfn import iam import json find_instance = lambda_.Function( "ce-find-instance", role=iam.lambda_role.arn, runtime="python3.7", handler="find_instance.lambda_handler", code=pulumi.AssetArchive({".": pulumi.FileArchive("./lambda")}), ) get_instance_status = lambda_.Function( "ce-get-instance-status", role=iam.lambda_role.arn, runtime="python3.7", handler="get_instance_status.lambda_handler", code=pulumi.AssetArchive({".": pulumi.FileArchive("./lambda")}), ) create_image = lambda_.Function( "ce-create-image", role=iam.lambda_role.arn, runtime="python3.7", handler="create_image.lambda_handler", code=pulumi.AssetArchive({".": pulumi.FileArchive("./lambda")}), ) get_image_status = lambda_.Function( "ce-get-image-status",
LAMBDA_VERSION = '1.0.0' os.system('zip %s %s' % (LAMBDA_PACKAGE, LAMBDA_SOURCE)) # Create an AWS resource (S3 Bucket) bucket = s3.Bucket('lambda-api-gateway-example') mime_type, _ = mimetypes.guess_type(LAMBDA_PACKAGE) obj = s3.BucketObject(LAMBDA_VERSION + '/' + LAMBDA_PACKAGE, bucket=bucket.id, source=FileAsset(LAMBDA_PACKAGE), content_type=mime_type) example_fn = lambda_.Function( 'ServerlessExample', s3_bucket=bucket.id, s3_key=LAMBDA_VERSION + '/' + LAMBDA_PACKAGE, handler="lambda.handler", runtime="python3.7", role=iam.lambda_role.arn, ) example_api = apigateway.RestApi( 'ServerlessExample', description='Pulumi Lambda API Gateway Example') proxy_root_met = apigateway.Method('proxy_root', rest_api=example_api, resource_id=example_api.root_resource_id, http_method='ANY', authorization='NONE') example_root_int = apigateway.Integration( 'lambda_root',
######### LAYERS ########### artifacts_bucket = s3.Bucket('artifacts') # Upload ffmpeg library to bucket api_airtable_layer_zip = s3.BucketObject( 'hello', bucket=artifacts_bucket.id, source=pulumi.FileAsset("./step_hello/hello.py")) ######## LAMBDAS ########### api_airtable = lambda_.Function( 'api-airtable', role=api_lambda_role.arn, runtime="python3.8", handler="handler.app", #layers=[api_airtable_layer.arn], code=pulumi.AssetArchive({'.': pulumi.FileArchive('./step_hello')}), timeout=30, memory_size=512, ) api_lambda_permission = lambda_.Permission( 'api-lambda-permission', action="lambda:InvokeFunction", principal="apigateway.amazonaws.com", function=api_airtable.name) #env = Environment(loader=FileSystemLoader('./'), trim_blocks=True, lstrip_blocks=True) #openapi_spec_template = env.get_template('api.yaml') marv_api_key = apigateway.ApiKey('marv-internal')
role=example_role.name, ) iam.RolePolicyAttachment( "CloudwatchPolicyAttach", policy_arn= "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", role=example_role.name, ) example_function = lambda_.Function( "exampleFunction2", # code="lambda.zip", code="lambda_no_deps.zip", source_code_hash=filebase64sha256("lambda_no_deps.zip"), handler="handler.lambda_handler", role=example_role.arn, runtime="python3.8", timeout=30, opts=ResourceOptions(depends_on=[environment]), **get_environment_function_args(environment), ) logs = cloudwatch.LogGroup( "exampleLogGroup", name=example_function.name.apply(lambda name: f"/aws/lambda/{name}"), ) # API Gateway gateway = apigateway.RestApi("exampleApi")
# Path where the EFS access point is mounted inside the Lambda runtime.
mount_location = "/mnt/efs"
example_function = lambda_.Function(
    "exampleFunction",
    code="lambda.zip",
    source_code_hash=filebase64sha256("lambda.zip"),
    handler="handler.my_handler",
    role=example_role.arn,
    runtime="python3.8",
    # The function must run inside the VPC to reach the EFS mount targets.
    vpc_config={
        "security_group_ids": [environment.security_group_id],
        "subnet_ids": environment.public_subnet_ids
    },
    file_system_config={
        "arn": environment.efs_access_point_arn,
        "local_mount_path": mount_location
    },
    environment={
        "variables": {
            # Packages are installed onto EFS; extend the library and binary
            # search paths so the runtime can resolve them from the mount.
            "LAMBDA_PACKAGES_PATH": mount_location,
            "LD_LIBRARY_PATH": f"/var/lang/lib:/lib64:/usr/lib64:/var/runtime:/var/runtime/lib:/var/task:/var/task/lib:/opt/lib:{mount_location}/lambda_packages/lib",
            "PATH": f"/var/lang/bin:/usr/local/bin:/usr/bin/:/bin:/opt/bin:{mount_location}/lambda_packages/bin"
        }
    },
    opts=ResourceOptions(depends_on=[environment]))
pulumi.export('file_system_id', environment.file_system_id)
pulumi.export('vpc_id', environment.vpc_id)
"ReplicationLambdaAllowDynamoPolicy", role=lambdaRole.name, policy=getAllowDynamoStreamPolicyDocument( dynamoTable.stream_arn).apply(lambda d: json.dumps(d))) lambdaRoleAllowFirehosePutPolicy = iam.RolePolicy( "ReplicationLambdaAllowFirehosePolicy", role=lambdaRole.name, policy=getAllowFirehosePutPolicyDocument( deliveryStream.arn).apply(lambda d: json.dumps(d))) dynamoTriggerFunction = lambda_.Function( 'ReplicationLambdaFunction', role=lambdaRole.arn, runtime='python3.7', handler='dynamoTriggerLambda.handler', code=pulumi.FileArchive("./dynamoTriggerLambda"), environment={"Variables": { "DELIVERY_STREAM_NAME": deliveryStream.name }}) dynamoTrigger = lambda_.EventSourceMapping( "ReplicationDynamoTriggerMapping", event_source_arn=dynamoTable.stream_arn, function_name=dynamoTriggerFunction.arn, starting_position='LATEST') pulumi.export('table_name', dynamoTable.name) pulumi.export('bucket_name', bucket.id) pulumi.export('delivery_stream_name', deliveryStream.name)
def __init__(self,
             name: str,
             stack: str,
             issue: str,
             runtime: str,
             handler: str,
             lambda_archive: pulumi.Input[pulumi.Archive],
             source_code_hash: str = None,
             memory_size_mb: int = 128,
             timeout: int = 1,
             opts: pulumi.ResourceOptions = None):
    """
    Create Lambda for usage at CloudFront, please use us-east-1 provider in opts.
    Create Role and grant permissions for edgelambda.awsamazon.com
    :param name: Name of the component
    :param stack: Name of the stack, staging or prod for example, used for tags
    :param issue: Issue tracker id, used for tags
    :param runtime: Lambda runtime, supported runtimes: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-requirements-limits.html#lambda-requirements-lambda-function-configuration
    :param handler: Lambda handler
    :param lambda_archive: Archive with Lambda code
    :param source_code_hash: base64(sha256(lambda.zip))
    :param memory_size_mb: Lambda memory size in Mb, 128 Mb max for viewer request and response events
    :param timeout: Lambda timeout, max 30 seconds for origin request and response events and max 5 seconds for viewer request and response events, see details at https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-requirements-limits.html#lambda-requirements-see-limits
    :param opts: Standard Pulumi ResourceOptions
    """
    super().__init__('LambdaEdge', name, None, opts)
    self.name = name
    self.stack = stack
    self.issue = issue
    # Hard upper bound only; the stricter 5-second viewer-event limit from the
    # docstring is NOT enforced here — NOTE(review): confirm that is acceptable.
    if timeout > 30:
        raise LambdaTimeoutValidation(
            'Maximum timeout for lambda@edge is 30 seconds for origin events and 5 seconds for viewer events'
        )
    # Common tags applied to every child resource.
    self.tags = {
        'lambda-edge': f'{self.name}-{self.stack}',
        'stack': self.stack,
        'issue': self.issue,
    }
    # Execution role assumable by Lambda / Lambda@Edge (policy document in LAMBDA_ROLE).
    role = iam.Role(f'{name}-lambda-role',
                    path='/service-role/',
                    assume_role_policy=json.dumps(LAMBDA_ROLE),
                    tags=self.tags,
                    opts=pulumi.ResourceOptions(parent=self))
    iam.RolePolicy(f'{name}-lambda-policy',
                   role=role.id,
                   policy=json.dumps(LAMBDA_CLOUDWATCH_POLICY),
                   opts=pulumi.ResourceOptions(parent=self))
    # publish=True creates a published version of the function; CloudFront
    # associates Lambda@Edge by version ARN, not $LATEST.
    lambda_edge = lambda_.Function(
        f'{name}-lambda-edge',
        description=f'Handler for processing index.html for stack: {stack}, '
        f'issue: {issue}',
        runtime=runtime,
        handler=handler,
        code=lambda_archive,
        source_code_hash=source_code_hash,
        memory_size=memory_size_mb,
        timeout=timeout,
        publish=True,
        tags=self.tags,
        role=role.arn,
        opts=pulumi.ResourceOptions(parent=self))
    # Allow the edgelambda service to read the function configuration.
    lambda_.Permission(f'{name}-lambda-edge-permission',
                       action='lambda:GetFunction',
                       function=lambda_edge,
                       principal='edgelambda.amazonaws.com',
                       opts=pulumi.ResourceOptions(parent=self))
    # Expose the pieces callers need for CloudFront wiring.
    self.timeout = lambda_edge.timeout
    self.arn = lambda_edge.arn
    self.lambda_edge = lambda_edge
    self.register_outputs({
        'timeout': self.timeout,
        'arn': self.arn,
    })
# Copyright 2016-2018, Pulumi Corporation. All rights reserved. import iam import pulumi from pulumi_aws import lambda_, sfn hello_world_fn = lambda_.Function( 'helloWorldFunction', role=iam.lambda_role.arn, runtime="python2.7", handler="hello_step.hello", code=pulumi.AssetArchive({'.': pulumi.FileArchive('./step_hello')})) state_defn = state_machine = sfn.StateMachine( 'stateMachine', role_arn=iam.sfn_role.arn, definition=hello_world_fn.arn.apply(lambda arn: """{ "Comment": "A Hello World example of the Amazon States Language using an AWS Lambda Function", "StartAt": "HelloWorld", "States": { "HelloWorld": { "Type": "Task", "Resource": "%s", "End": true } } }""" % arn)) pulumi.export('state_machine_arn', state_machine.id)
def __init__(self,
             name,
             scripts_bucket: s3.Bucket = None,
             managed_policy_arns: List[str] = None,
             tags: Dict[str, str] = None,
             opts: pulumi.ResourceOptions = None):
    """Provision the script-archive Lambda plus its IAM role and S3 permission.

    The function copies files from the fileproc bucket to the datalake raw
    bucket and triggers glue jobs (see handler description below).

    :param name: Base name for all child resources.
    :param scripts_bucket: Bucket allowed to invoke the function.
    :param managed_policy_arns: Optional managed-policy ARNs to attach to the
        role. (BUG FIX: previously a mutable default argument ``[]``; now
        ``None`` with in-body normalization — behaviorally identical for
        callers.)
    :param tags: Extra tags merged into every taggable child resource.
    :param opts: Standard Pulumi resource options.
    """
    super().__init__('hca:ScriptArchiveLambda', name, None, opts)

    # Normalize the (formerly mutable-default) policy list.
    managed_policy_arns = managed_policy_arns or []

    # All child resources carry the PII data-classification tag.
    merged_tags = tags.copy() if tags else {}
    merged_tags.update({'hca:dataclassification': 'pii'})

    # Execution role assumable by the Lambda service.
    role = iam.Role(
        f"{name}-role",
        path="/lambda/",
        description="role for script archive lambda",
        assume_role_policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Principal": {
                    "Service": "lambda.amazonaws.com"
                }
            }]
        }),
        force_detach_policies=True,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))

    # Attach any caller-supplied managed policies (no-op for an empty list,
    # so the previous `if managed_policy_arns:` guard is unnecessary).
    for index, policy in enumerate(managed_policy_arns):
        iam.RolePolicyAttachment(
            f"{name}-attach-policy-{index}",
            policy_arn=policy,
            role=role,
            opts=pulumi.ResourceOptions(parent=self))

    # Inline policy scoped to the scripts bucket (built once its name resolves).
    fileprocpolicy = iam.RolePolicy(
        f"{name}-inline-policy",
        role=role,
        policy=scripts_bucket.bucket.apply(inline_policy),
        opts=pulumi.ResourceOptions(parent=self))

    # The handler is a single file shipped inside the archive as index.py.
    # NOTE use relative path from pulumi root. (Leftover debug print removed.)
    archive_source = os.path.abspath(
        os.path.join(os.getcwd(), '../../src/lambdas/scripts_archive.py'))

    self.function = lambda_.Function(
        f"{name}-function",
        runtime='python3.6',
        description=
        'copy files from fileproc bucket to datalake raw bucket and trigger glue jobs',
        handler='index.main',
        memory_size=128,
        timeout=30,
        code=pulumi.AssetArchive({
            'index.py': pulumi.FileAsset(archive_source),
        }),
        role=role.arn,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))

    # Allow S3 (the scripts bucket) to invoke the function on events.
    lambda_.Permission(f"{name}-permission",
                       action='lambda:InvokeFunction',
                       principal='s3.amazonaws.com',
                       function=self.function,
                       source_arn=scripts_bucket.arn,
                       opts=pulumi.ResourceOptions(parent=self))
], "Resource": "*" } ] }""") cleanup_old_amis = lambda_.Function( service_naming_convention + '_' + lambda_name, s3_bucket=LAMBDA_BUCKET, s3_key=LAMBDA_VERSION + '/' + LAMBDA_PACKAGE, handler="delete_old_amis.handler", runtime=runtime, role=lambda_role.arn, environment=lambda_.FunctionEnvironmentArgs( variables={ "APP": service_naming_convention + "-app_", "AMI_LIMIT": cleanup_amis_conf.require('ami_limit') }), tags={ "Name": service_naming_convention + "-" + lambda_name, "Application": appname, "Description": "Lambda to cleanup old AMIs for ASG", "Environment": env, "Role": "Lambda", "Pulumi": "True" }) event_rule = cloudwatch.EventRule( service_naming_convention + "-cleanup_old_amis-rule", name=service_naming_convention + '-cleanup-old-amis-event', description="This is lambda for cleanup old amis", schedule_expression="cron(0 3 ? * SUN *)")
# Create zip file of lambda function code os.system('zip %s %s' % (LAMBDA_SCAN_PACKAGE, LAMBDA_SCAN_SOURCE)) mime_type, _ = mimetypes.guess_type(LAMBDA_SCAN_PACKAGE) obj = s3.BucketObject( LAMBDA_VERSION+'/'+LAMBDA_SCAN_PACKAGE, bucket=web_bucket.id, source=FileAsset(LAMBDA_SCAN_PACKAGE), content_type=mime_type ) # Create function from zip file scan_fn = lambda_.Function( 'DynamoImagesScan', s3_bucket=web_bucket.id, s3_key=LAMBDA_VERSION+'/'+LAMBDA_SCAN_PACKAGE, handler="lambda_scan.handler", runtime="python3.7", role=iam.lambda_role.arn, environment={"variables": {"DYNAMODB_TABLE": db.id}} ) # Create endpoint using API Gateway scan_api = apigateway.RestApi( str(scan_fn.id), description='Pulumi Lambda API Gateway Example' ) proxy_root_met = apigateway.Method( 'proxy_root', rest_api=scan_api, resource_id=scan_api.root_resource_id,
"Action": ["s3:*"], "Resource": model_bucket.arn.apply(lambda b: f"{b}/*"), "Effect": "Allow", }], }), ) lambda_func = lambda_.Function("classifier-fn", code=pulumi.AssetArchive({ ".": pulumi.FileArchive("./app"), }), role=role.arn, timeout=300, memory_size=512, runtime="python3.6", handler="app.lambda_handler", layers=["arn:aws:lambda:us-west-2:934676248949:layer:pytorchv1-py36:2"], environment={ "variables": { "MODEL_BUCKET": model_bucket.bucket, "MODEL_KEY": model_object.key, } } ) # The stage name to use for the API Gateway URL custom_stage_name = "api" # Create the Swagger spec for a proxy which forwards all HTTP requests through to the Lambda function. def swagger_spec(lambda_arn): swagger_spec_returns = {
resource_name="policy-attachment", policy_arn="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", role=role.name, ) lambda_layer = lambda_.LayerVersion( resource_name="layer", layer_name="example-layer", compatible_runtimes=["python3.6"], code=lambda_package.layer_archive_path, source_code_hash=lambda_package.layer_hash, ) # Create Lambda function function = lambda_.Function( resource_name="function", role=role.arn, runtime="python3.6", description=f"Lambda function running the f`{pulumi.get_project()}` ({pulumi.get_stack()}) project", handler="handler.lambda_handler", code=lambda_package.package_archive, source_code_hash=lambda_package.package_hash, layers=[lambda_layer], ) # path of package archive and lambda layer # containing installed requirements pulumi.export("package_archive_path", lambda_package.package_archive) pulumi.export("lambda_function", function.name)
# Allow the bot's role to write traces to AWS X-Ray.
iam.RolePolicyAttachment(
    f"{MODULE_NAME}-xray",
    policy_arn="arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess",
    role=role.id,
)

# Bot function; code is a pre-built artifact in the morgue-artifacts bucket.
aws_lambda = lambda_.Function(
    f"{MODULE_NAME}",
    role=role.arn,
    runtime="python3.6",
    handler="lambda_handler.weapons_bot",
    s3_key=config.require("artifact_name"),
    s3_bucket="morgue-artifacts",
    # Active X-Ray tracing — pairs with the policy attachment above.
    tracing_config={"mode": "Active"},
    timeout=200,
    layers=[dependency_layer.arn],
    environment={
        "variables": {
            "CHAT_STREAM_ARN": chat_stream.arn,
            "CHAT_STREAM_NAME": chat_stream.name,
        }
    },
)

# lambda_.EventSourceMapping(
#     f"{MODULE_NAME}-sqs-esm",
#     event_source_arn=weapons_queue.arn,
#     function_name=aws_lambda.name,
# )
def __init__(self,
             name,
             datalake_bucket: s3.Bucket = None,
             datalake_raw_path: str = None,
             fileproc_bucket: s3.Bucket = None,
             managed_policy_arns: List[str] = None,
             package_dir: str = None,
             tags: Dict[str, str] = None,
             opts: pulumi.ResourceOptions = None):
    """Glue-notification Lambda component: execution role, inline policy,
    the function itself, and the S3 invoke permission.

    :param name: Base name for all child resources.
    :param datalake_bucket: Destination datalake bucket.
    :param datalake_raw_path: Raw-zone prefix within the datalake bucket.
    :param fileproc_bucket: Source bucket whose events invoke the function.
    :param managed_policy_arns: Optional managed-policy ARNs for the role.
    :param package_dir: Directory containing the pre-built lambda package.
    :param tags: Extra tags merged into taggable child resources.
    :param opts: Standard Pulumi resource options.
    """
    super().__init__('hca:GlueNotificationLambda', name, None, opts)
    # Every child resource is tagged with the PII data classification.
    merged_tags = tags.copy() if tags else {}
    merged_tags.update({'hca:dataclassification': 'pii'})
    role = iam.Role(f"{name}-role",
                    path="/lambda/",
                    description=f"role for glue notification lambda",
                    assume_role_policy=json.dumps({
                        "Version": "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "sts:AssumeRole",
                            "Principal": {
                                "Service": "lambda.amazonaws.com"
                            }
                        }]
                    }),
                    force_detach_policies=True,
                    tags=merged_tags,
                    opts=pulumi.ResourceOptions(parent=self))
    # attach managed policies
    if managed_policy_arns:
        for index, policy in enumerate(managed_policy_arns):
            iam.RolePolicyAttachment(
                f"{name}-attach-policy-{index}",
                policy_arn=policy,
                role=role,
                opts=pulumi.ResourceOptions(parent=self))
    # Inline policy is built once both bucket names are resolved.
    fileprocpolicy = iam.RolePolicy(
        f"{name}-inline-policy",
        role=role,
        policy=pulumi.Output.all(datalake_bucket.bucket,
                                 fileproc_bucket.bucket).apply(
                                     lambda b: inline_policy(b[0], b[1])),
        opts=pulumi.ResourceOptions(parent=self))
    self.function = lambda_.Function(
        f"{name}-function",
        runtime='python3.6',
        description=
        'copy files from fileproc bucket to datalake raw bucket and trigger glue jobs',
        handler='glue_notification.main',
        environment={
            'variables': {
                # NOTE(review): this passes the Bucket resource itself as an
                # env-var value; other components pass the bucket *name*
                # (e.g. datalake_bucket.bucket) — confirm which is intended.
                'S3_DATALAKE_BUCKET': datalake_bucket,
                'S3_RAW_PATH': datalake_raw_path,
                'PULUMI_STACK': pulumi.get_stack(),
                'PULUMI_PROJECT': pulumi.get_project()
            }
        },
        memory_size=256,
        timeout=60,
        code=pulumi.AssetArchive({
            # use lambda-glue-notification created with build.py
            '.': pulumi.FileArchive(package_dir),
        }),
        role=role.arn,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))
    # Allow S3 (the fileproc bucket) to invoke the function on events.
    lambda_.Permission(f"{name}-permission",
                       action='lambda:InvokeFunction',
                       principal='s3.amazonaws.com',
                       function=self.function,
                       source_arn=fileproc_bucket.arn,
                       opts=pulumi.ResourceOptions(parent=self))
def create_functions(appcode_path=None,
                     region=None,
                     account=None,
                     stage=None,
                     lambda_execution_role=None,
                     lambda_layers=None,
                     subnets=None,
                     lambda_sg=None,
                     redis_cluster=None,
                     rds_instance=None,
                     web_socket_api=None):
    """Create backend functions for Apigw

    Builds the three WebSocket-API backend Lambdas (sendmessage, connect,
    disconnect), all VPC-attached so they can reach Redis/RDS, and grants
    API Gateway permission to invoke each. Returns a dict keyed by route
    name ('connect', 'disconnect', 'sendmessage').
    """
    # Construct callback_url
    callback_url = web_socket_api.id.apply(
        lambda id: "https://{}.execute-api.{}.amazonaws.com/{}/".format(
            id, region.name, stage))
    rds_config = Config().require_object("rds_config")
    # create lambda permission for Apigateway invocations
    route_arn = web_socket_api.id.apply(
        lambda id: "arn:aws:execute-api:{}:{}:{}/*/*".format(
            region.name, account, id))

    # --- sendmessage: fan-out of chat messages via the callback URL ---
    send_message_path = os.path.join(appcode_path, 'chatapp-source/sendmessage')
    send_message_function = lambda_.Function(
        "sendmessagefunction",
        role=lambda_execution_role.arn,
        handler='lambda_handler.lambda_handler',
        description="Backend lambda to handle messaging",
        runtime='python3.8',
        code=AssetArchive({'.': FileArchive(send_message_path)}),
        layers=[lambda_layers.arn],
        timeout=10,
        vpc_config={
            "securityGroupIds": [lambda_sg.id],
            "subnetIds": [subnet.id for subnet in subnets]
        },
        environment={
            "variables": {
                "redis_hostname": redis_cluster.cache_nodes[0]['address'],
                "redis_port": '6379',
                # NOTE(review): empty Redis password — presumably auth-less
                # in-VPC cluster; confirm.
                "redis_password": '',
                "callbackurl": callback_url
            }
        })

    # --- connect: auth/connection bookkeeping; also needs RDS access ---
    connect_path = os.path.join(appcode_path, 'chatapp-source/connect')
    connect_function = lambda_.Function(
        "connectfunction",
        role=lambda_execution_role.arn,
        handler='lambda_handler.lambda_handler',
        description="Backend lambda to handle Auth/connections.",
        runtime='python3.8',
        code=AssetArchive({'.': FileArchive(connect_path)}),
        layers=[lambda_layers.arn],
        timeout=10,
        vpc_config={
            "securityGroupIds": [lambda_sg.id],
            "subnetIds": [subnet.id for subnet in subnets]
        },
        environment={
            "variables": {
                "redis_hostname": redis_cluster.cache_nodes[0]['address'],
                "redis_port": '6379',
                "redis_password": '',
                "callbackurl": callback_url,
                "rds_hostname": rds_instance.address,
                "rds_port": '3306',
                "rds_password": rds_config["password"],
                "rds_database": rds_config["database_name"],
                "rds_user": rds_config["username"]
            }
        })

    # --- disconnect: cleanup after connection termination ---
    disconnect_path = os.path.join(appcode_path, 'chatapp-source/disconnect')
    disconnect_function = lambda_.Function(
        "disconnectfunction",
        role=lambda_execution_role.arn,
        handler='lambda_handler.lambda_handler',
        description="Backend lambda to handle post connection termination.",
        runtime='python3.8',
        code=AssetArchive({'.': FileArchive(disconnect_path)}),
        layers=[lambda_layers.arn],
        timeout=10,
        vpc_config={
            "securityGroupIds": [lambda_sg.id],
            "subnetIds": [subnet.id for subnet in subnets]
        },
        environment={
            "variables": {
                "redis_hostname": redis_cluster.cache_nodes[0]['address'],
                "redis_port": '6379',
                "redis_password": ''
            }
        })

    # One invoke permission per function, all scoped to this API's routes.
    lambda_.Permission("lambdainvocationpermissions",
                       action="lambda:InvokeFunction",
                       principal="apigateway.amazonaws.com",
                       function=send_message_function.id,
                       source_arn=route_arn)
    lambda_.Permission("lambdainvocationpermissions-1",
                       action="lambda:InvokeFunction",
                       principal="apigateway.amazonaws.com",
                       function=connect_function.id,
                       source_arn=route_arn)
    lambda_.Permission("lambdainvocationpermissions-2",
                       action="lambda:InvokeFunction",
                       principal="apigateway.amazonaws.com",
                       function=disconnect_function.id,
                       source_arn=route_arn)
    return {
        "connect": connect_function,
        "disconnect": disconnect_function,
        "sendmessage": send_message_function
    }
def __init__(self):
    """Create Lambda functions (and optional SQS event-source mappings)
    from the YAML resource specs.

    Each entry in the spec yields one ``lambda_.Function``; all spec keys
    are optional (a bare entry means "all defaults"). Every created
    function is exported and registered in ``lambdas_by_name``.
    """
    resource_specs = ParseYAML(resource_type).getSpecs()
    for lambda_name, config in resource_specs.items():
        # A function entry with no body means "all defaults".
        config = config if config else {}
        resource_name = lambda_name
        resource_tags = config.get("tags")
        resource_env = config.get("environment")

        # Getting list of tags from configuration file
        tags_list = {}
        if resource_tags is not None:
            for each_tag_name, each_tag_value in resource_tags.items():
                tags_list.update({each_tag_name: each_tag_value})

        # Generating ENV vars
        env_list = {}
        if resource_env is not None:
            for each_env_name, each_env_value in resource_env.items():
                env_list.update({each_env_name: each_env_value})

        # Adding mandatory tags
        tags_list.update({"Name": resource_name})
        tags_list.update({
            "Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()
        })
        tags_list.update(resource_mandatory_tags)

        lambda_function = lambda_.Function(
            lambda_name,
            environment=lambda_.FunctionEnvironmentArgs(variables=env_list),
            handler=config.get("handler"),
            s3_bucket=config.get("s3_bucket"),
            s3_key=config.get("s3_key"),
            s3_object_version=config.get("s3_object_version"),
            memory_size=config.get("memory_size"),
            publish=config.get("publish"),
            reserved_concurrent_executions=config.get(
                "reserved_concurrent_executions"),
            role=IAM.RoleARN()[config.get("role")],
            runtime=config.get("runtime"),
            timeout=config.get("timeout"),
            tags=tags_list)

        # Export
        pulumi.export(lambda_function._name, lambda_function.id)

        # Event source mappings.
        # BUG FIX: config.get("event_source_mapping") returned None when the
        # key was absent (all other spec keys are optional), which crashed on
        # .items(); default to an empty dict so mappings stay optional too.
        for mapping_name, mapping_config in (
                config.get("event_source_mapping") or {}).items():
            event_source = mapping_config["event_source"]
            assert event_source.get(
                "type"
            ) == "sqs", "Just sqs is currently supported as event source mapping. You're welcome to implement more."
            source_arn = SQS.ByName()[event_source["name"]].arn
            mapping = lambda_.EventSourceMapping(
                mapping_name,
                event_source_arn=source_arn,
                function_name=lambda_function.arn,
                batch_size=mapping_config.get("batch_size"))
            pulumi.export(mapping_name, mapping.id)

        # Register the function for lookup by sibling components.
        lambdas_by_name[lambda_name] = lambda_function
lambda_variables = Output.all(dynamodb_table.name, bucket.id, chat_stream.arn, chat_stream.name, sns_topic.arn).apply( lambda args: { "CHARACTER_DB": args[0], "MORGUE_BUCKETNAME": args[1], "CHAT_STREAM_ARN": args[2], "CHAT_STREAM_NAME": args[3], "TOPIC_ARN": args[4], }) aws_lambda = lambda_.Function( f"{MODULE_NAME}", role=role.arn, runtime="python3.6", handler="lambda_handler.morgue_bot", s3_key=config.require("artifact_name"), s3_bucket="morgue-artifacts", timeout=200, tracing_config={"mode": "Active"}, environment={"variables": lambda_variables}, layers=[dependency_layer.arn], ) lambda_.Permission( "AllowInvocationFromMorgueFileBucket", action="lambda:InvokeFunction", function=aws_lambda.arn, principal="s3.amazonaws.com", source_arn=bucket.arn, ) s3.BucketNotification(
).apply( lambda args: { "CHARACTER_DB": args[0], "TOPIC_ARN": args[1], "WEAPONS_TOPIC": args[2], "CHAT_STREAM_NAME": args[3], } ) aws_lambda = lambda_.Function( f"{MODULE_NAME}", role=role.arn, runtime="python3.8", handler="lambda_handler.destinations", s3_key=config.require("artifact_name"), s3_bucket="morgue-artifacts", timeout=200, tracing_config={"mode": "Active"}, environment={"variables": lambda_variables}, layers=[dependency_layer.arn], # source_code_hash=filebase64sha256(f"build/{config.require('artifact_name')}") ) lambda_.EventSourceMapping( f"{MODULE_NAME}-dynamodb-esm", event_source_arn=dynamodb_table.stream_arn, function_name=aws_lambda.name, starting_position="LATEST", ) lambda_.EventSourceMapping(
iam.RolePolicyAttachment( f"{MODULE_NAME}-xray", policy_arn="arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess", role=role.id, ) aws_lambda = lambda_.Function( f"{MODULE_NAME}", role=role.arn, runtime="python3.6", handler="lambda_handler.twitch_chat_bot", s3_key=config.require("artifact_name"), s3_bucket="morgue-artifacts", tracing_config={"mode": "Active"}, timeout=200, layers=[dependency_layer.arn], environment={ "variables": { "CHARACTER_DB": dynamodb_table.name, "MORGUE_BUCKETNAME": bucket.id, "MORGUEBOT_TWITCH_OAUTH_TOKEN": twitch_oauth_token.ciphertext_blob, "MORGUEBOT_BOT_NAME": "beginbotbot", "MORGUEBOT_CHANNEL": "beginbot", } }, ) lambda_.EventSourceMapping( f"{MODULE_NAME}-kinesis-very-cool-esm", event_source_arn=chat_stream.arn, function_name=aws_lambda.name, starting_position="LATEST",
policy=bucket.arn.apply(lambda_role_policy), ) iam.RolePolicyAttachment( f"{MODULE_NAME}-xray", policy_arn="arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess", role=role.id, ) aws_lambda = lambda_.Function( f"{MODULE_NAME}", role=role.arn, runtime="python3.6", handler="lambda_handler.morgue_stalker", s3_key=config.require("artifact_name"), s3_bucket="morgue-artifacts", tracing_config={"mode": "Active"}, timeout=900, layers=[dependency_layer.arn], environment={"variables": { "MORGUE_BUCKETNAME": bucket.id }}, ) event_rule = cloudwatch.EventRule( f"{MODULE_NAME}-event-rule", name=f"{MODULE_NAME}-very-cool-every-minute", schedule_expression="rate(10 minutes)", ) event_target = cloudwatch.EventTarget(f"{MODULE_NAME}-event-target", arn=aws_lambda.arn,
mime_type, _ = mimetypes.guess_type(LAMBDA_PACKAGE) deploy_package = s3.BucketObject('deploy_package', key=LAMBDA_VERSION + '/' + LAMBDA_PACKAGE, bucket=bucket.id, source=FileAsset(LAMBDA_PACKAGE), content_type=mime_type) example_fn = lambda_.Function('ServerlessExample', s3_bucket=deploy_package.bucket, s3_key=deploy_package.key, handler="lambda.handler", runtime="python3.7", role=iam.lambda_role.arn, timeout=10, source_code_hash=str(deploy_64_hash), environment={ "variables": { "REDIS_ENDPOINT": cache.cache_nodes[0]['address'] } }, vpc_config={ "subnet_ids": subnet_ids, "security_group_ids": security_group_ids }) example_api = apigateway.RestApi( 'ServerlessExample', description='Pulumi Lambda API Gateway Example') proxy_root_met = apigateway.Method('proxy_root', rest_api=example_api, resource_id=example_api.root_resource_id,