def add_lambda(self, archive_path: str, sns_topic: sns.Topic):
    """ Create lambda function with sns invoke permission """
    lambda_role = iam.Role(
        resource_name=format_resource_name("lambda-role"),
        assume_role_policy="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "sts:AssumeRole",
                    "Principal": {
                        "Service": "lambda.amazonaws.com"
                    },
                    "Effect": "Allow",
                    "Sid": ""
                }
            ]
        }""")
    lambda_role_policy = iam.RolePolicy(
        resource_name=format_resource_name("lambda-policy"),
        role=lambda_role.id,
        policy="""{
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Action": [
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents"
                ],
                "Resource": "arn:aws:logs:*:*:*"
            }]
        }""")
    mail_processor_function = lambda_.Function(
        resource_name=format_resource_name("function"),
        role=lambda_role.arn,
        runtime="python3.7",
        handler="handler.lambda_handler",
        # `code` expects a pulumi.Archive, not a raw path string
        code=pulumi.FileArchive(archive_path),
        source_code_hash=filebase64sha256(archive_path))
    allow_sns = lambda_.Permission(
        resource_name=format_resource_name("permissions"),
        action="lambda:InvokeFunction",
        function=mail_processor_function.name,
        principal="sns.amazonaws.com",
        source_arn=sns_topic.arn)
    return mail_processor_function
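
# Two gaps around the snippet above, sketched here as assumptions rather than
# parts of the original module: the Permission lets SNS invoke the function,
# but the function still has to be subscribed to the topic, and the
# `filebase64sha256` helper (mirroring Terraform's function of the same name)
# is referenced but not shown. Minimal versions of both could look like this:
import base64
import hashlib


def filebase64sha256(path: str) -> str:
    """Return the base64-encoded SHA-256 digest of a file."""
    with open(path, "rb") as f:
        return base64.b64encode(hashlib.sha256(f.read()).digest()).decode()


def subscribe_lambda(topic: sns.Topic, function: lambda_.Function):
    # hypothetical helper: deliver topic messages to the function
    return sns.TopicSubscription(
        format_resource_name("subscription"),
        topic=topic.arn,
        protocol="lambda",
        endpoint=function.arn)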
def main():
    hello_world_lambda = lambda_.Function(
        "hello_world",
        runtime="python3.7",
        role=iam.lambda_role.arn,
        description="pulumi lambda hello world",
        handler="main.handler",
        code=pulumi.AssetArchive({".": pulumi.FileArchive("./lambda")}),
    )
    hello_world_api = apigw.APIGateway("hello_world", hello_world_lambda)
    hello_world_api = hello_world_api.build()
    lambda_.Permission(
        "hello_world",
        function=hello_world_lambda.name,
        action="lambda:InvokeFunction",
        principal="apigateway.amazonaws.com",
        source_arn=hello_world_api.execution_arn.apply(lambda s: f"{s}/*/*"),
    )
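
# The function above points at handler="main.handler" inside ./lambda. A
# minimal sketch of what ./lambda/main.py could contain for an API Gateway
# proxy integration (hypothetical file, not from the original repo):
def handler(event, context):
    # API Gateway proxy integrations expect this response shape
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "text/plain"},
        "body": "Hello, world!",
    }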
    role=role.arn,
    runtime="python3.6",
    handler="lambda_handler.morgue_stalker",
    s3_key=config.require("artifact_name"),
    s3_bucket="morgue-artifacts",
    tracing_config={"mode": "Active"},
    timeout=900,
    layers=[dependency_layer.arn],
    environment={"variables": {
        "MORGUE_BUCKETNAME": bucket.id
    }},
)

event_rule = cloudwatch.EventRule(
    f"{MODULE_NAME}-event-rule",
    # NOTE: the resource name says "every-minute" but the schedule actually
    # fires every 10 minutes
    name=f"{MODULE_NAME}-very-cool-every-minute",
    schedule_expression="rate(10 minutes)",
)
event_target = cloudwatch.EventTarget(
    f"{MODULE_NAME}-event-target",
    arn=aws_lambda.arn,
    rule=event_rule.name)
lambda_.Permission(
    "AllowInvocationFromCloudWatch",
    action="lambda:InvokeFunction",
    function=aws_lambda.arn,
    principal="events.amazonaws.com",
    source_arn=event_rule.arn,
)
def __init__(self,
             name: str,
             stack: str,
             issue: str,
             runtime: str,
             handler: str,
             lambda_archive: pulumi.Input[pulumi.Archive],
             source_code_hash: str = None,
             memory_size_mb: int = 128,
             timeout: int = 1,
             opts: pulumi.ResourceOptions = None):
    """
    Create a Lambda for use with CloudFront; pass a us-east-1 provider in opts.
    Creates a Role and grants permissions for edgelambda.amazonaws.com.

    :param name: Name of the component
    :param stack: Name of the stack (staging or prod, for example), used for tags
    :param issue: Issue tracker id, used for tags
    :param runtime: Lambda runtime; supported runtimes:
        https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-requirements-limits.html#lambda-requirements-lambda-function-configuration
    :param handler: Lambda handler
    :param lambda_archive: Archive with the Lambda code
    :param source_code_hash: base64(sha256(lambda.zip))
    :param memory_size_mb: Lambda memory size in MB; 128 MB max for viewer
        request and response events
    :param timeout: Lambda timeout; max 30 seconds for origin request and
        response events, max 5 seconds for viewer request and response events,
        see details at
        https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-requirements-limits.html#lambda-requirements-see-limits
    :param opts: Standard Pulumi ResourceOptions
    """
    super().__init__('LambdaEdge', name, None, opts)
    self.name = name
    self.stack = stack
    self.issue = issue
    if timeout > 30:
        raise LambdaTimeoutValidation(
            'Maximum timeout for lambda@edge is 30 seconds for origin events '
            'and 5 seconds for viewer events')
    self.tags = {
        'lambda-edge': f'{self.name}-{self.stack}',
        'stack': self.stack,
        'issue': self.issue,
    }
    role = iam.Role(f'{name}-lambda-role',
                    path='/service-role/',
                    assume_role_policy=json.dumps(LAMBDA_ROLE),
                    tags=self.tags,
                    opts=pulumi.ResourceOptions(parent=self))
    iam.RolePolicy(f'{name}-lambda-policy',
                   role=role.id,
                   policy=json.dumps(LAMBDA_CLOUDWATCH_POLICY),
                   opts=pulumi.ResourceOptions(parent=self))
    lambda_edge = lambda_.Function(
        f'{name}-lambda-edge',
        description=f'Handler for processing index.html for stack: {stack}, '
        f'issue: {issue}',
        runtime=runtime,
        handler=handler,
        code=lambda_archive,
        source_code_hash=source_code_hash,
        memory_size=memory_size_mb,
        timeout=timeout,
        publish=True,  # Lambda@Edge must reference a published version
        tags=self.tags,
        role=role.arn,
        opts=pulumi.ResourceOptions(parent=self))
    lambda_.Permission(f'{name}-lambda-edge-permission',
                       action='lambda:GetFunction',
                       function=lambda_edge,
                       principal='edgelambda.amazonaws.com',
                       opts=pulumi.ResourceOptions(parent=self))
    self.timeout = lambda_edge.timeout
    self.arn = lambda_edge.arn
    self.lambda_edge = lambda_edge
    self.register_outputs({
        'timeout': self.timeout,
        'arn': self.arn,
    })
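
# Sketch of wiring this component into a CloudFront cache behavior. Lambda@Edge
# associations require the *versioned* ARN, so use `qualified_arn` rather than
# the unqualified `arn` registered above. The distribution config, provider
# variable, and event type are assumptions, not part of the original component:
edge = LambdaEdge('index-rewrite', stack='staging', issue='WEB-123',
                  runtime='python3.7', handler='index.handler',
                  lambda_archive=pulumi.FileArchive('./edge'),
                  opts=pulumi.ResourceOptions(provider=us_east_1))
default_cache_behavior = {
    # ... other cache behavior settings elided ...
    'lambda_function_associations': [{
        'event_type': 'origin-request',
        'lambda_arn': edge.lambda_edge.qualified_arn,
    }],
}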
def __init__(self,
             name,
             scripts_bucket: s3.Bucket = None,
             managed_policy_arns: List[str] = None,
             tags: Dict[str, str] = None,
             opts: pulumi.ResourceOptions = None):
    super().__init__('hca:ScriptArchiveLambda', name, None, opts)
    merged_tags = tags.copy() if tags else {}
    merged_tags.update({'hca:dataclassification': 'pii'})
    role = iam.Role(f"{name}-role",
                    path="/lambda/",
                    description="role for script archive lambda",
                    assume_role_policy=json.dumps({
                        "Version": "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "sts:AssumeRole",
                            "Principal": {
                                "Service": "lambda.amazonaws.com"
                            }
                        }]
                    }),
                    force_detach_policies=True,
                    tags=merged_tags,
                    opts=pulumi.ResourceOptions(parent=self))

    # attach managed policies
    if managed_policy_arns:
        for index, policy in enumerate(managed_policy_arns):
            iam.RolePolicyAttachment(
                f"{name}-attach-policy-{index}",
                policy_arn=policy,
                role=role,
                opts=pulumi.ResourceOptions(parent=self))

    fileprocpolicy = iam.RolePolicy(
        f"{name}-inline-policy",
        role=role,
        policy=scripts_bucket.bucket.apply(inline_policy),
        opts=pulumi.ResourceOptions(parent=self))

    print(
        f"archive function => {os.path.abspath(os.path.join(os.getcwd(), '../../src/lambdas/scripts_archive.py'))}"
    )
    self.function = lambda_.Function(
        f"{name}-function",
        runtime='python3.6',
        description='copy files from fileproc bucket to datalake raw bucket '
        'and trigger glue jobs',
        handler='index.main',
        memory_size=128,
        timeout=30,
        code=pulumi.AssetArchive({
            # NOTE use relative path from pulumi root
            'index.py':
            pulumi.FileAsset(
                os.path.abspath(
                    os.path.join(os.getcwd(),
                                 '../../src/lambdas/scripts_archive.py'))),
        }),
        # code=pulumi.FileAsset(os.path.abspath(os.path.join(os.getcwd(), '../../src/lambdas/scripts_archive.py'))),
        role=role.arn,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))
    lambda_.Permission(f"{name}-permission",
                       action='lambda:InvokeFunction',
                       principal='s3.amazonaws.com',
                       function=self.function,
                       source_arn=scripts_bucket.arn,
                       opts=pulumi.ResourceOptions(parent=self))
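
    # Sketch (not in the original component): the Permission above lets S3
    # invoke the function, but the bucket also needs a notification
    # configuration before any events are delivered. The ObjectCreated filter
    # is an assumption; create this after the Permission so S3 can validate
    # the configuration when it is applied.
    s3.BucketNotification(
        f"{name}-notification",
        bucket=scripts_bucket.id,
        lambda_functions=[{
            "lambda_function_arn": self.function.arn,
            "events": ["s3:ObjectCreated:*"],
        }],
        opts=pulumi.ResourceOptions(parent=self))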
def AwsgiHandler(self, name, zone, domain, package, func, __opts__, **lambdaargs):
    """ Define a handler to accept requests, using awsgi """
    func = package.function(f"{name}-function", func, **lambdaargs,
                            **opts(parent=self))

    invoke_policy = lambda_.Permission(
        f'{name}-function-permission',
        function=func,
        action='lambda:InvokeFunction',
        principal='elasticloadbalancing.amazonaws.com',
        **opts(parent=func))

    netinfo = get_public_subnets(opts=__opts__)

    @netinfo.apply
    def vpc_id(info):
        vpc, subnets, is_v6 = info
        return vpc.id

    @netinfo.apply
    def netstack(info):
        vpc, subnets, is_v6 = info
        return 'dualstack' if is_v6 else 'ipv4'

    @netinfo.apply
    def subnet_ids(info):
        vpc, subnets, is_v6 = info
        return [sn.id for sn in subnets]

    cert = Certificate(f"{name}-cert", domain=domain, zone=zone,
                       **opts(parent=self))

    # TODO: Cache this
    sg = ec2.SecurityGroup(
        f"{name}-sg",
        vpc_id=vpc_id,
        ingress=[
            {'from_port': 80, 'to_port': 80, 'protocol': "tcp",
             'cidr_blocks': ['0.0.0.0/0']},
            {'from_port': 443, 'to_port': 443, 'protocol': "tcp",
             'cidr_blocks': ['0.0.0.0/0']},
            {'from_port': 80, 'to_port': 80, 'protocol': "tcp",
             'ipv6_cidr_blocks': ['::/0']},
            {'from_port': 443, 'to_port': 443, 'protocol': "tcp",
             'ipv6_cidr_blocks': ['::/0']},
        ],
        egress=[
            {'from_port': 0, 'to_port': 0, 'protocol': "-1",
             'cidr_blocks': ['0.0.0.0/0']},
            {'from_port': 0, 'to_port': 0, 'protocol': "-1",
             'ipv6_cidr_blocks': ['::/0']},
        ],
        **opts(parent=self))

    alb = elb.LoadBalancer(f"{name}-alb",
                           load_balancer_type='application',
                           subnets=subnet_ids,
                           ip_address_type=netstack,
                           security_groups=[sg],
                           enable_http2=True,
                           **opts(parent=self))

    target = elb.TargetGroup(
        f"{name}-target",
        target_type='lambda',
        # AWSGI does not support multi-value headers yet
        lambda_multi_value_headers_enabled=False,
        health_check={
            'enabled': True,
            'path': '/',
            'matcher': '200-299',
            'interval': 30,
            'timeout': 5,
        },
        **opts(parent=self))

    elb.TargetGroupAttachment(f"{name}-target-func",
                              target_group_arn=target.arn,
                              target_id=func.arn,
                              **opts(depends_on=[invoke_policy], parent=self))

    elb.Listener(f"{name}-http",
                 load_balancer_arn=alb.arn,
                 port=80,
                 protocol='HTTP',
                 default_actions=[{
                     'type': 'forward',
                     'target_group_arn': target.arn,
                 }],
                 **opts(parent=self))

    elb.Listener(f"{name}-https",
                 load_balancer_arn=alb.arn,
                 port=443,
                 protocol='HTTPS',
                 ssl_policy='ELBSecurityPolicy-TLS-1-2-Ext-2018-06',
                 certificate_arn=cert.cert_arn,
                 default_actions=[{
                     'type': 'forward',
                     'target_group_arn': target.arn,
                 }],
                 **opts(parent=self))

    a_aaaa(
        f"{name}-record",
        name=domain,
        zone_id=zone.zone_id,
        aliases=[
            {
                'name': alb.dns_name,
                'zone_id': alb.zone_id,
                'evaluate_target_health': True,
            },
        ],
        **opts(parent=self),
    )
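
# Sketch of the function side of this setup: a WSGI app served through the
# `awsgi` adapter. The Flask app and module layout are assumptions; only
# awsgi.response(app, event, context) is the library's documented entry point.
# ALB targets receive ELB-formatted events, which awsgi also understands.
import awsgi
from flask import Flask

app = Flask(__name__)


@app.route('/')
def index():
    return 'Hello from Lambda behind an ALB'


def handler(event, context):
    return awsgi.response(app, event, context)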
    resource_id=proxy_root_met.resource_id,
    http_method=proxy_root_met.http_method,
    integration_http_method='POST',
    type='AWS_PROXY',
    uri=example_fn.invoke_arn)

example_dep = apigateway.Deployment(
    'example',
    rest_api=example_api,
    stage_name="example-test",
    __opts__=ResourceOptions(depends_on=[example_root_int]))

example_perm = lambda_.Permission(
    "apigw",
    statement_id="AllowAPIGatewayInvoke",
    action="lambda:InvokeFunction",
    function=example_fn,
    principal="apigateway.amazonaws.com",
    source_arn=example_dep.execution_arn.apply(lambda x: f"{x}/*/*"))

# Export the name of the bucket with lambda code
# List bucket with:
#   aws s3 ls --recursive `pulumi stack output bucket_name`
export('bucket_name', bucket.id)

# Export the name of the lambda
# Test with:
#   aws lambda invoke --region=eu-west-1 --function-name=`pulumi stack output lambda_name` output.txt
export('lambda_name', example_fn.id)

# Export the URL of the API endpoint
export('base_url', example_dep.invoke_url)
def create_functions(appcode_path=None,
                     region=None,
                     account=None,
                     stage=None,
                     lambda_execution_role=None,
                     lambda_layers=None,
                     subnets=None,
                     lambda_sg=None,
                     redis_cluster=None,
                     rds_instance=None,
                     web_socket_api=None):
    """Create backend functions for Apigw"""
    # Construct the callback URL the functions use to post back to clients
    callback_url = web_socket_api.id.apply(
        lambda id: "https://{}.execute-api.{}.amazonaws.com/{}/".format(
            id, region.name, stage))
    rds_config = Config().require_object("rds_config")
    # ARN used to scope the API Gateway invocation permissions created below
    route_arn = web_socket_api.id.apply(
        lambda id: "arn:aws:execute-api:{}:{}:{}/*/*".format(
            region.name, account, id))

    send_message_path = os.path.join(appcode_path, 'chatapp-source/sendmessage')
    send_message_function = lambda_.Function(
        "sendmessagefunction",
        role=lambda_execution_role.arn,
        handler='lambda_handler.lambda_handler',
        description="Backend lambda to handle messaging",
        runtime='python3.8',
        code=AssetArchive({'.': FileArchive(send_message_path)}),
        layers=[lambda_layers.arn],
        timeout=10,
        vpc_config={
            "securityGroupIds": [lambda_sg.id],
            "subnetIds": [subnet.id for subnet in subnets]
        },
        environment={
            "variables": {
                "redis_hostname": redis_cluster.cache_nodes[0]['address'],
                "redis_port": '6379',
                "redis_password": '',
                "callbackurl": callback_url
            }
        })

    connect_path = os.path.join(appcode_path, 'chatapp-source/connect')
    connect_function = lambda_.Function(
        "connectfunction",
        role=lambda_execution_role.arn,
        handler='lambda_handler.lambda_handler',
        description="Backend lambda to handle Auth/connections.",
        runtime='python3.8',
        code=AssetArchive({'.': FileArchive(connect_path)}),
        layers=[lambda_layers.arn],
        timeout=10,
        vpc_config={
            "securityGroupIds": [lambda_sg.id],
            "subnetIds": [subnet.id for subnet in subnets]
        },
        environment={
            "variables": {
                "redis_hostname": redis_cluster.cache_nodes[0]['address'],
                "redis_port": '6379',
                "redis_password": '',
                "callbackurl": callback_url,
                "rds_hostname": rds_instance.address,
                "rds_port": '3306',
                "rds_password": rds_config["password"],
                "rds_database": rds_config["database_name"],
                "rds_user": rds_config["username"]
            }
        })

    disconnect_path = os.path.join(appcode_path, 'chatapp-source/disconnect')
    disconnect_function = lambda_.Function(
        "disconnectfunction",
        role=lambda_execution_role.arn,
        handler='lambda_handler.lambda_handler',
        description="Backend lambda to handle post connection termination.",
        runtime='python3.8',
        code=AssetArchive({'.': FileArchive(disconnect_path)}),
        layers=[lambda_layers.arn],
        timeout=10,
        vpc_config={
            "securityGroupIds": [lambda_sg.id],
            "subnetIds": [subnet.id for subnet in subnets]
        },
        environment={
            "variables": {
                "redis_hostname": redis_cluster.cache_nodes[0]['address'],
                "redis_port": '6379',
                "redis_password": ''
            }
        })

    lambda_.Permission("lambdainvocationpermissions",
                       action="lambda:InvokeFunction",
                       principal="apigateway.amazonaws.com",
                       function=send_message_function.id,
                       source_arn=route_arn)
    lambda_.Permission("lambdainvocationpermissions-1",
                       action="lambda:InvokeFunction",
                       principal="apigateway.amazonaws.com",
                       function=connect_function.id,
                       source_arn=route_arn)
    lambda_.Permission("lambdainvocationpermissions-2",
                       action="lambda:InvokeFunction",
                       principal="apigateway.amazonaws.com",
                       function=disconnect_function.id,
                       source_arn=route_arn)

    return {
        "connect": connect_function,
        "disconnect": disconnect_function,
        "sendmessage": send_message_function
    }
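
# Sketch of how a handler like sendmessage can push data back over the socket
# using the callback URL injected above. The event shape and payload are
# assumptions; post_to_connection is the real API Gateway Management API call:
import json
import os

import boto3


def lambda_handler(event, context):
    client = boto3.client("apigatewaymanagementapi",
                          endpoint_url=os.environ["callbackurl"])
    connection_id = event["requestContext"]["connectionId"]
    client.post_to_connection(ConnectionId=connection_id,
                              Data=json.dumps({"message": "pong"}).encode())
    return {"statusCode": 200}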
aws_lambda = lambda_.Function(
    f"{MODULE_NAME}",
    role=role.arn,
    runtime="python3.6",
    handler="lambda_handler.morgue_bot",
    s3_key=config.require("artifact_name"),
    s3_bucket="morgue-artifacts",
    timeout=200,
    tracing_config={"mode": "Active"},
    environment={"variables": lambda_variables},
    layers=[dependency_layer.arn],
)

lambda_.Permission(
    "AllowInvocationFromMorgueFileBucket",
    action="lambda:InvokeFunction",
    function=aws_lambda.arn,
    principal="s3.amazonaws.com",
    source_arn=bucket.arn,
)

s3.BucketNotification(
    f"{MODULE_NAME}-new-morgue-files",
    bucket=bucket.id,
    lambda_functions=[{
        "events": ["s3:ObjectCreated:*"],
        "lambda_function_arn": aws_lambda.arn
    }],
)
"CHAT_STREAM_ARN": args[2], "CHAT_STREAM_NAME": args[3], }) aws_lambda = lambda_.Function( f"{MODULE_NAME}", role=role.arn, runtime="python3.6", handler="handler.lambda_handler", s3_key=config.require("artifact_name"), tracing_config={"mode": "Active"}, s3_bucket="morgue-artifacts", timeout=200, layers=[dependency_layer.arn], environment={"variables": lambda_variables}, ) # lambda_.EventSourceMapping( # f"{MODULE_NAME}-sqs-esm", # event_source_arn=xl_upgrades_queue.arn, # function_name=aws_lambda.name, # ) lambda_.Permission( "AllowInvocationFromSQSQueue", action="lambda:InvokeFunction", function=aws_lambda.arn, principal="sqs.amazonaws.com", source_arn=xl_upgrades_queue.arn, )
def __init__(self,
             name,
             datalake_bucket: s3.Bucket = None,
             datalake_raw_path: str = None,
             fileproc_bucket: s3.Bucket = None,
             managed_policy_arns: List[str] = None,
             package_dir: str = None,
             tags: Dict[str, str] = None,
             opts: pulumi.ResourceOptions = None):
    super().__init__('hca:GlueNotificationLambda', name, None, opts)
    merged_tags = tags.copy() if tags else {}
    merged_tags.update({'hca:dataclassification': 'pii'})
    role = iam.Role(f"{name}-role",
                    path="/lambda/",
                    description="role for glue notification lambda",
                    assume_role_policy=json.dumps({
                        "Version": "2012-10-17",
                        "Statement": [{
                            "Effect": "Allow",
                            "Action": "sts:AssumeRole",
                            "Principal": {
                                "Service": "lambda.amazonaws.com"
                            }
                        }]
                    }),
                    force_detach_policies=True,
                    tags=merged_tags,
                    opts=pulumi.ResourceOptions(parent=self))

    # attach managed policies
    if managed_policy_arns:
        for index, policy in enumerate(managed_policy_arns):
            iam.RolePolicyAttachment(
                f"{name}-attach-policy-{index}",
                policy_arn=policy,
                role=role,
                opts=pulumi.ResourceOptions(parent=self))

    fileprocpolicy = iam.RolePolicy(
        f"{name}-inline-policy",
        role=role,
        policy=pulumi.Output.all(datalake_bucket.bucket,
                                 fileproc_bucket.bucket).apply(
                                     lambda b: inline_policy(b[0], b[1])),
        opts=pulumi.ResourceOptions(parent=self))

    self.function = lambda_.Function(
        f"{name}-function",
        runtime='python3.6',
        description='copy files from fileproc bucket to datalake raw bucket '
        'and trigger glue jobs',
        handler='glue_notification.main',
        environment={
            'variables': {
                # pass the bucket *name* (an Output[str]); environment
                # variable values must be strings, not resources
                'S3_DATALAKE_BUCKET': datalake_bucket.bucket,
                'S3_RAW_PATH': datalake_raw_path,
                'PULUMI_STACK': pulumi.get_stack(),
                'PULUMI_PROJECT': pulumi.get_project()
            }
        },
        memory_size=256,
        timeout=60,
        code=pulumi.AssetArchive({
            # use lambda-glue-notification created with build.py
            '.': pulumi.FileArchive(package_dir),
        }),
        role=role.arn,
        tags=merged_tags,
        opts=pulumi.ResourceOptions(parent=self))
    lambda_.Permission(f"{name}-permission",
                       action='lambda:InvokeFunction',
                       principal='s3.amazonaws.com',
                       function=self.function,
                       source_arn=fileproc_bucket.arn,
                       opts=pulumi.ResourceOptions(parent=self))
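
# Sketch of the glue_notification.main handler this component deploys: copy
# the new object into the raw area of the datalake bucket, then kick off a
# Glue job. The key layout and job-name scheme are assumptions; copy_object
# and start_job_run are the real boto3 calls. S3_DATALAKE_BUCKET and
# S3_RAW_PATH are the environment variables set above.
import os

import boto3


def main(event, context):
    record = event["Records"][0]["s3"]
    src_bucket, key = record["bucket"]["name"], record["object"]["key"]
    boto3.client("s3").copy_object(
        Bucket=os.environ["S3_DATALAKE_BUCKET"],
        Key=f"{os.environ['S3_RAW_PATH']}/{key}",
        CopySource={"Bucket": src_bucket, "Key": key})
    boto3.client("glue").start_job_run(JobName=f"process-{key.split('/')[0]}")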
"APP": service_naming_convention + "-app_", "AMI_LIMIT": cleanup_amis_conf.require('ami_limit') }), tags={ "Name": service_naming_convention + "-" + lambda_name, "Application": appname, "Description": "Lambda to cleanup old AMIs for ASG", "Environment": env, "Role": "Lambda", "Pulumi": "True" }) event_rule = cloudwatch.EventRule( service_naming_convention + "-cleanup_old_amis-rule", name=service_naming_convention + '-cleanup-old-amis-event', description="This is lambda for cleanup old amis", schedule_expression="cron(0 3 ? * SUN *)") cloudwatch.EventTarget(service_naming_convention + "-cleanup-old-amis-target", arn=cleanup_old_amis.arn, rule=event_rule.name) lambda_.Permission(service_naming_convention + "-cleanup-old-amis-permission", action="lambda:InvokeFunction", function=cleanup_old_amis.name, principal="events.amazonaws.com", source_arn=event_rule.arn) export('lambda_name', cleanup_old_amis.id) export('bucket_name', LAMBDA_BUCKET)
    type='AWS_PROXY',
    uri=scan_fn.invoke_arn)

scan_dep = apigateway.Deployment(
    'images_scan',
    rest_api=scan_api,
    stage_name="images_scan-dev",
    __opts__=ResourceOptions(depends_on=[scan_root_int]))

# Allow API Gateway to invoke the scan function
scan_perm = lambda_.Permission(
    "apigw",
    statement_id="AllowAPIGatewayInvoke",
    action="lambda:InvokeFunction",
    function=scan_fn,
    principal="apigateway.amazonaws.com",
    source_arn=scan_dep.execution_arn.apply(lambda x: f"{x}/*/*"))

# Lambda function for S3 trigger
lambda_rekognition = lambda_.Function(
    resource_name='ImagesRekognition',
    role=iam.lambda_role.arn,
    runtime="python3.7",
    handler="lambda_rekognition.lambda_handler",
    code=pulumi.AssetArchive({
        '.': pulumi.FileArchive('./lambda_rekognition')
    }),
    environment={"variables": {"DYNAMODB_TABLE": db.id}})
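
# Sketch of what ./lambda_rekognition/lambda_rekognition.py could do: label
# the uploaded image with Rekognition and store the result in DynamoDB. The
# event parsing and the table's item shape are assumptions; detect_labels and
# put_item are the real boto3 calls:
import os

import boto3


def lambda_handler(event, context):
    record = event["Records"][0]["s3"]
    bucket, key = record["bucket"]["name"], record["object"]["key"]
    labels = boto3.client("rekognition").detect_labels(
        Image={"S3Object": {"Bucket": bucket, "Name": key}},
        MaxLabels=10)
    table = boto3.resource("dynamodb").Table(os.environ["DYNAMODB_TABLE"])
    table.put_item(Item={
        "image": key,
        "labels": [label["Name"] for label in labels["Labels"]],
    })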
# Create the API Gateway Rest API, using a swagger spec.
rest_api = apigateway.RestApi(
    "api",
    body=lambda_func.arn.apply(lambda lambda_arn: swagger_spec(lambda_arn)),
)

# Create a deployment of the Rest API.
deployment = apigateway.Deployment(
    "api-deployment",
    rest_api=rest_api,
    # Note: Set to empty to avoid creating an implicit stage; we'll create it
    # explicitly below instead.
    stage_name="")

# Create a stage, which is an addressable instance of the Rest API. Set it to
# point at the latest deployment.
stage = apigateway.Stage(
    "api-stage",
    rest_api=rest_api,
    deployment=deployment,
    stage_name=custom_stage_name,
)

# Give API Gateway permission to invoke the Lambda
invoke_permission = lambda_.Permission(
    "api-lambda-permission",
    action="lambda:InvokeFunction",
    function=lambda_func,
    principal="apigateway.amazonaws.com",
    source_arn=deployment.execution_arn.apply(
        lambda execution_arn: execution_arn + "*/*"),
)

# Export the https endpoint of the running Rest API
pulumi.export("endpoint",
              deployment.invoke_url.apply(lambda url: url + custom_stage_name))
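
# The swagger_spec helper isn't shown in this excerpt. A minimal sketch of
# what it might return for a single proxy route -- the
# x-amazon-apigateway-integration URI format is the documented one for Lambda
# proxy integrations; the route, title, and region handling (assuming
# `import pulumi_aws as aws`) are assumptions:
import json


def swagger_spec(lambda_arn: str) -> str:
    region = aws.config.region
    return json.dumps({
        "swagger": "2.0",
        "info": {"title": "api", "version": "1.0"},
        "paths": {
            "/{proxy+}": {
                "x-amazon-apigateway-any-method": {
                    "x-amazon-apigateway-integration": {
                        "uri": f"arn:aws:apigateway:{region}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations",
                        "passthroughBehavior": "when_no_match",
                        "httpMethod": "POST",
                        "type": "aws_proxy",
                    },
                },
            },
        },
    })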
######## LAMBDAS ###########

api_airtable = lambda_.Function(
    'api-airtable',
    role=api_lambda_role.arn,
    runtime="python3.8",
    handler="handler.app",
    # layers=[api_airtable_layer.arn],
    code=pulumi.AssetArchive({'.': pulumi.FileArchive('./step_hello')}),
    timeout=30,
    memory_size=512,
)

# NOTE: without a source_arn, this grants invoke to the API Gateway service
# as a whole rather than to one specific API
api_lambda_permission = lambda_.Permission(
    'api-lambda-permission',
    action="lambda:InvokeFunction",
    principal="apigateway.amazonaws.com",
    function=api_airtable.name)

# env = Environment(loader=FileSystemLoader('./'), trim_blocks=True, lstrip_blocks=True)
# openapi_spec_template = env.get_template('api.yaml')

marv_api_key = apigateway.ApiKey('marv-internal')

# Start of api.yaml
first_part_swagger_openapi = """
swagger: "2.0"
info:
  version: "2021-03-29T15:07:58Z"
  title: "marv"
basePath: "/dev"
deployment = apigateway.Deployment(
    "exampleDeployment",
    rest_api=gateway.id,
    opts=ResourceOptions(depends_on=[*methods, *integrations]),
)

stage = apigateway.Stage("exampleStage",
                         deployment=deployment.id,
                         rest_api=gateway.id,
                         stage_name="dev")

lambda_permission = lambda_.Permission(
    "lambdaPermission",
    action="lambda:InvokeFunction",
    function=example_function.name,
    principal="apigateway.amazonaws.com",
    source_arn=gateway.execution_arn.apply(
        lambda execution_arn: f"{execution_arn}/*/*/*"),
    opts=ResourceOptions(depends_on=[example_function, deployment]),
)

pulumi.export("file_system_id", environment.file_system_id)
pulumi.export("vpc_id", environment.vpc_id)
pulumi.export("public_subnets", environment.public_subnet_ids)
pulumi.export("private_subnet", environment.private_subnet_id)
pulumi.export("security_group_id", environment.security_group_id)
pulumi.export("pulumi_access_token_parameter_name",
              environment.pulumi_token_param_name)
pulumi.export(
    "api_endpoint",
    gateway.id.apply(