def __init__(self, scope: core.Construct, id: str, downstream: _lambda.IFunction, **kwargs):
    """Hit-counter construct: a DynamoDB table plus a Lambda that records hits
    before invoking *downstream*.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream: Lambda function to invoke after recording the hit.
    """
    super().__init__(scope, id, **kwargs)

    # Every DynamoDB table needs exactly one partition key; 'path' holds the
    # request path being counted.
    self._table = ddb.Table(
        self, 'Hits',
        partition_key={'name': 'path', 'type': ddb.AttributeType.STRING})

    self._handler = _lambda.Function(
        self, 'HitCountHandler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='hitcount.handler',
        code=_lambda.Code.asset('lambda'),
        # Names are resolved at deploy time and injected as environment vars.
        environment={
            'DOWNSTREAM_FUNCTION_NAME': downstream.function_name,
            'HITS_TABLE_NAME': self._table.table_name,
        })

    # BUG FIX: the original granted via `self.handler`, which only works if a
    # `handler` property is defined elsewhere on the class; reference the
    # attribute assigned above directly so this block is self-contained.
    self._table.grant_read_write_data(self._handler)
    downstream.grant_invoke(self._handler)
def __init__(self, scope: core.Construct, id: str, downstream: _lambda.IFunction, **kwargs):
    """Hit-counter construct: a DynamoDB table plus a Lambda that records hits
    before invoking *downstream*.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream: Lambda function to invoke after recording the hit.
    """
    # Local import keeps this fix self-contained; hoist to the top of the
    # module if preferred.
    import os

    super().__init__(scope, id, **kwargs)

    self._table = ddb.Table(
        self, "Hits",
        partition_key={"name": "path", "type": ddb.AttributeType.STRING},
    )

    # BUG FIX: the asset path was hard-coded to a developer's home directory
    # (/Users/vincent/...), which breaks `cdk synth`/`deploy` on every other
    # machine and in CI. Resolve the Lambda source relative to this file.
    # NOTE(review): assumes this module sits next to `lambda_function/` in the
    # package — confirm against the repository layout.
    lambda_src = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "lambda_function"
    )

    self._handler = _lambda.Function(
        self, "HitCountHandler",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="hitcount.handler",
        code=_lambda.Code.asset(lambda_src),
        environment={
            "DOWNSTREAM_FUNCTION_NAME": downstream.function_name,
            "HITS_TABLE_NAME": self._table.table_name,
        },
    )

    # BUG FIX: grant via the attribute assigned above instead of `self.handler`,
    # which only exists if a property is defined elsewhere on the class.
    self._table.grant_read_write_data(self._handler)
    downstream.grant_invoke(self._handler)
def __init__(self, scope: core.Construct, id: str, downstream: lambda_function.IFunction) -> None:
    """Hit-counter construct with fixed physical resource names.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream: Lambda function to invoke after recording the hit.
    """
    super().__init__(scope, id)

    # Fixed table name; note fixed names prevent parallel deployments of this
    # stack in the same account/region.
    self._table = dynamodb.Table(
        self, "Hits",
        table_name="Samples_CDK_HitCountTable",
        partition_key=dynamodb.Attribute(
            name="path",
            type=dynamodb.AttributeType.STRING
        )
    )

    self._handler = lambda_function.Function(
        self, "HitCountHandler",
        function_name="Samples_CDK_HitCounter",
        runtime=lambda_function.Runtime.PYTHON_3_7,
        code=lambda_function.Code.asset("lambda"),
        handler="hitcount.handler",
        environment={
            "DOWNSTREAM_FUNCTION_NAME": downstream.function_name,
            "HITS_TABLE_NAME": self._table.table_name
        }
    )

    # BUG FIX: the original referenced `self.table` and `self.handler`, which
    # raise AttributeError unless matching properties are defined elsewhere on
    # the class; use the attributes assigned above directly.
    self._table.grant_read_write_data(self._handler)
    downstream.grant_invoke(self._handler)
def __init__(self, scope: core.Construct, id: str, downstream: _lambda.IFunction, **kwargs):
    """Hit-counter construct: DynamoDB table + counting Lambda wired to *downstream*.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream: Lambda function to invoke after recording the hit.
    """
    super().__init__(scope, id, **kwargs)

    # Define a DynamoDB table with 'path' as the partition key.
    # Every DynamoDB table must have a single partition key.
    table = ddb.Table(
        self, 'Hits',
        table_name="dyna-HitCountDB",
        partition_key={'name': 'path', 'type': ddb.AttributeType.STRING})

    # Define the hit-counting Lambda function.
    self._handler = _lambda.Function(
        self, 'HitCountHandler',
        function_name="dyna-HitCount",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='hitcount.handler',
        code=_lambda.Code.asset('lambda'),
        # Wire the Lambda's environment variables to the function_name and
        # table_name of our resources.
        environment={
            'DOWNSTREAM_FUNCTION_NAME': downstream.function_name,
            'HITS_TABLE_NAME': table.table_name,
        })

    # BUG FIX: grants used `self.handler`, which is never assigned in this
    # block (only `self._handler` is) and would fail unless a property exists
    # elsewhere; use the attribute directly.
    table.grant_read_write_data(self._handler)
    downstream.grant_invoke(self._handler)
def __init__(self, scope: core.Construct, id: str, downstream: _lambda.IFunction, **kwargs):
    """Hit-counter construct: DynamoDB table + counting Lambda wired to *downstream*.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream: Lambda function to invoke after recording the hit.
    """
    super().__init__(scope, id, **kwargs)

    self._table = ddb.Table(
        self, "Hits",
        partition_key={"name": "path", "type": ddb.AttributeType.STRING})

    self._handler = _lambda.Function(
        self, "HitCountHandler",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='hitcount.handler',
        code=_lambda.Code.asset('lambda'),
        environment={
            "DOWNSTREAM_FUNCTION_NAME": downstream.function_name,  # resolved at deploy time
            "HITS_TABLE_NAME": self._table.table_name,  # same: deploy-time table name
        })

    # BUG FIX: grants used `self.handler`, which is never assigned here (only
    # `self._handler` is); use the attribute directly so the block is
    # self-contained.
    # Gives the hit-counter Lambda permission to read/write the DynamoDB table.
    self._table.grant_read_write_data(self._handler)
    # Gives the hit-counter Lambda permission to invoke the downstream Lambda.
    downstream.grant_invoke(self._handler)
def __init__(self, scope: core.Construct, id: str, downstream: _lambda.IFunction, **kwargs):
    """Hit-counter construct: DynamoDB table + counting Lambda wired to *downstream*.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream: Lambda function to invoke after recording the hit.
    """
    super().__init__(scope, id, **kwargs)

    self._table = ddb.Table(
        self, "Hits",
        partition_key={"name": "path", "type": ddb.AttributeType.STRING},
    )

    self._handler = _lambda.Function(
        self, "HitCountHandler",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="hitcount.handler",
        # NOTE(review): os.getcwd() makes this asset path depend on the
        # directory `cdk` is invoked from; resolving relative to __file__
        # would be more robust — confirm before changing.
        code=_lambda.Code.from_asset(os.path.join(os.getcwd(), "lambda")),
        environment={
            "DOWNSTREAM_FUNCTION_NAME": downstream.function_name,
            "HITS_TABLE_NAME": self._table.table_name,
        },
    )

    # BUG FIX: grants used `self.handler`, which is never assigned in this
    # block (only `self._handler` is); use the attribute directly.
    self._table.grant_read_write_data(self._handler)
    downstream.grant_invoke(self._handler)
def __init__(self, scope: core.Construct, id: str, downstream: _lambda.IFunction, **kwargs):
    """Request-processor construct: stores hit counts in DynamoDB and forwards
    each request to the *downstream* Lambda.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream: Lambda function this handler is allowed to invoke.
    """
    super().__init__(scope, id, **kwargs)

    # DynamoDB table keyed on the request path; holds the hit counts.
    hits_table = ddb.Table(
        self, 'Hits',
        partition_key={'name': 'path', 'type': ddb.AttributeType.STRING})

    # The handler source is shipped inline rather than as an asset bundle,
    # so read it from disk at synth time.
    with open("lambda_src/request_processor.py", encoding="utf-8") as src:
        handler_source = src.read()

    # Lambda function that counts hits and calls the downstream function.
    self._handler = _lambda.Function(
        self, 'requestProcessorId',
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='index.handler',
        code=_lambda.Code.inline(handler_source),
        environment={
            'DOWNSTREAM_FUNCTION_NAME': downstream.function_name,
            'HITS_TABLE_NAME': hits_table.table_name,
        })

    # Permissions: read/write the hits table, invoke the downstream Lambda.
    hits_table.grant_read_write_data(self._handler)
    downstream.grant_invoke(self._handler)
def __init__(
        self,
        scope: core.Construct,
        id: str,  # pylint: disable=redefined-builtin
        lambda_notifications: aws_lambda.IFunction,
        social_log_group: aws_logs.ILogGroup,
        pagespeed_table: aws_dynamodb.ITable,
        **kwargs) -> None:
    """API stack: a Lambda backend fronted by an API Gateway on a custom domain.

    :param lambda_notifications: notifications Lambda the backend may invoke.
    :param social_log_group: log group the backend reads report events from.
    :param pagespeed_table: DynamoDB table the backend reads pagespeed data from.
    """
    super().__init__(scope, id, **kwargs)

    # Backend Lambda; configuration values come from the shared `env` mapping.
    backend = get_lambda(
        self,
        id,
        code='lib/stacks/{id}/{id}'.format(id=id),
        handler='main.handler',
        environment={
            'CORS_ALLOW_ORIGIN': env['CORS_ALLOW_ORIGIN'],
            'PUSHOVER_TOKEN': env['PUSHOVER_TOKEN'],
            'PUSHOVER_USERKEY': env['PUSHOVER_USERKEY'],
            'LAMBDA_FUNCTIONS_LOG_LEVEL': 'INFO',
            'LAMBDA_NOTIFICATIONS': lambda_notifications.function_name,
            'PAGESPEED_TABLE': pagespeed_table.table_name,
            'REPORT_LOG_GROUP_NAME': social_log_group.log_group_name,
        },
    )

    # Grant the backend exactly what it needs on each collaborator resource.
    lambda_notifications.grant_invoke(backend)
    social_log_group.grant(backend, "logs:GetLogEvents",
                           "logs:DescribeLogStreams")
    pagespeed_table.grant_read_data(backend)

    # TLS certificate + custom domain for the REST API.
    tls_certificate = aws_certificatemanager.Certificate(
        self,
        '{}-certificate'.format(id),
        domain_name=env['API_DOMAIN'],
    )
    domain_options = aws_apigateway.DomainNameOptions(
        certificate=tls_certificate,
        domain_name=env['API_DOMAIN'],
    )

    # Restrict CORS to the configured origin when one is set, otherwise allow all.
    if "CORS_ALLOW_ORIGIN" in env:
        allowed_origins = [env['CORS_ALLOW_ORIGIN']]
    else:
        allowed_origins = aws_apigateway.Cors.ALL_ORIGINS
    cors_options = aws_apigateway.CorsOptions(
        allow_methods=['POST'],
        allow_origins=allowed_origins)

    aws_apigateway.LambdaRestApi(
        self,
        '%s-gateway' % id,
        handler=backend,
        domain_name=domain_options,
        default_cors_preflight_options=cors_options,
    )
def __init__(
        self,
        scope: core.Construct,
        id: str,  # pylint: disable=redefined-builtin
        lambda_notifications: aws_lambda.IFunction,
        **kwargs) -> None:
    """Report stack: a scheduled Lambda that queries CloudWatch Logs and sends
    a report through the notifications Lambda.

    :param lambda_notifications: notifications Lambda the reporter may invoke.
    """
    super().__init__(scope, id, **kwargs)

    # Long timeout: log queries over a full reporting window can be slow.
    report_lambda = get_lambda(
        self,
        f"{id}-lambda",
        code=f"lib/stacks/{id}/lambdas",
        handler="send_report.handler",
        environment={
            "LAMBDA_FUNCTIONS_LOG_LEVEL": "INFO",
            "LAMBDA_NOTIFICATIONS": lambda_notifications.function_name,
        },
        timeout=core.Duration.minutes(15),  # pylint: disable=no-value-for-parameter
    )
    lambda_notifications.grant_invoke(report_lambda)

    # Allow CloudWatch Logs Insights queries across the account's log groups.
    aws_iam.Policy(
        self,
        f"{id}-iam-policy-logs",
        roles=[report_lambda.role],
        statements=[
            aws_iam.PolicyStatement(
                actions=[
                    "logs:DescribeLogGroups",
                    "logs:GetQueryResults",
                    "logs:StartQuery",
                ],
                resources=[
                    f"arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*"
                ],
            )
        ],
    )

    # Fire the reporter once per day at midnight UTC.
    midnight_rule = aws_events.Rule(
        self,
        f"{id}-scheduled-event",
        enabled=True,
        schedule=aws_events.Schedule.cron(hour="0", minute="0"),  # pylint: disable=no-value-for-parameter
    )
    midnight_rule.add_target(
        aws_events_targets.LambdaFunction(handler=report_lambda))
def __init__(
        self,
        scope: core.Construct,
        id: str,  # pylint: disable=redefined-builtin
        lambda_notifications: aws_lambda.IFunction,
        **kwargs) -> None:
    """Backups-monitor stack: a daily Lambda that inspects the configured S3
    buckets and reports through the notifications Lambda.

    :param lambda_notifications: notifications Lambda the monitor may invoke.
    """
    super().__init__(scope, id, **kwargs)

    monitor = get_lambda(
        self,
        f"{id}-lambda",
        code=f"lib/stacks/{id.replace('-', '_')}/lambdas",
        handler="backups_monitor.handler",
        environment={
            "BUCKETS_TO_MONITOR": env["BUCKETS_TO_MONITOR"],
            "LAMBDA_NOTIFICATIONS": lambda_notifications.function_name,
        })
    lambda_notifications.grant_invoke(monitor)

    # BUCKETS_TO_MONITOR is a ';'-separated list of ','-separated entries
    # whose first field is the bucket name; grant ListBucket on each one.
    bucket_arns = [
        f"arn:aws:s3:::{entry.split(',')[0]}"
        for entry in env["BUCKETS_TO_MONITOR"].split(";")
    ]
    aws_iam.Policy(
        self,
        f"{id.replace('-', '_')}-iam-policy",
        roles=[monitor.role],
        statements=[
            aws_iam.PolicyStatement(
                actions=["s3:ListBucket"],
                resources=bucket_arns,
            )
        ],
    )

    # Run the monitor every morning at 06:00 UTC.
    daily_rule = aws_events.Rule(
        self,
        f"{id}-scheduled-event",
        enabled=True,
        schedule=aws_events.Schedule.cron(minute="0", hour="6"),  # pylint: disable=no-value-for-parameter
    )
    daily_rule.add_target(aws_events_targets.LambdaFunction(monitor))
def __init__(self, scope: core.Construct, id: str, downstream_function: _lambda.IFunction) -> None:
    """Hit-counter construct: DynamoDB table + counting Lambda wired to
    *downstream_function*.

    :param scope: parent construct.
    :param id: construct id.
    :param downstream_function: Lambda function to invoke after recording the hit.
    """
    super().__init__(scope, id)

    self._table = _ddb.Table(
        self, 'HitsTable',
        partition_key=_ddb.Attribute(
            name='path',
            type=_ddb.AttributeType.STRING))

    # Lambda sources live one directory above this module.
    functions_path = os.path.join(os.path.dirname(__file__), '../lambda')

    self._handler = _lambda.Function(
        self, 'HitCountHandler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='hitcount.handler',
        code=_lambda.Code.from_asset(functions_path),
        environment={
            'DOWNSTREAM_FUNCTION': downstream_function.function_name,
            'HITS_TABLE_NAME': self._table.table_name,
        })

    # BUG FIX: the grants referenced `self.handler`, but only `self._handler`
    # is assigned in this block; use the attribute directly so the block does
    # not depend on a property defined elsewhere.
    downstream_function.grant_invoke(self._handler)
    self._table.grant_read_write_data(self._handler)
def __init__(self, scope: core.Construct, id: str,
             lambda_notifications: aws_lambda.IFunction, **kwargs) -> None:
    """WHOIS stack: a nightly Lambda that polls domain expiry data and reports
    through the notifications Lambda.

    :param lambda_notifications: notifications Lambda the poller may invoke.
    """
    super().__init__(scope, id, **kwargs)

    # Poller Lambda; domains and API key come from the shared `env` mapping.
    whois_poller = get_lambda(
        self,
        f"{id}-lambda-poller",
        code=f"lib/stacks/{id}/lambdas",
        handler="whois_poller.handler",
        environment={
            "LAMBDA_NOTIFICATIONS": lambda_notifications.function_name,
            "WHOIS_DOMAINS": env["WHOIS_DOMAINS"],
            "WHOISXMLAPI_KEY": env["WHOISXMLAPI_KEY"],
        })
    lambda_notifications.grant_invoke(whois_poller)

    # Run every night at 23:30 UTC.
    nightly_rule = aws_events.Rule(
        self,
        f"{id}-scheduled-event",
        enabled=True,
        schedule=aws_events.Schedule.cron(hour="23", minute="30"),  # pylint: disable=no-value-for-parameter
    )
    nightly_rule.add_target(
        aws_events_targets.LambdaFunction(handler=whois_poller))
def __init__(
        self,
        scope: core.Construct,
        id: str,  # pylint: disable=redefined-builtin
        lambda_notifications: aws_lambda.IFunction,
        **kwargs) -> None:
    """Pocket-to-Kindle pipeline: fetch articles from Pocket, build EPUBs,
    convert them to MOBI on Fargate, and mail the result to a Kindle address.

    Resources created: a CloudWatch log group (stores the Pocket 'since'
    timestamp), S3 buckets for EPUB and MOBI files, four Lambdas
    (reader -> create_epub -> trigger_ecs_task -> send_to_kindle), a Fargate
    cluster/task running `kindlegen`, IAM policies, and an hourly cron rule.

    :param lambda_notifications: notifications Lambda used to send the MOBI
        as an email attachment.
    """
    super().__init__(scope, id, **kwargs)

    # CloudWatch LogGroup and Stream to store 'since' timestamp value
    since_log_group = aws_logs.LogGroup(
        self,
        f"{id}-log-group",
        log_group_name=f"{id}-timestamps",
        retention=DEFAULT_LOG_RETENTION,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # Stream name deliberately mirrors the group name.
    since_log_group.add_stream(
        f"{id}-log-stream",
        log_stream_name=since_log_group.log_group_name,
    )

    # Lambda shared code
    lambda_code = code_from_path(path=f"lib/stacks/{id}/lambdas")

    # Lambda create_epub (and layers): build epub file and store to S3 bucket
    epub_bucket = get_bucket(self, f"{id}-epub-bucket")
    lambda_create_epub = get_lambda(
        self,
        id + "-create-epub",
        code=lambda_code,
        handler="create_epub.handler",
        environment={
            "EPUB_BUCKET": epub_bucket.bucket_name,
        },
        # Heavy conversion dependencies are shipped as Lambda layers.
        layers=[
            get_layer(self, layer_name=layer, prefix=id)
            for layer in ("pandoc", "html2text", "requests_oauthlib")
        ],
        timeout=core.Duration.minutes(5),  # pylint: disable=no-value-for-parameter
    )
    epub_bucket.grant_write(lambda_create_epub)

    # Lambda send_to_kindle: invoked when new MOBI dropped into S3 bucket,
    # deliver MOBI as email attachment via lambda_notifications
    mobi_bucket = get_bucket(self, f"{id}-mobi-bucket")
    lambda_send_to_kindle = get_lambda(
        self,
        id + "-send-to-kindle",
        code=lambda_code,
        handler="send_to_kindle.handler",
        environment={
            "KINDLE_EMAIL": env["KINDLE_EMAIL"],
            "LAMBDA_NOTIFICATIONS": lambda_notifications.function_name,
            "MOBI_SRC_BUCKET": mobi_bucket.bucket_name,
            "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
            "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
        })
    mobi_bucket.add_event_notification(
        event=aws_s3.EventType.OBJECT_CREATED_PUT,
        dest=aws_s3_notifications.LambdaDestination(lambda_send_to_kindle),
    )
    lambda_notifications.grant_invoke(lambda_send_to_kindle)

    # The notifications Lambda must be able to fetch the MOBI attachment.
    # NOTE(review): accessing `.role` assumes lambda_notifications is a
    # concrete Function, not just any IFunction — confirm at the call site.
    aws_iam.Policy(
        self,
        f"{id}-mail-attachment-policy",
        roles=[lambda_notifications.role],
        statements=[
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[f"{mobi_bucket.bucket_arn}/*"])
        ],
    )

    # Lambda reader: fetch new articles from Pocket and fan-out trigger create_epub Lambda
    lambda_reader = get_lambda(
        self,
        id + "-reader",
        code=lambda_code,
        handler="reader.handler",
        environment={
            "LAMBDA_PUBLISHER": lambda_create_epub.function_name,
            "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
            "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
            "SINCE_LOG_GROUP": since_log_group.log_group_name,
        },
    )
    # Reader both reads and advances the persisted 'since' timestamp.
    since_log_group.grant(
        lambda_reader,
        "logs:GetLogEvents",
        "logs:PutLogEvents",
    )
    lambda_create_epub.grant_invoke(lambda_reader)

    # Fargate task: run dockerized `kindlegen` to parse EPUB to MOBI,
    # triggered by trigger_ecs_task Lambda
    # https://medium.com/@piyalikamra/s3-event-based-trigger-mechanism-to-start-ecs-far-gate-tasks-without-lambda-32f57ed10b0d
    cluster, vpc = get_fargate_cluster(self, id)
    mem_limit = "512"
    task = get_fargate_task(self, id, mem_limit)
    # Task reads EPUBs and writes the converted MOBIs.
    aws_iam.Policy(
        self,
        f"{id}-bucket-policy",
        roles=[task.task_role],
        statements=[
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[f"{epub_bucket.bucket_arn}/*"]),
            aws_iam.PolicyStatement(
                actions=["s3:PutObject"],
                resources=[f"{mobi_bucket.bucket_arn}/*"]),
        ],
    )
    container = get_fargate_container(self, id, task, mem_limit)

    # Lambda trigger_ecs_task: trigger Fargate task when new EPUB file is dropped into epub_bucket
    lambda_trigger_ecs_task = get_lambda(
        self,
        f"{id}-trigger-ecs-task",
        code=lambda_code,
        handler="trigger_ecs_task.handler",
        environment={
            "ECS_CLUSTER": cluster.cluster_arn,
            "ECS_CLUSTER_SECURITY_GROUP": vpc.vpc_default_security_group,
            "ECS_CLUSTER_SUBNET": vpc.public_subnets[0].subnet_id,
            "ECS_CONTAINER": container.container_name,
            "ECS_TASK": task.task_definition_arn,
            "MOBI_DEST_BUCKET": mobi_bucket.bucket_name,
        },
    )
    epub_bucket.add_event_notification(
        event=aws_s3.EventType.OBJECT_CREATED_PUT,
        dest=aws_s3_notifications.LambdaDestination(
            lambda_trigger_ecs_task),
    )
    # The trigger Lambda needs to start the task and pass its IAM roles.
    aws_iam.Policy(
        self,
        f"{id}-lambda-trigger-policy",
        roles=[lambda_trigger_ecs_task.role],
        statements=[
            aws_iam.PolicyStatement(
                actions=["ecs:RunTask"],
                resources=[task.task_definition_arn],
            ),
            aws_iam.PolicyStatement(
                actions=["iam:PassRole"],
                resources=[
                    task.execution_role.role_arn,
                    task.task_role.role_arn,
                ],
            )
        ],
    )

    # Cloudwatch cronjob event to check for new articles every hour
    cronjob = aws_events.Rule(
        self,
        f"{id}-scheduled-event",
        enabled=True,
        schedule=aws_events.Schedule.cron(minute="0"),  # pylint: disable=no-value-for-parameter
    )
    cronjob.add_target(
        aws_events_targets.LambdaFunction(handler=lambda_reader))
def __init__(self, scope: core.Construct, id: str,  # pylint: disable=redefined-builtin
             lambda_notifications: aws_lambda.IFunction, **kwargs) -> None:
    """Pocket-to-Kindle document pipeline: fetch articles from Pocket, build
    document files, and mail them to a Kindle address.

    Resources created: a CloudWatch log group (stores the Pocket 'since'
    timestamp), one S3 bucket, three Lambdas
    (reader -> create_doc -> send_to_kindle), IAM policies, and an hourly
    cron rule.

    :param lambda_notifications: notifications Lambda used to send the
        document as an email attachment.
    """
    super().__init__(scope, id, **kwargs)

    # CloudWatch LogGroup and Stream to store 'since' timestamp value
    since_log_group = aws_logs.LogGroup(
        self,
        f"{id}-log-group",
        log_group_name=f"{id}-timestamps",
        retention=DEFAULT_LOG_RETENTION,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # Stream name deliberately mirrors the group name.
    since_log_group.add_stream(
        f"{id}-log-stream",
        log_stream_name=since_log_group.log_group_name,
    )

    # Lambda shared code
    lambda_code = code_from_path(path=f"lib/stacks/{id}/lambdas")

    # Lambda create_doc (and layers): build document file and store to S3 bucket
    bucket = get_bucket(self, f"{id}-bucket")
    lambda_create_doc = get_lambda(
        self,
        id + "-create-document",
        code=lambda_code,
        handler="create_doc.handler",
        environment={
            "DOCUMENT_BUCKET": bucket.bucket_name,
        },
        # Parsing dependencies are shipped as Lambda layers.
        layers=[get_layer(self, layer_name=layer, prefix=id)
                for layer in ("readability", "requests_oauthlib")],
        timeout=core.Duration.minutes(5),  # pylint: disable=no-value-for-parameter
    )
    bucket.grant_write(lambda_create_doc)

    # Lambda send_to_kindle: invoked when new documents dropped into S3 bucket,
    # deliver document as email attachment via lambda_notifications
    lambda_send_to_kindle = get_lambda(
        self,
        id + "-send-to-kindle",
        code=lambda_code,
        handler="send_to_kindle.handler",
        environment={
            "KINDLE_EMAIL": env["KINDLE_EMAIL"],
            "LAMBDA_NOTIFICATIONS": lambda_notifications.function_name,
            "DOCUMENT_SRC_BUCKET": bucket.bucket_name,
            "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
            "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
        }
    )
    bucket.add_event_notification(
        event=aws_s3.EventType.OBJECT_CREATED_PUT,
        dest=aws_s3_notifications.LambdaDestination(lambda_send_to_kindle),
    )
    lambda_notifications.grant_invoke(lambda_send_to_kindle)

    # The notifications Lambda must be able to fetch the document attachment.
    # NOTE(review): accessing `.role` assumes lambda_notifications is a
    # concrete Function, not just any IFunction — confirm at the call site.
    aws_iam.Policy(
        self,
        f"{id}-mail-attachment-policy",
        roles=[lambda_notifications.role],
        statements=[
            aws_iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[f"{bucket.bucket_arn}/*"]
            )
        ],
    )

    # Lambda reader: fetch new articles from Pocket and fan-out trigger create_doc Lambda
    lambda_reader = get_lambda(
        self,
        id + "-reader",
        code=lambda_code,
        handler="reader.handler",
        environment={
            "LAMBDA_PUBLISHER": lambda_create_doc.function_name,
            "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
            "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
            "SINCE_LOG_GROUP": since_log_group.log_group_name,
        },
    )
    # Reader both reads and advances the persisted 'since' timestamp.
    since_log_group.grant(
        lambda_reader,
        "logs:GetLogEvents",
        "logs:PutLogEvents",
    )
    lambda_create_doc.grant_invoke(lambda_reader)

    # Cloudwatch cronjob event to check for new articles every hour
    cronjob = aws_events.Rule(
        self,
        f"{id}-scheduled-event",
        enabled=True,
        schedule=aws_events.Schedule.cron(minute="0"),  # pylint: disable=no-value-for-parameter
    )
    cronjob.add_target(aws_events_targets.LambdaFunction(handler=lambda_reader))