def __init__(self, scope: core.Construct, id: str, bank_account_service: lambda_.Function,
             stage: Optional[str] = 'prod', **kwargs) -> None:
    super().__init__(scope, id + '-' + stage, **kwargs)

    # create SNS topic
    topic = sns.Topic(self, "BankTopic",
                      display_name="SMSOutbound",
                      topic_name="SMSOutbound")
    topic.add_subscription(subs.EmailSubscription(email_address="*****@*****.**"))

    # create the EventBridge bus and rules
    bus_name = 'banking-demo-events-' + stage
    bus = events.EventBus(self, id, event_bus_name=bus_name)

    events.Rule(self, "HUMAN_REVIEWED_APPLICATION",
                event_bus=bus,
                event_pattern=events.EventPattern(
                    detail_type=["HUMAN_REVIEWED_APPLICATION"]),
                rule_name="HUMAN_REVIEWED_APPLICATION",
                enabled=True,
                targets=[targets.SnsTopic(topic)])

    events.Rule(self, "APPLICATION_SUBMITTED",
                event_bus=bus,
                event_pattern=events.EventPattern(
                    detail_type=["APPLICATION_SUBMITTED"]),
                rule_name="APPLICATION_SUBMITTED",
                enabled=True)

    events.Rule(self, "APPLICATION_APPROVED",
                event_bus=bus,
                event_pattern=events.EventPattern(
                    detail_type=["APPLICATION_APPROVED"]),
                rule_name="APPLICATION_APPROVED",
                enabled=True,
                targets=[targets.LambdaFunction(lambda_.Function.from_function_arn(
                    self, "func", bank_account_service.function_arn))])

    self._event_bus_arn = bus.event_bus_arn
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    queue = sqs.Queue(self, "Queue", queue_name="Events_DLQ")

    fn = lambda_.Function(self, "ETL_job_func",
                          runtime=lambda_.Runtime.PYTHON_3_8,
                          handler="lambda_function.handler",
                          code=lambda_.Code.asset('lambda'),
                          dead_letter_queue=queue)

    fn_dlq_process = lambda_.Function(self, "DLQ_Process_func",
                                      runtime=lambda_.Runtime.PYTHON_3_8,
                                      handler="lambda_function.handler",
                                      code=lambda_.Code.asset('lambda_dlq'))

    rule = events.Rule(self, "Rule",
                       schedule=events.Schedule.cron(minute='0', hour='11'))
    rule.add_target(targets.LambdaFunction(
        fn,
        dead_letter_queue=queue,              # Optional: add a dead-letter queue
        max_event_age=cdk.Duration.hours(2),  # Optional: set the maxEventAge retry policy
        retry_attempts=2))

    rule_dlq = events.Rule(self, "Rule_DLQ",
                           schedule=events.Schedule.cron(minute='0', hour='12'))
    rule_dlq.add_target(targets.LambdaFunction(fn_dlq_process))

    log_group = logs.LogGroup(self, "EventsLogGroup",
                              log_group_name="EventsLogGroup")
    rule.add_target(targets.CloudWatchLogGroup(log_group))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create a serverless event processor using Lambda:
    # read the Lambda function code from disk
    try:
        with open("serverless_stacks/lambda_src/konstone_processor.py", mode="r") as f:
            konstone_fn_code = f.read()
    except OSError:
        print("Unable to read Lambda Function Code")

    # Simple Lambda function that returns the event
    konstone_fn = _lambda.Function(
        self,
        "konstoneFunction",
        function_name="konstone_function",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="index.lambda_handler",
        code=_lambda.InlineCode(konstone_fn_code),
        timeout=core.Duration.seconds(3),
        reserved_concurrent_executions=1,
        environment={
            "LOG_LEVEL": "INFO",
            "AUTOMATION": "SKON"
        })

    # Run every weekday at 18:00 UTC, e.g. to schedule deletion or moving of maps
    # from one storage system to another (such as Glacier) after two months
    six_pm_cron = _events.Rule(self, "sixPmRule",
                               schedule=_events.Schedule.cron(
                                   minute="0",
                                   hour="18",
                                   month="*",
                                   week_day="MON-FRI",
                                   year="*"))

    # Rate-based schedule: run every 3 minutes
    run_every_3_minutes = _events.Rule(self, "runEvery3Minutes",
                                       schedule=_events.Schedule.rate(
                                           core.Duration.minutes(3)))

    # Add the Lambda function as target of both CloudWatch Event trigger rules
    six_pm_cron.add_target(_targets.LambdaFunction(konstone_fn))
    run_every_3_minutes.add_target(_targets.LambdaFunction(konstone_fn))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Import the function code
    try:
        with open("serverless_stack/functions/function.py", mode="r") as file:
            function_body = file.read()
    except OSError:
        print('Unable to read the function file')

    function_01 = aws_lambda.Function(
        self,
        "lambdafunction01",
        function_name="LambdaTestCDK",
        runtime=aws_lambda.Runtime.PYTHON_3_6,
        handler="index.lambda_handler",
        code=aws_lambda.InlineCode(function_body),
        timeout=core.Duration.seconds(5),
        reserved_concurrent_executions=1,
        environment={'LOG_LEVEL': 'INFO'})

    # Attach a CloudWatch log group
    log_group01 = aws_logs.LogGroup(
        self,
        "cloudwatchlog01",
        log_group_name=f"/aws/lambda/{function_01.function_name}",
        removal_policy=core.RemovalPolicy.DESTROY)

    # CloudWatch Event trigger at 6 AM on weekdays
    cron_01 = aws_events.Rule(self, "cron01",
                              schedule=aws_events.Schedule.cron(
                                  minute="0",
                                  hour="6",
                                  month="*",
                                  week_day="MON-FRI",
                                  year="*"))

    # CloudWatch Event trigger every 5 minutes
    cron_02 = aws_events.Rule(self, "cron02",
                              schedule=aws_events.Schedule.rate(
                                  core.Duration.minutes(5)))

    # Add both triggers to the Lambda function
    cron_01.add_target(aws_events_targets.LambdaFunction(function_01))
    cron_02.add_target(aws_events_targets.LambdaFunction(function_01))
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # The code that defines your stack goes here

    # A plain _lambda.Function is simpler if the handler is a simple Python function
    # with no dependencies:
    # my_function = _lambda.Function(self,
    #     "my-function",
    #     runtime=_lambda.Runtime.PYTHON_3_8,
    #     code=_lambda.Code.asset("src"),
    #     handler="main.handler"
    # )

    my_function = PythonFunction(
        self,
        "my-function",
        entry="src",
        index="main.py",
        handler="handler",
        runtime=_lambda.Runtime.PYTHON_3_8,
    )

    rule = events.Rule(self, "Rule",
                       schedule=events.Schedule.cron(minute="0",
                                                     hour="18",
                                                     month="*",
                                                     week_day="MON-FRI",
                                                     year="*"))
    rule.add_target(targets.LambdaFunction(my_function))
def __init__(self, app: core.App, id: str) -> None:
    super().__init__(app, id)

    with open("lambda-handler.py", encoding="utf8") as fp:
        handler_code = fp.read()

    lambdaFn = lambda_.Function(
        self,
        "Singleton",
        code=lambda_.InlineCode(handler_code),
        handler="index.main",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
    )

    # Run every weekday at 6 PM UTC
    # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    rule = events.Rule(
        self,
        "Rule",
        schedule=events.Schedule.cron(minute='0',
                                      hour='18',
                                      month='*',
                                      week_day='MON-FRI',
                                      year='*'),
    )
    rule.add_target(targets.LambdaFunction(lambdaFn))
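# A minimal sketch of what the lambda-handler.py read by the Singleton stack above could
# contain (assumption: the real handler file is not shown here). With InlineCode the source
# is deployed as index.py, so handler="index.main" resolves to a function named main.
def main(event, context):
    # Log the scheduled event delivered by the EventBridge rule
    print("Scheduled event received:", event)
    return "ok"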
def __init__(self, scope: Construct, id: str, custom_function_es: IFunction,
             custom_function_rds: IFunction, state_machine: IStateMachine, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    custom_rule_es = config.CustomRule(self, "Custom_es",
                                       configuration_changes=True,
                                       lambda_function=custom_function_es,
                                       config_rule_name=constants.CONFIG_RULE_ES_PUBLIC)
    custom_rule_es.scope_to_resource("AWS::Elasticsearch::Domain")

    custom_rule_rds = config.CustomRule(self, "Custom_rds",
                                        configuration_changes=True,
                                        lambda_function=custom_function_rds,
                                        config_rule_name=constants.CONFIG_RULE_RDS_PUBLIC)
    custom_rule_rds.scope_to_resource("AWS::RDS::DBInstance")

    rule_detail = {
        "requestParameters": {
            "evaluations": {
                "complianceType": ["NON_COMPLIANT"],
                "complianceResourceType": ["AWS::Elasticsearch::Domain",
                                           "AWS::RDS::DBInstance"]
            }
        }
    }
    event_pattern = events.EventPattern(source=["aws.config"], detail=rule_detail)

    events.Rule(self, 'ComplianceCustomRule',
                enabled=True,
                event_pattern=event_pattern,
                targets=[targets.SfnStateMachine(state_machine)])
def __init__(self, scope: core.Construct, id: str, kafkaClientFirewall: ec2.ISecurityGroup,
             vpc: ec2.IVpc, kafkaCloudMap: cloudmap.Service, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Kafka data producer
    lambdaFn = lambda_.Function(
        self,
        "KafkaProducer",
        code=lambda_.AssetCode('fargate_workshop_cdk/function.zip'),
        handler="kafka-producer.main",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        description='Simple Kafka producer for Fargate workshop',
        environment={
            'NAMESPACE': kafkaCloudMap.namespace.namespace_name,
            'SERVICE': kafkaCloudMap.service_name,
            'TOPIC_NAME': 'MyTopic'
        },
        memory_size=512,
        security_group=kafkaClientFirewall,
        vpc=vpc)

    lambdaFn.add_to_role_policy(statement=iam.PolicyStatement(
        resources=['*'],
        actions=['servicediscovery:DiscoverInstances']))
    lambdaFn.add_to_role_policy(statement=iam.PolicyStatement(
        resources=['*'],
        actions=['kafka:GetBootstrapBrokers']))

    # Run every 5 minutes
    # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    rule = events.Rule(
        self,
        "Rule",
        schedule=events.Schedule.rate(core.Duration.minutes(5)),
    )
    rule.add_target(targets.LambdaFunction(lambdaFn))
def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
    super().__init__(app, id)

    table_name = 'RandomWriterTable'

    with open("lambda-handler.py", encoding="utf8") as fp:
        handler_code = fp.read()

    lambda_fn = lambda_.Function(
        self,
        "RandomWriter",
        code=lambda_.InlineCode(handler_code),
        handler="index.main",
        timeout=cdk.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        environment={'TABLE_NAME': table_name},
    )

    # Add our 'every minute' scheduling rule for this Lambda (via a scheduled EventBridge rule)
    rule = events.Rule(self, "Rule",
                       schedule=events.Schedule.expression("cron(* * * * ? *)"))
    rule.add_target(targets.LambdaFunction(lambda_fn))

    # Build our DynamoDB table
    dynamodb = Table(self, table_name,
                     table_name=table_name,
                     partition_key={
                         'name': 'ID',
                         'type': AttributeType.STRING
                     },
                     billing_mode=BillingMode.PAY_PER_REQUEST)
    dynamodb.grant_full_access(lambda_fn)
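# A hedged sketch of what the inlined lambda-handler.py for the RandomWriter stack above
# might look like (assumption: the real handler is not shown); each time the every-minute
# rule fires it writes one item keyed by a random ID to the table named in TABLE_NAME.
import os
import uuid
import boto3

def main(event, context):
    table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
    # Store a random ID together with the timestamp of the triggering event
    table.put_item(Item={"ID": str(uuid.uuid4()), "time": event.get("time", "")})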
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) self.table = aws_dynamodb.Table( self, f"{id}-table", partition_key=aws_dynamodb.Attribute( name="url", type=aws_dynamodb.AttributeType.STRING), ) deployer = get_lambda(self, f"{id}-lambda-deployer", code=f"lib/stacks/{id}/lambdas", handler="deployer.handler", layers=[get_layer(self, "feedparser", id)], environment={ "DEPLOYER_FEED_URLS": env["DEPLOYER_FEED_URLS"], "DYNAMODB_TABLE": self.table.table_name, "NETLIFY_HOOK": env["NETLIFY_HOOK"], }) self.table.grant_read_write_data(deployer) cronjob = aws_events.Rule( self, f"{id}-scheduled-event", enabled=True, schedule=aws_events.Schedule.cron(hour="6-16", minute="0"), # pylint: disable=no-value-for-parameter ) cronjob.add_target(aws_events_targets.LambdaFunction(handler=deployer))
def __init__(self, app: core.App, id: str) -> None:
    super().__init__(app, id)

    # IAM permissions for the Lambda functions
    configCustomResourcePermission = iam.PolicyStatement(
        actions=[
            'config:ListDiscoveredResources',
            'config:DeleteResourceConfig',
            'config:PutResourceConfig',
            'cloudformation:DescribeType'
        ],
        resources=['*'])
    configRuleLambdaPermission = iam.PolicyStatement(
        actions=['config:PutEvaluations'],
        resources=['*'])
    secretManagerPermission = iam.PolicyStatement(
        actions=['secretsmanager:GetSecretValue'],
        resources=[secret_manager_arn])

    # Lambda Layer
    lambdaLayer = lambda_.LayerVersion(
        self,
        "ConfigADConnectorLayer",
        code=lambda_.Code.from_asset(
            os.path.join(dirname, f'../.build/{lambda_layer_file_name}')),
        compatible_runtimes=[lambda_.Runtime.PYTHON_3_6])

    # Lambda function and periodic trigger for the Config - AD connector
    configAdLambdaFn = lambda_.Function(
        self,
        "ConfigADConnector",
        code=lambda_.Code.from_asset(
            os.path.join(dirname, f'../.build/{lambda_code_file_name}')),
        handler=f"{connector_function_name}.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_6,
        layers=[lambdaLayer],
        initial_policy=[configCustomResourcePermission, secretManagerPermission],
        environment=lambda_env_map)

    rule = events.Rule(
        self,
        "Rule",
        schedule=events.Schedule.rate(core.Duration.minutes(1)))
    rule.add_target(targets.LambdaFunction(configAdLambdaFn))

    # Custom Config Rule with a Lambda function for AD user evaluation
    ruleEvaluationLambdaFn = lambda_.Function(
        self,
        "ADUserPasswdExpiresCheck",
        code=lambda_.Code.from_asset(
            os.path.join(dirname, f'../.build/{lambda_code_file_name}')),
        handler=f"{config_rule_function_name}.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_6,
        layers=[lambdaLayer],
        initial_policy=[configRuleLambdaPermission],
        environment=lambda_env_map)

    configRule = config.CustomRule(self, "ADUserPasswdExpires",
                                   lambda_function=ruleEvaluationLambdaFn,
                                   configuration_changes=True)
    configRule.scope_to_resources(os.environ['RESOURCE_TYPE'])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Set up the SNS topic we will be sending messages to on failure
    sns_topic = sns.Topic(self, "ServiceStatusTopic",
                          display_name="Service Status Topic")

    # Create the Lambda resource using code from local disk
    lambda_handler = _lambda.Function(
        self,
        "SimpleHealthCheckLambda",
        code=_lambda.Code.from_asset(LAMBDA_FUNC_PATH),
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="simple_health_check.simple_health_check",
        memory_size=256,
        timeout=core.Duration.seconds(5))

    # Subscribe an HTTPS endpoint to our SNS topic and grant the Lambda function permission to publish to it
    sns_topic.add_subscription(
        sns_subscriptions.UrlSubscription("https://endpointstatus.snagajob.com"))
    sns_topic.grant_publish(lambda_handler)

    # Create a CloudWatch Event trigger for every minute
    rule = events.Rule(
        self,
        "SimpleHealthCheckLambdaRule",
        schedule=events.Schedule.expression("cron(0/1 * * * ? *)"))

    # Add the Lambda function as target of the event rule
    rule.add_target(targets.LambdaFunction(lambda_handler))
def __init__(self, app: App, id: str) -> None:
    super().__init__(app, id)

    # SQS Queue
    queue = sqs.Queue(
        self,
        "queue",
    )

    # Custom EventBridge Bus
    custom_bus = events.EventBus(
        self,
        "bus",
        event_bus_name="test-bus-cdk"
    )

    # EventBridge Rule
    rule = events.Rule(
        self,
        "rule",
        event_bus=custom_bus
    )
    rule.add_event_pattern(
        source=["my-cdk-application"],
        detail_type=["message-for-queue"]
    )
    rule.add_target(targets.SqsQueue(queue))

    # Stack Outputs
    CfnOutput(
        self,
        "QueueURL",
        description="URL of SQS Queue",
        value=queue.queue_url
    )
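# A usage sketch (not part of the stack above): once deployed, an event matching the rule's
# pattern can be published to the custom bus with boto3, and EventBridge delivers it to the
# SQS queue target. The detail payload here is only an illustrative example.
import json
import boto3

events_client = boto3.client("events")
events_client.put_events(Entries=[{
    "EventBusName": "test-bus-cdk",
    "Source": "my-cdk-application",
    "DetailType": "message-for-queue",
    "Detail": json.dumps({"message": "hello"}),
}])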
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    prj_name = self.node.try_get_context('project_name')
    env_name = self.node.try_get_context('env')

    lambda_function = lb.Function(self, "notifiaction_lambda",
                                  runtime=lb.Runtime.PYTHON_3_8,
                                  code=lb.Code.asset('lambda'),
                                  handler='hello.handler')

    cw_rule = events.Rule(self, 'cwrule',
                          schedule=events.Schedule.cron(minute='0',
                                                        hour='5',
                                                        month='*',
                                                        week_day='*',
                                                        year='*'))
    cw_rule.add_target(targets.LambdaFunction(lambda_function))

    lambda_topic = sns.Topic(self, 'lambdatopic',
                             topic_name='serverless-lambda-topic')
    lambda_topic.add_subscription(subs.LambdaSubscription(lambda_function))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    eventTargets = []

    policyStatement = _iam.PolicyStatement(
        resources=['*'],
        actions=[
            "cloudtrail:LookupEvents",
            "ec2:DeleteVolume",
            "ec2:DescribeVolumes",
        ],
        effect=_iam.Effect.ALLOW,
    )

    eventHandler = _lambda.Function(self, 'EbsCleanup',
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    code=_lambda.Code.asset('lambda'),
                                    handler='ebs_cleanup.handler',
                                    timeout=core.Duration.minutes(5))
    eventHandler.add_to_role_policy(policyStatement)
    eventTargets.append(_targets.LambdaFunction(handler=eventHandler))

    # Schedule the cleanup function to run once a month
    schedule = _events.Schedule.rate(core.Duration.days(30))

    _events.Rule(scope=self,
                 id='EbsCleanupRule',
                 description='Cleanup detached volumes older than 30 days',
                 rule_name='EbsCleanupRule',
                 schedule=schedule,
                 targets=eventTargets)
def add_states_schedule(self, action: str, schedule: events.Schedule,
                        payload: Mapping[str, Any] = None) -> None:
    """
    Creates a collection schedule
    """
    if payload is None:
        payload = {}
    if 'Action' not in payload:
        payload['Action'] = action

    # Define the long running process workflow...
    name_prefix = 'Fsi{}-Collector_{}'.format(
        self.resources.landing_zone.zone_name, action)
    long_running_process = FsiLongRunningCollectionProcess(
        self, name_prefix,
        action_name=action,
        resources=self.resources,
        function=self.function)

    # Create schedules...
    events.Rule(self, action + 'Rule',
                rule_name='{}'.format(name_prefix),
                description='Fsi Collector ' + action,
                schedule=schedule,
                # schedule=events.Schedule.rate(core.Duration.minutes(1)),
                targets=[
                    targets.SfnStateMachine(
                        machine=long_running_process.state_machine,
                        dead_letter_queue=sqs.Queue(
                            self, '{}_dlq'.format(name_prefix),
                            queue_name='{}_dlq'.format(name_prefix),
                            removal_policy=core.RemovalPolicy.DESTROY),
                        input=events.RuleTargetInput.from_object({
                            'Payload': payload
                        }))
                ])
def __init__(self, app: core.App, id: str) -> None:
    super().__init__(app, id)

    lambdaFn = lambda_.Function(
        self,
        "Singleton",
        code=lambda_.Code.asset("./lambda"),
        handler="index.main",
        timeout=core.Duration.seconds(900),
        runtime=lambda_.Runtime.PYTHON_3_8,
    )
    lambdaFn.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "ec2:RunInstances",
                "ec2:CreateTags",
                "iam:PassRole",
                "logs:CreateLogStream",
                "logs:PutLogEvents",
                "logs:CreateLogGroup"
            ],
            resources=['*']))

    # Run Sunday through Friday at 5 AM UTC
    # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    rule = events.Rule(
        self,
        "Rule",
        schedule=events.Schedule.cron(minute='0',
                                      hour='5',
                                      month='*',
                                      week_day='SUN-FRI',
                                      year='*'),
    )
    rule.add_target(targets.LambdaFunction(lambdaFn))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ingest_func = lambdas.Function(
        self,
        'sample-ingest-function',
        # function_name=function_name,
        code=lambdas.Code.from_asset(
            f'./functions/sample_ingest_function'),  # If no dependencies.
        runtime=lambdas.Runtime.PYTHON_3_7,
        handler=f'main.lambda_handler',
        # role=self.function_role,
        timeout=core.Duration.seconds(900),
        memory_size=1024,
        environment={"key": "value"},
        # layers=[]
    )

    events.Rule(
        self,
        'ingest-trigger-sample',
        description='Trigger for sample ingest',
        schedule=events.Schedule.cron(minute="0", hour="4"),  # Every day at 4am
        targets=[targets.LambdaFunction(handler=ingest_func)])
def __init__(self, scope: core.Construct, id: str, slack_channel: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    lambda_role = _iam.Role(
        self,
        'SlackLambdaRole',
        assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMReadOnlyAccess'),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole')
        ])

    function = _lambda.Function(self, 'BatchSlackLambda',
                                handler='notify_slack.lambda_handler',
                                runtime=_lambda.Runtime.PYTHON_3_7,
                                code=_lambda.Code.asset('lambdas/batch'),
                                environment={
                                    "SLACK_HOST": "hooks.slack.com",
                                    "SLACK_CHANNEL": slack_channel
                                },
                                role=lambda_role)

    rule = _events.Rule(self, 'BatchEventToSlackLambda')
    rule.add_event_pattern(
        source=['aws.batch'],
        detail_type=['Batch Job State Change'],
        detail={'status': ['FAILED', 'SUCCEEDED', 'RUNNABLE']})
    rule.add_target(_events_targets.LambdaFunction(handler=function))
def __init__(self, app: App, id: str) -> None:
    super().__init__(app, id)

    # CloudWatch Logs Group
    log_group = logs.LogGroup(
        self,
        "logs",
        retention=logs.RetentionDays.ONE_DAY,
        removal_policy=RemovalPolicy.DESTROY
    )

    # Custom EventBridge Bus
    custom_bus = events.EventBus(
        self,
        "bus",
        event_bus_name="test-bus-cdk"
    )

    # EventBridge Rule
    rule = events.Rule(
        self,
        "rule",
        event_bus=custom_bus
    )
    rule.add_event_pattern(
        source=["my-cdk-application"],
        detail_type=["message"]
    )
    rule.add_target(targets.CloudWatchLogGroup(log_group))

    CfnOutput(
        self,
        "LogGroupName",
        description="Name of CloudWatch Log Group",
        value=log_group.log_group_name
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # The code that defines your stack goes here

    # 1. Create a CloudWatch Events rule
    # 2. Make that rule track CloudTrail events
    ep = {"source": ["aws.logs"]}
    rule = events.Rule(self, "cdkRule_clw",
                       description='Rule created by CDK',
                       enabled=True,
                       rule_name="rulebycdk_clw",
                       event_pattern=ep)

    # 3. Create the response Lambda and add it as a target of the rule
    lambda_dir_path = os.path.join(os.getcwd(), "aws_cdk")
    response_lambda = _lambda.Function(
        self,
        "InClw01ResponseFunction",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="clwUnauthAccessResponse.lambda_handler",
        code=_lambda.Code.from_asset(lambda_dir_path),
        function_name="InClw01ResponseFunction")
    # The function must be wrapped in an events target before it can be attached to the rule
    rule.add_target(targets.LambdaFunction(response_lambda))
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # SNS Topic
    MySnsTopic = sns.Topic(self, "MySnsTopic")

    # Custom EventBridge Bus
    custom_bus = events.EventBus(self, "bus", event_bus_name="test-bus-cdk")

    # EventBridge Rule
    rule = events.Rule(self, "rule", event_bus=custom_bus)

    # Event Pattern to filter events
    rule.add_event_pattern(source=["my-application"], detail_type=["message"])

    # SNS topic as target for the EventBridge Rule
    rule.add_target(targets.SnsTopic(MySnsTopic))

    # CDK Outputs
    CfnOutput(self, "SNS topic name",
              description="SNS topic name",
              value=MySnsTopic.topic_name)
    CfnOutput(self, "SNS topic ARN",
              description="SNS topic ARN",
              value=MySnsTopic.topic_arn)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(
        scope,
        id,
        **kwargs,
    )

    self.lambda_function = aws_lambda.Function(
        self,
        "CeleryMetricsLambdaFunction",
        code=aws_lambda.Code.asset("awslambda"),
        handler="publish_celery_metrics.lambda_handler",
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        environment=scope.variables.regular_variables,
    )

    self.celery_default_cw_metric_schedule = events.Rule(
        self,
        "CeleryDefaultCWMetricSchedule",
        schedule=events.Schedule.rate(core.Duration.minutes(5)),
        targets=[
            events_targets.LambdaFunction(handler=self.lambda_function)
        ],
    )

    # TODO: refactor this to loop through CloudWatch metrics for multiple celery queues
    scope.celery_default_service.default_celery_queue_cw_metric.grant_put_metric_data(
        scope.backend_service.backend_task.task_role
    )
def create_url_requester(self):
    url_requester = self.create_lambda_with_error_alarm("url_requester")

    url_requester.add_event_source(
        aws_lambda_event_sources.DynamoEventSource(
            self.yelp_table,
            starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
            batch_size=5,
            bisect_batch_on_error=True,
            retry_attempts=0,
        ))
    url_requester.add_event_source(
        aws_lambda_event_sources.DynamoEventSource(
            self.config_table,
            starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
            batch_size=5,
            bisect_batch_on_error=True,
            retry_attempts=0,
        ))

    rule = aws_events.Rule(
        self,
        "UrlRequesterRule",
        schedule=aws_events.Schedule.cron(minute="*/5",
                                          hour="*",
                                          month="*",
                                          week_day="*",
                                          year="*"),
    )
    rule.add_target(aws_events_targets.LambdaFunction(url_requester))

    self.url_requester = url_requester
    return self.url_requester
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) self.table = aws_dynamodb.Table( self, f"{id}-table", partition_key=aws_dynamodb.Attribute( name="url", type=aws_dynamodb.AttributeType.STRING), ) poller = get_lambda(self, f"{id}-lambda-poller", code=f"lib/stacks/{id}/lambdas", handler="pagespeed_poller.handler", layers=[get_layer(self, "requests_oauthlib", id)], environment={ "DYNAMODB_TABLE": self.table.table_name, "GOOGLE_PAGESPEED_API_KEY": env["GOOGLE_PAGESPEED_API_KEY"], "GOOGLE_PAGESPEED_TARGET_URLS": env["GOOGLE_PAGESPEED_TARGET_URLS"], }) self.table.grant_read_write_data(poller) cronjob = aws_events.Rule( self, f"{id}-scheduled-event", enabled=True, schedule=aws_events.Schedule.cron(hour="6-16", minute="30"), # pylint: disable=no-value-for-parameter ) cronjob.add_target(aws_events_targets.LambdaFunction(handler=poller))
def __init__(self, app: App, id: str) -> None:
    super().__init__(app, id)

    # Lambda Function
    with open("lambda-handler.py", encoding="utf8") as fp:
        handler_code = fp.read()

    lambdaFn = lambda_.Function(
        self,
        "Singleton",
        code=lambda_.InlineCode(handler_code),
        handler="index.main",
        timeout=Duration.seconds(10),
        runtime=lambda_.Runtime.PYTHON_3_9,
    )

    # Set Lambda Logs Retention and Removal Policy
    logs.LogGroup(
        self,
        'logs',
        log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
        removal_policy=RemovalPolicy.DESTROY,
        retention=logs.RetentionDays.ONE_DAY
    )

    # EventBridge Rule
    rule = events.Rule(
        self,
        "Rule",
    )
    rule.add_event_pattern(
        source=["cdk.myApp"],
        detail_type=["transaction"]
    )
    rule.add_target(targets.LambdaFunction(lambdaFn))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # The code that defines your stack goes here

    lambdaFn = _lambda.Function(self, "SampleLambdaFunction",
                                code=_lambda.Code.from_asset('function/'),
                                runtime=_lambda.Runtime.PYTHON_3_7,
                                handler="index.lambda_handler",
                                function_name="sample_lambda_function")

    # Add an environment variable
    lambdaFn.add_environment(key="STAGE", value="DEV")

    # Create an S3 bucket and configure a notification event
    bucket = _s3.Bucket(self, "SampleBucket",
                        bucket_name="kimi-first-cdk-bucket")
    notification = aws_s3_notifications.LambdaDestination(lambdaFn)
    bucket.add_event_notification(_s3.EventType.OBJECT_CREATED,
                                  notification,
                                  _s3.NotificationKeyFilter(prefix="hoge", suffix=".csv"))

    # Configure a periodically triggered event rule
    rule = _events.Rule(self, "SampleEventRule",
                        rule_name="schedule_trigger_event",
                        schedule=_events.Schedule.expression("cron(10 * * * ? *)"))
    rule.add_target(_targets.LambdaFunction(lambdaFn))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # DynamoDB Table
    table = dynamo_db.Table(self, "Table",
                            partition_key=dynamo_db.Attribute(
                                name="requestid",
                                type=dynamo_db.AttributeType.STRING),
                            removal_policy=core.RemovalPolicy.DESTROY)

    # Create the Lambda function we want to run on a schedule
    scheduled_lambda = _lambda.Function(
        self,
        'ScheduledLambda',
        runtime=_lambda.Runtime.NODEJS_12_X,
        code=_lambda.Code.from_asset('lambda-fns/scheduled-lambda'),
        handler='index.handler',
        environment={"TABLE_NAME": table.table_name})

    # Allow our lambda fn to write to the table
    table.grant_read_write_data(scheduled_lambda)

    # Create EventBridge rule that will execute our Lambda every 2 minutes
    schedule = events.Rule(
        self,
        'scheduledLambda-schedule',
        schedule=events.Schedule.expression('rate(2 minutes)'))

    # Set the target of our EventBridge rule to our Lambda function
    schedule.add_target(targets.LambdaFunction(scheduled_lambda))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Create the producer Lambda function
    coincheck_rate_recorder_lambda = aws_lambda.Function(
        self,
        "coincheck_rate_recorder",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        code=aws_lambda.Code.asset("./lambda"))
    coincheck_rate_recorder_lambda.add_environment("TABLE_NAME", rate_table_name)

    # Grant the Lambda function permission to write to the rate table
    table = aws_dynamodb.Table.from_table_name(self, 'table', rate_table_name)
    table.grant_write_data(coincheck_rate_recorder_lambda)

    # Run every minute
    one_minute_rule = aws_events.Rule(
        self,
        "one_minute_rule",
        schedule=aws_events.Schedule.rate(core.Duration.minutes(1)),
    )
    one_minute_rule.add_target(
        targets.LambdaFunction(coincheck_rate_recorder_lambda))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    lambdaFn = lambda_.Function(
        self,
        "Singleton",
        code=lambda_.AssetCode(path="./lambda"),
        handler="handler.main",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        environment={"PYTHONPATH": "/var/task/packages:/var/runtime"},
    )

    # Run every 1 hour
    # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    rule = events.Rule(
        self,
        "Rule",
        schedule=events.Schedule.cron(minute='0',
                                      hour='0/1',
                                      month='*',
                                      week_day='*',
                                      year='*'),
    )
    rule.add_target(targets.LambdaFunction(lambdaFn))