def __init__(self, scope: core.Construct, construct_id: str, queue_context: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    q = dict(self.node.try_get_context(queue_context))

    queue_dlq = _sqs.Queue(self, q["queue_dlq_name"], queue_name=q["queue_dlq_name"])

    queue = _sqs.Queue(
        self,
        q["queue_name"],
        queue_name=q["queue_name"],
        dead_letter_queue=_sqs.DeadLetterQueue(
            max_receive_count=q["queue_dlq_max_receive_count"],
            queue=queue_dlq),
        encryption=_sqs.QueueEncryption.KMS_MANAGED,
        visibility_timeout=Duration.seconds(30),
        delivery_delay=Duration.seconds(15),
        retention_period=Duration.hours(14),
    )

    self.queue = queue
    self.queue_dlq = queue_dlq

    # Outputs
    core.CfnOutput(self, "QueueUrl", value=queue.queue_url)
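# A minimal sketch of the cdk.json context block the construct above reads via
# try_get_context(queue_context). The context key "jobs_queue" and all values
# are illustrative assumptions; only the three key names below are actually
# required by the code:
#
#   {
#     "context": {
#       "jobs_queue": {
#         "queue_name": "jobs-queue",
#         "queue_dlq_name": "jobs-queue-dlq",
#         "queue_dlq_max_receive_count": 3
#       }
#     }
#   }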
def _get_health_check(self):
    return HealthCheck(
        healthy_http_codes="200",
        healthy_threshold_count=5,
        interval=Duration.seconds(30),
        path="/healthz",
        timeout=Duration.seconds(5),
        unhealthy_threshold_count=2,
    )
def __create_cloud_front_cache_policy(self) -> aws_cloudfront.CachePolicy:
    return aws_cloudfront.CachePolicy(
        self,
        'CloudFrontCachePolicy',
        comment='The CloudFront cache policy used by the DefaultCacheBehavior',
        default_ttl=Duration.seconds(1),
        max_ttl=Duration.seconds(1),
        min_ttl=Duration.seconds(1))
def get_alb_config(self, protocol: ElbProtocol = ElbProtocol.HTTP, port: int = None) -> ElbHealthCheck:
    """Get the health check configuration for the ALB Target Group.

    Parameters:
        protocol: The protocol to use for the health check. By default,
            this is non-SSL HTTP.
        port: Use a different port for the health check. By default the
            ALB uses the traffic port.
    """
    # An ALB load balancer uses its health check to determine which tasks
    # in the ECS service should receive traffic. Additionally, it will
    # notify the ECS system to stop any tasks that it determines are
    # unhealthy. The ALB health check configuration is mostly
    # independent of the ECS health check configuration.
    #
    # The same health check configuration is used to determine when a
    # newly started task is ready to receive traffic, and also to
    # determine if an ongoing task has become broken.
    #
    # There is a problem with the whole-system configuration: if the ALB
    # stops a newly started task that it thinks is unhealthy, then ECS
    # doesn't realise that the service is unhealthy, and the deployment
    # will incorrectly succeed.
    #
    # The solution is to tweak the health check configurations so that
    # the ECS health check quickly detects and stops an unhealthy task
    # (either at startup or ongoing), and the ALB health check never
    # stops a task at startup.
    #
    # Therefore we create an ALB health check configuration that is quite
    # slow. This means that some genuine problems don't get automatically
    # resolved in a timely manner, but in practice problems in this
    # category (external to the container itself) are unusual.
    if self.timeout >= self.check_interval_alb:
        raise ValueError(
            "Healthcheck timeout must be shorter than the ALB repeat interval")
    if (self.min_time_to_unhealthy_alb + 30) < self.max_time_to_unhealthy_ecs:
        raise ValueError(
            "Healthcheck timing means that the ALB might stop an unhealthy "
            "ECS task before ECS does, which is undesirable.")
    return ElbHealthCheck(
        # Unhealthy ECS tasks always get stopped and never get to recover,
        # therefore there is no point setting this.
        # healthy_threshold_count=5,
        interval=Duration.seconds(self.check_interval_alb),
        path=self.endpoint_path,
        port=port,
        protocol=protocol,
        timeout=Duration.seconds(self.timeout),
        unhealthy_threshold_count=self.num_checks_alb,
    )
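# A worked example of the two timing guards above. The concrete values are
# illustrative assumptions (the class defines these attributes elsewhere), as
# is the relation min_time_to_unhealthy_alb = check_interval_alb * num_checks_alb:
#
#   timeout = 5                  # seconds per health check attempt
#   check_interval_alb = 30      # seconds between ALB checks
#   num_checks_alb = 10          # failed checks before the ALB acts
#   min_time_to_unhealthy_alb = 30 * 10 = 300
#   max_time_to_unhealthy_ecs = 60
#
#   Guard 1: timeout (5) < check_interval_alb (30)                     -> passes
#   Guard 2: min_time_to_unhealthy_alb + 30 (330) >= max_time_ecs (60) -> passes
#
# With these numbers ECS stops an unhealthy task in about a minute, while the
# ALB would need roughly five minutes, so ECS always acts first - exactly the
# behaviour the comments above call for.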
def create_cloudfront_distribution(self):
    return Distribution(
        self,
        "image-resize-distribution",
        default_behavior=BehaviorOptions(
            origin=S3Origin(bucket=self.image_bucket),
            cache_policy=CachePolicy(
                self,
                "image-resize-cache-policy",
                default_ttl=Duration.seconds(0),
                min_ttl=Duration.seconds(0),
                max_ttl=Duration.days(365))),
        default_root_object="index.html")
def __init__(self, scope: Construct, id: str, functions: LambdaLib, **kwargs) -> None:
    super().__init__(scope, id)

    # Step Function
    submit_job = tasks.LambdaInvoke(self, "Submit Job",
        lambda_function=functions.send_email_approval,
        payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        result_path=sfn.JsonPath.DISCARD
    )

    wait_x = sfn.Wait(self, "Wait",
        time=sfn.WaitTime.duration(Duration.minutes(2))
    )

    get_status = tasks.LambdaInvoke(self, "Get Job Status",
        lambda_function=functions.check_status_dynamo,
        payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        result_path="$.status"
    )

    restrict_es = tasks.LambdaInvoke(self, "Restrict ES Policy",
        lambda_function=functions.restric_es_policy,
        payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
    )

    restrict_rds = tasks.LambdaInvoke(self, "Restrict RDS",
        lambda_function=functions.restric_rds_policy,
        payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
    )

    restrict_es_condition = sfn.Condition.string_equals(
        "$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_ES_PUBLIC)
    restrict_rds_condition = sfn.Condition.string_equals(
        "$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_RDS_PUBLIC)

    definition = (submit_job.next(wait_x)
                  .next(get_status)
                  .next(sfn.Choice(self, "Job Complete?")
                        .when(sfn.Condition.string_equals("$.status.Payload.status", "Rejected!"), wait_x)
                        # .when(sfn.Condition.string_equals("$.status.Payload.status", "NON_COMPLIANT"), final_task)
                        # .when(sfn.Condition.string_equals("$.status.Payload.status", "Accepted!"), final_task))
                        .otherwise(sfn.Choice(self, "Remediation Choice")
                                   .when(restrict_es_condition, restrict_es)
                                   .when(restrict_rds_condition, restrict_rds)))
                  )

    self.state_machine = sfn.StateMachine(self, "StateMachine",
        definition=definition,
        timeout=Duration.hours(2)
    )
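# The choice conditions above read the Config rule name from an EventBridge
# event recorded via CloudTrail. An illustrative, heavily trimmed input event -
# the field values here are assumptions, not taken from the source:
#
#   {
#     "detail": {
#       "additionalEventData": {
#         "configRuleName": "es-public-access-rule"
#       }
#     }
#   }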
def __init__(
    self,
    scope: Construct,
    id: str,
    cluster: _Cluster,
    shared_airflow_env: dict,
    vpc: _Vpc,
    **kwargs,
) -> None:
    super().__init__(scope, id, **kwargs)

    task_definition = FargateTaskDefinition(
        self, "task-def", cpu=512, memory_limit_mib=1024
    )
    container = task_definition.add_container(
        "container",
        image=ContainerImage.from_registry("apache/airflow:1.10.12-python3.8"),
        command=["webserver"],
        logging=LogDriver.aws_logs(stream_prefix="webserver"),
        environment=shared_airflow_env,
    )
    port_mapping = PortMapping(container_port=8080, host_port=8080)
    container.add_port_mappings(port_mapping)

    service = FargateService(
        self,
        "service",
        cluster=cluster,
        task_definition=task_definition,
    )

    lb = ApplicationLoadBalancer(self, "lb", vpc=vpc, internet_facing=True)
    listener = lb.add_listener("public_listener", port=80, open=True)

    health_check = HealthCheck(
        interval=Duration.seconds(60),
        path="/health",
        timeout=Duration.seconds(5),
    )

    listener.add_targets(
        "webserver",
        port=8080,
        targets=[service],
        health_check=health_check,
    )

    CfnOutput(self, "LoadBalancerDNS", value=lb.load_balancer_dns_name)
def __init__(self, scope: Construct, stack_id: str, *, props: StorageTierDocDBProps, **kwargs):
    """
    Initializes a new instance of StorageTier.

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for the storage tier.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, props=props, **kwargs)

    doc_db = DatabaseCluster(
        self,
        'DocDBCluster',
        vpc=props.vpc,
        vpc_subnets=SubnetSelection(subnet_type=SubnetType.PRIVATE),
        instance_type=props.database_instance_type,
        # TODO - For cost considerations this example only uses 1 Database instance.
        # It is recommended that when creating your render farm you use at least
        # 2 instances for redundancy.
        instances=1,
        master_user=Login(username='******'),
        engine_version='3.6.0',
        backup=BackupProps(
            # We recommend setting the retention of your backups to 15 days
            # for security reasons. The default retention is just one day.
            # Please note that changing this value will affect cost.
            retention=Duration.days(15)),
        # TODO - Evaluate this removal policy for your own needs. This is set to DESTROY to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that your data is not accidentally deleted, you should modify this value.
        removal_policy=RemovalPolicy.DESTROY)

    self.database = DatabaseConnection.for_doc_db(database=doc_db, login=doc_db.secret)
def __init__(
    self,
    scope: Construct,
    construct_id: str,
    *,
    deploy_env: str,
    users_role: aws_iam.Role,
    package_name: str,
    botocore_lambda_layer: aws_lambda_python.PythonLayerVersion,
):
    super().__init__(scope, construct_id)

    self.lambda_function = aws_lambda.Function(
        self,
        f"{deploy_env}-{construct_id}-function",
        function_name=f"{deploy_env}-{construct_id}",
        handler=f"backend.{package_name}.entrypoint.lambda_handler",
        runtime=PYTHON_RUNTIME,
        timeout=Duration.seconds(60),
        code=bundled_code(package_name),
        layers=[botocore_lambda_layer],  # type: ignore[list-item]
    )

    self.lambda_function.add_environment("DEPLOY_ENV", deploy_env)
    self.lambda_function.grant_invoke(users_role)  # type: ignore[arg-type]
def get_ecs_service_properties(self) -> dict:
    """Get health-check-related properties for an ECS Service."""
    # This is the period of time during which an unhealthy state is not
    # acted on - an unhealthy task won't be stopped inside this period.
    # Note that failed health check requests are still counted as
    # contributing to an unhealthy state.
    #
    # Also note that the same value is used by both ECS container
    # health checks and ALB health checks - see
    # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service_definition_parameters.html
    # This means that if the ALB determines that the task is unhealthy
    # before this time has elapsed, then the ALB will stop the task
    # instead of ECS, and our deployment model will break.
    #
    # Therefore we hard-code it to something small, in order to ensure
    # it has no impact.
    grace_period = 10
    if self.min_time_to_unhealthy_alb < grace_period:
        raise ValueError(
            "The ECS grace period is too long, which means that ALB might "
            "mark a container as unhealthy at the same time as ECS.")
    return dict(
        health_check_grace_period=Duration.seconds(grace_period),
        # The default healthy percentages are reasonable.
        #
        # It's also possible that they are ignored for Fargate deployments - see
        # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service_definition_parameters.html
        #
        # Therefore we don't set these.
        # min_healthy_percent=50,
        # max_healthy_percent=200,
    )
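# A minimal sketch of consuming these properties when building the service.
# The FargateService call and the surrounding names (cluster, task_definition,
# health) are assumptions about the enclosing app, not taken from the source:
#
#   service = ecs.FargateService(
#       self, "Service",
#       cluster=cluster,
#       task_definition=task_definition,
#       **health.get_ecs_service_properties(),  # health_check_grace_period=...
#   )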
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    handler = _lambda.Function(
        self,
        "demo_func",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="demo_func.handler",
        timeout=Duration.minutes(1),  # pylint: disable=E1120
        code=_lambda.Code.asset("lambda_code/demo_func"),  # pylint: disable=E1120
    )

    api_gw = _apigw.RestApi(self, "ApiGatewayForSlack", rest_api_name="gw_for_slack")

    exam_entity = api_gw.root.add_resource("test")
    exam_entity_lambda_integration = _apigw.LambdaIntegration(
        handler,
        proxy=False,
        integration_responses=[{
            "statusCode": "200"
        }],
    )
    exam_entity.add_method(
        "GET",
        exam_entity_lambda_integration,
        method_responses=[{
            "statusCode": "200"
        }],
    )
def __init__(
        self,
        scope: Stack,
        name: str,
        twilio_account_sid: str,
        twilio_auth_token: str,
        twilio_workspace_sid: str
) -> None:
    self.__name = name

    super().__init__(
        scope=scope,
        id=name,
        uuid=f'{name}-uuid',
        function_name=name,
        code=self.__code(),
        layers=[TwilioLayer(scope, f'TwilioLayerFor{name}')],
        timeout=Duration.minutes(1),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_8,
        environment={
            'TWILIO_ACCOUNT_SID': twilio_account_sid,
            'TWILIO_AUTH_TOKEN': twilio_auth_token,
            'TWILIO_WORKSPACE_SID': twilio_workspace_sid
        }
    )
async def create_canary_function(self, id: str) -> Function:
    with open('canary/canary.py', 'r') as code:
        canary_code = code.read()

    function = Function(
        self,
        '{}CanaryFunction'.format(id),
        timeout=Duration.seconds(3),
        code=InlineCode(canary_code),
        handler='index.handler',
        tracing=Tracing.ACTIVE,
        initial_policy=[MINIMAL_FUNCTION_POLICY_STATEMENT],
        runtime=Runtime(
            name='python3.7',
            supports_inline_code=True,
        )
    )

    Rule(
        self,
        '{}CanaryRule'.format(id),
        enabled=True,
        schedule=Schedule.cron(),  # no cron fields specified
        targets=[LambdaFunction(handler=function)])

    return function
async def create_site_function(self, id: str, domain: str, cdn_name: str) -> Function:
    env = {
        'PROD': 'True',
        'SITE_DOMAIN': domain,
        'APP_VERSION': '0.02',
        'STATIC_DOMAIN': cdn_name,
    }

    site_code_asset = Asset(
        self,
        '{}FunctionAsset'.format(id),
        path='site_function')
    site_code = S3Code(
        bucket=site_code_asset.bucket,
        key=site_code_asset.s3_object_key)

    return Function(
        self,
        '{}Function'.format(id),
        timeout=Duration.seconds(3),
        code=site_code,
        handler='site_function.handler',
        environment=env,
        tracing=Tracing.ACTIVE,
        initial_policy=[DDB_FUNCTION_POLICY_STATEMENT],
        runtime=Runtime(
            name='python3.7',
            supports_inline_code=True,
        )
    )
def __init__(self, scope: Construct, construct_id: str, env: Environment) -> None:
    super().__init__(scope, construct_id, env=env)

    smol_table = SmolTable(self, "SmolTable", table_name=TABLE_NAME)
    smol_vpc = Vpc.from_lookup(self, "CoreVPC", vpc_name=VPC_NAME)
    smol_subnets = SubnetSelection(
        one_per_az=True,
        subnet_type=SubnetType.PRIVATE,
    )
    smol_lambda = Function(
        self,
        "SmolAPI",
        code=Code.from_asset_image(directory=abspath("./")),
        environment={
            "CAPTCHA_KEY": environ["CAPTCHA_KEY"],
            "SAFE_BROWSING_KEY": environ["SAFE_BROWSING_KEY"],
        },
        function_name=FUNCTION_NAME,
        handler=Handler.FROM_IMAGE,
        log_retention=RetentionDays.ONE_WEEK,
        memory_size=MEMORY_ALLOCATION,
        reserved_concurrent_executions=RESERVED_CONCURRENCY,
        runtime=Runtime.FROM_IMAGE,
        timeout=Duration.seconds(TIMEOUT_SEC),
        tracing=Tracing.ACTIVE,
        vpc=smol_vpc,
        vpc_subnets=smol_subnets,
    )
    smol_table.table.grant(smol_lambda, "dynamodb:DescribeTable")
    smol_table.table.grant(smol_lambda, "dynamodb:GetItem")
    smol_table.table.grant(smol_lambda, "dynamodb:PutItem")
    SmolTarget(self, "SmolTarget", smol_lambda, API_HOST)
def __init__(self, scope: cdk.Construct, construct_id: str, lambda_context: str, **kwargs) -> None:
    super().__init__(scope, construct_id)

    fn = dict(self.node.try_get_context(lambda_context))

    lambda_fn = Function(
        self,
        fn["fn_name"],
        function_name=fn["fn_name"],
        runtime=Runtime.PYTHON_3_8,
        handler=fn["fn_handler"],
        code=Code.from_asset(fn["fn_path"]),
        tracing=Tracing.ACTIVE,
        current_version_options={
            "removal_policy": cdk.RemovalPolicy.RETAIN
        },
        retry_attempts=fn["fn_retry_attempts"],
        timeout=Duration.seconds(fn["fn_timeout"]),
        reserved_concurrent_executions=fn["fn_reserved_concurrency"])

    lambda_fn_alias = lambda_fn.current_version.add_alias(fn["fn_alias"])

    # Outputs
    cdk.CfnOutput(self, fn["fn_name"] + 'Arn', value=lambda_fn.function_arn)

    self._function = lambda_fn
    self._function_alias = lambda_fn_alias
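# A minimal sketch of the cdk.json context block the construct above reads via
# try_get_context(lambda_context). The context key "my_fn" and the values are
# illustrative assumptions; the key names are the ones the code actually reads:
#
#   {
#     "context": {
#       "my_fn": {
#         "fn_name": "MyFunction",
#         "fn_handler": "index.handler",
#         "fn_path": "lambda/my_fn",
#         "fn_retry_attempts": 2,
#         "fn_timeout": 30,
#         "fn_reserved_concurrency": 5,
#         "fn_alias": "live"
#       }
#     }
#   }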
def __init__(self, scope: Stack):
    super().__init__(
        scope=scope,
        id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
        code=Code.from_asset(root),
        handler='handler.handler',
        runtime=Runtime.PYTHON_3_8,
        timeout=Duration.minutes(5),
        memory_size=512,
        layers=[
            Layer(
                scope=scope,
                name=f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
                dependencies={
                    # These dependencies are required for running unit tests inside lambda functions.
                    # Pytest is used for running the actual unit tests.
                    'pytest': PackageVersion.from_string_version('6.2.5'),
                    # Pook is used for HTTP mocking, therefore it is also needed here.
                    'pook': PackageVersion.from_string_version('1.0.1'),
                    # Not sure about this dependency. The Lambda runtime throws errors if it's missing.
                    'aws-cdk.core': PackageVersion.from_string_version('1.99.0'),
                    # This dependency should be installed with 'pook', since 'pook' depends on
                    # 'jsonschema', which depends on this. For some reason it isn't, and the
                    # tests would fail with an import error otherwise.
                    'importlib-resources': PackageVersion.from_string_version('5.4.0')
                })
        ])
def __init__(self, scope: Stack, name: str) -> None:
    """
    Constructor.

    :param scope: CloudFormation stack in which this function will be deployed.
    :param name: The name of the function.
    """
    self.__name = name

    super().__init__(
        scope=scope,
        id=name,
        uuid=f'{name}-uuid',
        function_name=name,
        code=self.__code(),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_8,
        timeout=Duration.minutes(1),
    )

    # Add permission to create deployments. Since this is a singleton Lambda
    # function, we cannot specify a specific API Gateway resource.
    self.add_to_role_policy(
        PolicyStatement(
            actions=['apigateway:POST', 'apigateway:PATCH'],
            resources=['*']))
def _create_eks_service_lambda(self) -> aws_lambda.Function:
    return lambda_python.PythonFunction(
        scope=self,
        id="eks_service_lambda",
        function_name=f"orbit-{self.context.name}-eks-service-handler",
        entry=_lambda_path("eks_service_handler"),
        index="index.py",
        handler="handler",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        timeout=Duration.seconds(5),
        environment={
            "REGION": self.context.region,
        },
        initial_policy=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["eks:List*", "eks:Describe*"],
                resources=[
                    f"arn:aws:eks:{self.context.region}:{self.context.account_id}:cluster/orbit-*",
                    f"arn:aws:eks:{self.context.region}:{self.context.account_id}:nodegroup/orbit-*/*/*",
                ],
            )
        ],
    )
def _create_token_validation_lambda(self) -> aws_lambda.Function:
    return lambda_python.PythonFunction(
        scope=self,
        id="token_validation_lambda",
        function_name=f"orbit-{self.context.name}-token-validation",
        entry=_lambda_path("token_validation"),
        index="index.py",
        handler="handler",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        timeout=Duration.seconds(5),
        environment={
            "COGNITO_USER_POOL_ID": self.user_pool.user_pool_id,
            "REGION": self.context.region,
            "COGNITO_USER_POOL_CLIENT_ID": self.user_pool_client.user_pool_client_id,
        },
        initial_policy=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "ec2:Describe*",
                    "logs:Create*",
                    "logs:PutLogEvents",
                    "logs:Describe*"
                ],
                resources=["*"],
            )
        ],
    )
def create_lambda(self, envs: EnvSettings):
    is_app_only = self.node.try_get_context("is_app_only")

    if is_app_only == "true":
        code = Code.from_asset(
            path="../backend/functions/image_resize/.serverless/main.zip")
    else:
        code = Code.from_cfn_parameters()

    function = Function(
        self,
        "image-resize-lambda",
        function_name=f"{envs.project_name}-image-resize",
        code=code,
        handler="index.handler",
        runtime=Runtime.NODEJS_12_X,
        memory_size=512,
        timeout=Duration.seconds(30),
        tracing=Tracing.ACTIVE,
    )

    api_gateway = LambdaRestApi(
        self,
        "ImageResizeLambdaApi",
        rest_api_name=f"{envs.project_name}-image-resize",
        handler=function)

    return function, code, api_gateway
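# The "is_app_only" flag above comes from CDK context; a sketch of supplying it
# at deploy time (the value is illustrative):
#
#   cdk deploy -c is_app_only=true
#
# Any other value falls through to Code.from_cfn_parameters(), so the code
# location is supplied through CloudFormation parameters at deploy time instead.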
def _create_queues(self):
    post_anime_dl = Queue(self, "post_anime_dl")
    self.post_anime_queue = Queue(
        self,
        "anime",
        dead_letter_queue=DeadLetterQueue(max_receive_count=5, queue=post_anime_dl),
        # 20 seconds is the maximum wait time; a non-zero value enables SQS long polling.
        receive_message_wait_time=Duration.seconds(20)
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    queue = sqs.Queue(
        self,
        "MyFirstQueue",
        visibility_timeout=Duration.seconds(300),
    )
def new_codebuild_task(
        self, project: _codebuild.Project) -> _tasks.CodeBuildStartBuild:
    return _tasks.CodeBuildStartBuild(
        self,
        # Use the project's construct ID here: interpolating the Project object
        # itself would embed its Python repr (including a memory address) in the
        # construct ID, which is not stable across synths.
        f"{project.node.id}task",
        project=project,
        timeout=Duration.minutes(10),
        integration_pattern=_step_fn.IntegrationPattern.RUN_JOB)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # ECR Repository
    ecr_repo = ecr.Repository(
        self,
        "EcrRepository",
        repository_name="intro_lambda_container"
    )

    # Lambda Functions
    lam.Function(
        self,
        "LambdaFunctionPythonBaseImage",
        function_name="intro-lambda-container-python-base-image",
        code=lam.Code.from_ecr_image(
            repository=ecr_repo,
            tag="python-base-image"
        ),
        handler=lam.Handler.FROM_IMAGE,
        runtime=lam.Runtime.FROM_IMAGE,
        timeout=Duration.seconds(60)
    )
    lam.Function(
        self,
        "LambdaFunctionPythonCustomImage",
        function_name="intro-lambda-container-python-custom-image",
        code=lam.Code.from_ecr_image(
            repository=ecr_repo,
            tag="python-custom-image"
        ),
        handler=lam.Handler.FROM_IMAGE,
        runtime=lam.Runtime.FROM_IMAGE,
        timeout=Duration.seconds(60)
    )
    lam.Function(
        self,
        "LambdaFunctionRustCustomImage",
        function_name="intro-lambda-container-rust-custom-image",
        code=lam.Code.from_ecr_image(
            repository=ecr_repo,
            tag="rust-custom-image"
        ),
        handler=lam.Handler.FROM_IMAGE,
        runtime=lam.Runtime.FROM_IMAGE,
        timeout=Duration.seconds(60)
    )
def _create_lambdas(self):
    clean_pycache()

    for root, dirs, files in os.walk(LAMBDAS_DIR):
        for f in files:
            if f != "__init__.py":
                continue

            parent_folder = os.path.basename(os.path.dirname(root))
            lambda_folder = os.path.basename(root)
            name = f"{parent_folder}-{lambda_folder}"
            lambda_config = self.lambdas_config[name]

            layers = []
            for layer_name in lambda_config["layers"]:
                layers.append(self.layers[layer_name])

            lambda_role = Role(
                self,
                f"{name}_role",
                assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
            )
            for policy in lambda_config["policies"]:
                lambda_role.add_to_policy(policy)
            lambda_role.add_managed_policy(
                ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"))

            lambda_args = {
                "code": Code.from_asset(root),
                "handler": "__init__.handle",
                "runtime": Runtime.PYTHON_3_8,
                "layers": layers,
                "function_name": name,
                "environment": lambda_config["variables"],
                "role": lambda_role,
                "timeout": Duration.seconds(lambda_config["timeout"]),
                "memory_size": lambda_config["memory"],
            }
            if "concurrent_executions" in lambda_config:
                lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

            self.lambdas[name] = Function(self, name, **lambda_args)

    self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

    Rule(
        self,
        "titles_updater",
        schedule=Schedule.cron(hour="2", minute="10"),
        targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
    )
    Rule(
        self,
        "episodes_updater",
        schedule=Schedule.cron(hour="4", minute="10"),
        targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
    )
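# An illustrative shape for the self.lambdas_config dict consumed above. The
# entry name matches one the code references; the values are assumptions, not
# taken from the original project:
#
#   self.lambdas_config = {
#       "sqs_handlers-post_anime": {
#           "layers": ["common"],
#           "policies": [PolicyStatement(actions=["dynamodb:PutItem"], resources=["*"])],
#           "variables": {"TABLE_NAME": "anime"},
#           "timeout": 30,
#           "memory": 256,
#           "concurrent_executions": 2,  # optional key
#       },
#       # ... one entry per <parent_folder>-<lambda_folder> package
#   }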
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    stream = kds.Stream(
        self,
        "InputStream",
        shard_count=1,
        retention_period=Duration.hours(24))

    self._stream = stream
class Config:
    # Data bucket settings
    data_bucket_name = 'epg-data-s3-bucket-42'
    data_bucket_noncurrent_version_expiration = Duration.days(30)

    # Out bucket settings
    out_bucket_name = 'epg-out-s3-bucket-42'

    # Notifications
    email_recipient = '*****@*****.**'

    # Update function
    update_function_rate = Duration.minutes(5)
    error_count_to_notify = 12

    @staticmethod
    def period_to_check_error_count() -> Duration:
        return Duration.minutes(
            Config.update_function_rate.to_minutes() *
            Config.error_count_to_notify * 2)
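# Worked arithmetic for period_to_check_error_count() with the values above:
#
#   update_function_rate.to_minutes() = 5
#   5 * 12 * 2 = 120  ->  Duration.minutes(120)
#
# i.e. the evaluation window is twice the time a run-every-5-minutes function
# needs to accumulate error_count_to_notify (12) consecutive failures.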
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    table_name = "posts2"
    function_name = "cl2"
    email = "*****@*****.**"

    table = Table(
        self,
        "cl_posts",
        table_name=table_name,
        partition_key=Attribute(name="url", type=AttributeType.STRING),
        time_to_live_attribute="ttl",
    )

    function = PythonFunction(
        self,
        "cl_function",
        function_name=function_name,
        entry="src",
        index="app.py",
        runtime=Runtime.PYTHON_3_8,
        environment={
            "cl_email": email,
            "cl_table_name": table_name
        },
        timeout=Duration.seconds(300),
        initial_policy=[
            PolicyStatement(
                actions=["ses:SendEmail", "ses:VerifyEmailIdentity"],
                resources=[
                    f"arn:aws:ses:{self.region}:{self.account}:identity/{email}"
                ],
            ),
            PolicyStatement(
                actions=[
                    "dynamodb:BatchGetItem",
                    "dynamodb:BatchWriteItem"
                ],
                resources=[table.table_arn],
            ),
        ],
    )

    with open("events/event.json") as f:
        event = json.load(f)

    Rule(
        self,
        "cl_schedule",
        schedule=Schedule.expression("cron(0 19 * * ? *)"),
        targets=[
            LambdaFunction(function, event=RuleTargetInput.from_object(event))
        ],
    )
def _create_job_processing_queue(self, project_name: str, lambda_size: int):
    return Queue(
        self,
        f"DataProcessingQueue-{lambda_size}",
        queue_name=f"{project_name}-data-processing-{lambda_size}",
        visibility_timeout=Duration.seconds(300),
        dead_letter_queue=DeadLetterQueue(
            queue=self.dead_letter_queue,
            max_receive_count=self.job_processing_max_retries),
    )