def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    self.event_bus = EventBus(scope=self, id='CustomEventBus', event_bus_name='CustomEventBus')

    self.source = Function(
        scope=self,
        id='SourceFunction',
        function_name='SourceFunction',
        code=Code.from_asset(path='./code_source/'),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_6,
    )

    # Allow the source function to publish events to the custom bus.
    self.source.add_to_role_policy(statement=PolicyStatement(
        actions=['events:PutEvents'],
        resources=[self.event_bus.event_bus_arn]))

    # Define rule.
    self.rule = Rule(
        scope=self,
        id='EventBusRule',
        description='Sample description.',
        enabled=True,
        event_bus=self.event_bus,
        event_pattern=EventPattern(detail={
            'Domain': ["MedInfo"],
            'Reason': ["InvokeTarget"]
        }),
        rule_name='EventBusRule',
    )

    # Add target. Keep the Lambda function and its rule-target wrapper as
    # separate attributes so the function is not overwritten.
    self.target_function = Function(
        scope=self,
        id='TargetFunction',
        function_name='TargetFunction',
        code=Code.from_asset(path='./code_target/'),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_6,
    )
    self.target: IRuleTarget = LambdaFunction(handler=self.target_function)
    self.rule.add_target(target=self.target)
def api_lambda_function(scope, name, handler, apigw, path, method, layer, tables, code="./backend"):
    _lambda = Function(
        scope,
        name,
        handler=handler,
        runtime=Runtime.PYTHON_3_8,
        code=Code.asset(code),
        tracing=Tracing.ACTIVE,
        layers=layer,
    )
    _lambda.add_environment("POLL_TABLE", tables[0].table_name)
    _lambda.add_environment("MAIN_PAGE_GSI", "main_page_gsi")
    apigw.add_routes(
        path=path,
        methods=[method],
        integration=LambdaProxyIntegration(handler=_lambda),
    )
    return _lambda
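# A minimal usage sketch for the helper above. The HttpApi, layer, and DynamoDB
# table names here are hypothetical placeholders, not part of the source; they
# only illustrate the expected argument shapes.
from aws_cdk.aws_apigatewayv2 import HttpMethod

poll_lambda = api_lambda_function(
    scope=self,
    name="GetPollsFunction",        # placeholder construct id
    handler="polls.get_polls",      # placeholder handler
    apigw=http_api,                 # assumed existing HttpApi
    path="/polls",
    method=HttpMethod.GET,
    layer=[common_layer],           # assumed existing LayerVersion list
    tables=[poll_table],            # assumed existing DynamoDB Table
)
poll_table.grant_read_data(poll_lambda)  # the helper sets env vars but grants no table permissions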
def __init__(self, scope: Construct, construct_id: str, env: Environment) -> None: super().__init__(scope, construct_id, env=env) smol_table = SmolTable(self, "SmolTable", table_name=TABLE_NAME) smol_vpc = Vpc.from_lookup(self, "CoreVPC", vpc_name=VPC_NAME) smol_subnets = SubnetSelection( one_per_az=True, subnet_type=SubnetType.PRIVATE, ) smol_lambda = Function( self, "SmolAPI", code=Code.from_asset_image(directory=abspath("./")), environment={ "CAPTCHA_KEY": environ["CAPTCHA_KEY"], "SAFE_BROWSING_KEY": environ["SAFE_BROWSING_KEY"], }, function_name=FUNCTION_NAME, handler=Handler.FROM_IMAGE, log_retention=RetentionDays.ONE_WEEK, memory_size=MEMORY_ALLOCATION, reserved_concurrent_executions=RESERVED_CONCURRENCY, runtime=Runtime.FROM_IMAGE, timeout=Duration.seconds(TIMEOUT_SEC), tracing=Tracing.ACTIVE, vpc=smol_vpc, vpc_subnets=smol_subnets, ) smol_table.table.grant(smol_lambda, "dynamodb:DescribeTable") smol_table.table.grant(smol_lambda, "dynamodb:GetItem") smol_table.table.grant(smol_lambda, "dynamodb:PutItem") SmolTarget(self, "SmolTarget", smol_lambda, API_HOST)
async def create_site_function(self, id: str, domain: str, cdn_name: str) -> Function:
    env = {
        'PROD': 'True',
        'SITE_DOMAIN': domain,
        'APP_VERSION': '0.02',
        'STATIC_DOMAIN': cdn_name,
    }
    site_code_asset = Asset(
        self, '{}FunctionAsset'.format(id), path='site_function')
    site_code = S3Code(
        bucket=site_code_asset.bucket, key=site_code_asset.s3_object_key)
    return Function(
        self, '{}Function'.format(id),
        timeout=Duration.seconds(3),
        code=site_code,
        handler='site_function.handler',
        environment=env,
        tracing=Tracing.ACTIVE,
        initial_policy=[DDB_FUNCTION_POLICY_STATEMENT],
        runtime=Runtime(
            name='python3.7',
            supports_inline_code=True,
        )
    )
def __init__(
        self, app: App, id: str, code_txt: str, runtime: str, handler: str,
        env: dict, policy: PolicyStatement) -> None:
    super().__init__(app, id)
    function_role = Role(
        self, 'NonLazyRole',
        assumed_by=ServicePrincipal('lambda.amazonaws.com'))
    self.function = Function(
        self, '{}Function'.format(id),
        code=Code.inline(code_txt),
        runtime=Runtime(runtime, supports_inline_code=True),
        handler=handler,
        environment=env,
        initial_policy=[policy],
        tracing=Tracing.ACTIVE,
        role=function_role
    )
async def create_canary_function(self, id: str) -> Function: function = None with open('canary/canary.py', 'r') as code: canary_code = code.read() function = Function( self, '{}CanaryFunction'.format(id), timeout=Duration.seconds(3), code=InlineCode(canary_code), handler='index.handler', tracing=Tracing.ACTIVE, initial_policy=[MINIMAL_FUNCTION_POLICY_STATEMENT], runtime=Runtime( name='python3.7', supports_inline_code=True, ) ) Rule(self, '{}CanaryRule'.format(id), enabled=True, schedule=Schedule.cron(), targets=[LambdaFunction(handler=function)]) return function
def init_lambda(self):
    tmp_dir = install_lambda_code_requirements()
    lambda_code = Code.from_asset(
        str(tmp_dir),
        exclude=[
            ".env",
            "__main*",
            "*.dist-info",
            "bin",
            "requirements.txt",
        ])
    lambda_function = Function(
        self, "lambda",
        code=lambda_code,
        handler="main.handler",
        runtime=Runtime.PYTHON_3_8)
    # Lambda@Edge functions must also be assumable by edgelambda.amazonaws.com.
    lambda_function.role.assume_role_policy.add_statements(
        PolicyStatement(
            actions=["sts:AssumeRole"],
            principals=[ServicePrincipal("edgelambda.amazonaws.com")]))
    version = Version(self, "version", lambda_=lambda_function)
    apply_removal_policy(lambda_function, version, lambda_function.role)
    return version
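# A hedged sketch of how the returned version might be attached to a CloudFront
# distribution as a Lambda@Edge handler. The distribution, bucket, and event
# type below are illustrative assumptions and do not appear in the source.
from aws_cdk.aws_cloudfront import BehaviorOptions, Distribution, EdgeLambda, LambdaEdgeEventType
from aws_cdk.aws_cloudfront_origins import S3Origin

edge_version = self.init_lambda()
Distribution(
    self, "SiteDistribution",   # placeholder id
    default_behavior=BehaviorOptions(
        origin=S3Origin(site_bucket),   # assumed existing S3 bucket
        edge_lambdas=[EdgeLambda(
            function_version=edge_version,
            event_type=LambdaEdgeEventType.ORIGIN_REQUEST,  # assumed trigger point
        )],
    ),
)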
def __init__(self, scope: cdk.Construct, construct_id: str, lambda_context: str, **kwargs) -> None:
    super().__init__(scope, construct_id)
    fn = dict(self.node.try_get_context(lambda_context))
    lambda_fn = Function(
        self, fn["fn_name"],
        function_name=fn["fn_name"],
        runtime=Runtime.PYTHON_3_8,
        handler=fn["fn_handler"],
        code=Code.from_asset(fn["fn_path"]),
        tracing=Tracing.ACTIVE,
        current_version_options={
            "removal_policy": cdk.RemovalPolicy.RETAIN
        },
        retry_attempts=fn["fn_retry_attempts"],
        timeout=Duration.seconds(fn["fn_timeout"]),
        reserved_concurrent_executions=fn["fn_reserved_concurrency"])
    lambda_fn_alias = lambda_fn.current_version.add_alias(fn["fn_alias"])

    # Outputs
    cdk.CfnOutput(self, fn["fn_name"] + 'Arn', value=lambda_fn.function_arn)

    self._function = lambda_fn
    self._function_alias = lambda_fn_alias
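# The construct above is driven entirely by CDK context. A hypothetical value
# returned by self.node.try_get_context(lambda_context): the key names come
# from the lookups above, the concrete values are placeholders.
example_context = {
    "fn_name": "OrdersHandler",
    "fn_handler": "app.handler",
    "fn_path": "./lambdas/orders",
    "fn_alias": "live",
    "fn_retry_attempts": 2,
    "fn_timeout": 30,
    "fn_reserved_concurrency": 5,
}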
def create_lambda(self, envs: EnvSettings): is_app_only = self.node.try_get_context("is_app_only") if is_app_only == "true": code = Code.from_asset( path="../backend/functions/image_resize/.serverless/main.zip") else: code = Code.from_cfn_parameters() function = Function( self, "image-resize-lambda", function_name=f"{envs.project_name}-image-resize", code=code, handler="index.handler", runtime=Runtime.NODEJS_12_X, memory_size=512, timeout=Duration.seconds(30), tracing=Tracing.ACTIVE, ) api_gateway = LambdaRestApi( self, "ImageResizeLambdaApi", rest_api_name=f"{envs.project_name}-image-resize", handler=function) return function, code, api_gateway
def __init__(self, scope: Construct): super().__init__(scope=scope) function = Function( scope=self, id=f'{self.global_prefix()}TestingFunction', code=Code.from_inline( 'from b_lambda_layer_common import api_gateway\n' 'from b_lambda_layer_common import exceptions\n' 'from b_lambda_layer_common import ssm\n' 'from b_lambda_layer_common import util\n' 'import boto3\n' 'import botocore\n' '\n\n' 'def handler(*args, **kwargs):\n' ' return dict(\n' ' Boto3Version=boto3.__version__,\n' ' BotocoreVersion=botocore.__version__,\n' ' )' '\n'), handler='index.handler', runtime=Runtime.PYTHON_3_6, layers=[ Layer(scope=self, name=f'{self.global_prefix()}TestingLayer', dependencies={ 'boto3': PackageVersion.from_string_version('1.16.35'), 'botocore': PackageVersion.from_string_version('1.19.35'), }) ]) self.add_output(self.LAMBDA_FUNCTION_NAME_KEY, value=function.function_name) # Create another function that is not using boto3. Function( scope=self, id=f'{self.global_prefix()}TestingFunction2', code=Code.from_inline('def handler(*args, **kwargs): return 200'), handler='index.handler', runtime=Runtime.PYTHON_3_6, layers=[ Layer(scope=self, name=f'{self.global_prefix()}TestingLayer2') ])
def _create_lambdas(self): clean_pycache() for root, dirs, files in os.walk(LAMBDAS_DIR): for f in files: if f != "__init__.py": continue parent_folder = os.path.basename(os.path.dirname(root)) lambda_folder = os.path.basename(root) name = f"{parent_folder}-{lambda_folder}" lambda_config = self.lambdas_config[name] layers = [] for layer_name in lambda_config["layers"]: layers.append(self.layers[layer_name]) lambda_role = Role( self, f"{name}_role", assumed_by=ServicePrincipal(service="lambda.amazonaws.com") ) for policy in lambda_config["policies"]: lambda_role.add_to_policy(policy) lambda_role.add_managed_policy( ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")) lambda_args = { "code": Code.from_asset(root), "handler": "__init__.handle", "runtime": Runtime.PYTHON_3_8, "layers": layers, "function_name": name, "environment": lambda_config["variables"], "role": lambda_role, "timeout": Duration.seconds(lambda_config["timeout"]), "memory_size": lambda_config["memory"], } if "concurrent_executions" in lambda_config: lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"] self.lambdas[name] = Function(self, name, **lambda_args) self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue)) Rule( self, "titles_updater", schedule=Schedule.cron(hour="2", minute="10"), targets=[LambdaFunction(self.lambdas["crons-titles_updater"])] ) Rule( self, "episodes_updater", schedule=Schedule.cron(hour="4", minute="10"), targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])] )
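# The loop above keys self.lambdas_config by "{parent_folder}-{lambda_folder}".
# A hypothetical entry showing the fields the loop reads; the layer name,
# policy, and values are placeholders, only the key names come from the code.
lambdas_config = {
    "sqs_handlers-post_anime": {
        "layers": ["utils"],
        "policies": [PolicyStatement(actions=["dynamodb:PutItem"], resources=["*"])],
        "variables": {"TABLE_NAME": "anime"},
        "timeout": 30,
        "memory": 256,
        "concurrent_executions": 2,  # optional; only present when a reserved limit is wanted
    },
}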
def __init__(self, scope: Stack): super().__init__(scope=scope, id=f'TestingStack', stack_name=f'TestingStack') Function(scope=self, id='TestingFunction', code=Code.from_inline('def handler(): return "Hello World!"'), handler='index.handler', runtime=Runtime.PYTHON_3_6, layers=[TwilioLayer(self, 'TestingTwilioLayer')])
def create_other_lambda(self, function_name): function_path = str(self.lambda_path_base.joinpath(function_name)) return Function( self, f'id_{function_name}', code=AssetCode(function_path), handler='lambda_function.lambda_handler', runtime=Runtime.PYTHON_3_7, function_name=f'sfn_{function_name}_lambda', memory_size=128, timeout=core.Duration.seconds(10), )
def deploy_aws_ecs_public_dns(self): code_path = join(dirname(dirname(__file__)), 'build', 'aws-ecs-public-dns.zip') func = Function(self._stack, 'public_dns', runtime=Runtime.NODEJS_12_X, handler='src/update-task-dns.handler', memory_size=128, code=Code.from_asset(path=code_path)) self._tag_it(func) func.add_to_role_policy( statement=self.get_public_dns_policy_statement()) self.create_event_rule(func)
def create_function(self, other_stack: Stack, id, *, code: AssetCode, handler: str, runtime: Runtime) -> IVersion: func = Function( self, id, code=code, handler=handler, runtime=runtime, role=self._role, ) # If code/runtime changes, CDK doesn't re-evaluate the version. In # result, we store an old version, and things don't work. But we also # don't want to generate a new version every run. The compromise: use # the sha256 hash of the index file. with open(f"{code.path}/index.js", "rb") as f: sha256 = hashlib.sha256(f.read()).hexdigest() version = func.add_version(f"Version-{sha256}") # Create an entry in the parameter-store that tells the arn of this lambda parameter_name = parameter_store.get_parameter_name( f"/LambdaEdge/{id}") StringParameter( self, parameter_name, string_value=Fn.join( ":", [ func.function_arn, version.version, ], ), parameter_name=parameter_name, ) other_stack.add_dependency(self) # Create a custom resource that fetches the arn of the lambda cross_region_func = LambdaEdgeFunction( other_stack, f"LambdaEdgeFunction-{sha256}", parameter_name=parameter_name, policy=AwsCustomResourcePolicy.from_sdk_calls( resources=AwsCustomResourcePolicy.ANY_RESOURCE), ) # Create the lambda function based on this arn return Version.from_version_arn(other_stack, id, cross_region_func.get_arn())
def __init__(self, scope: core.Construct, construct_id: str, lambda_context: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) fn = dict(self.node.try_get_context(lambda_context)) # lambda dlq lambda_fn_dlq = _sqs.Queue(self, fn["fn_dlq_name"], queue_name=fn["fn_dlq_name"]) lambda_fn = Function( self, fn["fn_name"], function_name=fn["fn_name"], runtime=Runtime.PYTHON_3_8, handler=fn["fn_handler"], code=Code.from_asset(fn["fn_path"]), tracing=Tracing.ACTIVE, current_version_options={ "removal_policy": core.RemovalPolicy.RETAIN }, environment={ "ENVIRONMENT_VALUE": "DUMMY_VALUE", }, dead_letter_queue=lambda_fn_dlq, retry_attempts=fn["fn_retry_attempts"], timeout=Duration.seconds(fn["fn_timeout"]), reserved_concurrent_executions=fn["fn_reserved_concurrency"]) lambda_fn_alias = lambda_fn.current_version.add_alias(fn["fn_alias"]) lambda_fn_dlq.grant_send_messages(lambda_fn) # # Outputs core.CfnOutput(self, fn["fn_name"] + 'Arn', value=(lambda_fn.function_arn)) self._function = lambda_fn self._function_alias = lambda_fn_alias self._function_dlq = lambda_fn_dlq
def functions_for( self, name, base, handlers, libs=None, timeout=Duration.minutes(5), runtime=Runtime.PYTHON_3_8, layers=None, ) -> Dict[str, Function]: if isinstance(handlers, str): handlers = [handlers] if not isinstance(handlers, list): raise ValueError("handlers must be a string or a list of handlers") if isinstance(libs, str): libs = [libs] if isinstance(layers, str): layers = [layers] if libs and not isinstance(libs, list): raise ValueError("libs must be a string or a list of libraries") bundling = self._get_bundling(base, libs=libs) code = Code.from_asset(str(self.source_path), bundling=bundling) role = self.build_lambda_role(name) functions = {} for handler in handlers: func_name = name + handler.split(".")[0].replace( "_", " ").title().replace(" ", "").replace("Handler", "") functions.update({ func_name: Function( self, func_name, handler=handler, code=code, runtime=runtime, timeout=timeout, role=role, layers=layers, environment={"LOG_LEVEL": self.log_level}, ) }) return functions
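# A hypothetical call to the helper above; the "Orders" prefix, base folder,
# and handler module names are placeholders. The returned dict keys follow the
# name-mangling in the loop (prefix + TitleCased module name, "Handler" dropped).
functions = self.functions_for(
    name="Orders",
    base="orders",
    handlers=["create_order.handler", "list_orders.handler"],
    libs="shared",
)
create_order = functions["OrdersCreateOrder"]
list_orders = functions["OrdersListOrders"]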
def _create_lambdas(self): for root, dirs, files in os.walk(LAMBDAS_DIR): for f in files: if f != "__init__.py": continue parent_folder = os.path.basename(os.path.dirname(root)) lambda_folder = os.path.basename(root) name = f"{parent_folder}-{lambda_folder}" lambda_config = self.lambdas_config[name] layers = [] for layer_name in lambda_config["layers"]: layers.append(self.layers[layer_name]) lambda_role = Role(self, f"{name}_role", assumed_by=ServicePrincipal( service="lambda.amazonaws.com")) for policy in lambda_config["policies"]: lambda_role.add_to_policy(policy) lambda_role.add_managed_policy( ManagedPolicy.from_aws_managed_policy_name( "service-role/AWSLambdaBasicExecutionRole")) self.lambdas[name] = Function( self, name, code=Code.from_asset(root), handler="__init__.handle", runtime=Runtime.PYTHON_3_8, layers=layers, function_name=name, environment=lambda_config["variables"], role=lambda_role, timeout=Duration.seconds(lambda_config["timeout"]), memory_size=lambda_config["memory"], ) Rule(self, "update_eps", schedule=Schedule.cron(hour="2", minute="10"), targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) bucket = Bucket( scope=self, id='WorkshopBucketWithCDK', bucket_name='cloudvisor-workshop-bucket-with-cdk' ) function = Function( scope=self, id='WorkshopFunctionWithCDK', function_name='WorkshopFunctionWithCDK', runtime=Runtime.PYTHON_3_6, handler='index.handler', code=Code.from_inline('def handler(*args, **kwargs): print(args); return 200') ) bucket.add_object_created_notification(LambdaDestination(function))
def _create_lambda_fn(self, envs: EnvSettings, memory_size: int, queue: Queue): is_app_only = self.node.try_get_context("is_app_only") if is_app_only == "true": code = Code.from_asset( path="../backend/functions/worker/.serverless/main.zip") else: code = Code.from_cfn_parameters() function = Function( self, f"data-processing-worker-{memory_size}", function_name=f"{envs.project_name}-data-processing-{memory_size}", code=code, runtime=Runtime.PYTHON_3_8, handler="handler.main", environment={ "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name, "IMAGE_SCRAPING_FETCH_TIMEOUT": "15", "AWS_IMAGE_STORAGE_BUCKET_NAME": self.resize_lambda_image_bucket.bucket_name, "AWS_IMAGE_STATIC_URL": self.resize_lambda_image_bucket.bucket_website_url, "BACKEND_URL": self.backend_url, "LAMBDA_AUTH_TOKEN": self.lambda_auth_token.secret_value.to_string(), }, memory_size=memory_size, timeout=Duration.seconds(300), tracing=Tracing.ACTIVE, ) function.add_event_source(SqsEventSource(queue, batch_size=1)) self.app_bucket.grant_read_write(function.role) self.resize_lambda_image_bucket.grant_read_write(function.role) return function, code
def test_FUNC_hash_WITH_valid_parameters_EXPECT_hash_created(): """ Test that hashing is consistent and works as expected. :return: No return. """ stack = Stack(App(), 'TestStack') integration = LambdaIntegration( scope=stack, integration_name='TestIntegration', api=CfnApi(stack, 'TestApi'), lambda_function=Function( stack, 'TestLambdaFunction', code=Code.from_inline('def handler(*args, **kwargs): return 123'), handler='index.handler', runtime=Runtime.PYTHON_3_6)) assert integration.hash == 'ab93cecc508e529c3791ba48a1275deec88cdd6b43a7e1d443906df48fa300e4'
def __init__(self, scope: Stack, prefix: str) -> None: stack_name = f'{prefix}ExampleStack' super().__init__(scope, id=stack_name, stack_name=stack_name) lambda_name = f'{prefix}ExampleLambdaFunction' self.function = Function( scope=self, id=lambda_name, function_name=lambda_name, handler='index.handler', runtime=Runtime.PYTHON_3_6, code=Code.from_inline('def handler(*args, **kwargs): ' ' return {' ' "isBase64Encoded": False,' ' "statusCode": 200,' ' "headers": { },' ' "body": "Hello from lambda function!"' '}')) api_name = f'{prefix}ExampleRestApi' self.api = LambdaRestApi(self, api_name, handler=self.function)
def create_ecs_lambda(self, cluster: ICluster, auto_scaling_group: AutoScalingGroup): lambda_func = Function( self, "LambdaECS", code=Code.from_asset("./lambdas/nlb-ecs"), handler="index.lambda_handler", runtime=Runtime.PYTHON_3_8, timeout=Duration.seconds(30), environment={ "AUTO_SCALING_GROUP_NAME": auto_scaling_group.auto_scaling_group_name, }, ) lambda_func.add_to_role_policy( PolicyStatement( actions=[ "autoscaling:DescribeAutoScalingGroups", "ssm:SendCommand", "ssm:GetCommandInvocation", ], resources=[ "*", ], )) Rule( self, "ECS", event_pattern=EventPattern( detail_type=["ECS Task State Change"], detail={ "clusterArn": [cluster.cluster_arn], }, source=["aws.ecs"], ), targets=[LambdaFunction(lambda_func)], )
def create_first_lambda(self): function_path = str(self.lambda_path_base.joinpath('first')) code = AssetCode(function_path) scipy_layer = LayerVersion.from_layer_version_arn( self, f'sfn_scipy_layer_for_first', AWS_SCIPY_ARN) return Function( self, f'id_first', # Lambda本体のソースコードがあるディレクトリを指定 code=code, # Lambda本体のハンドラ名を指定 handler='lambda_function.lambda_handler', # ランタイムの指定 runtime=Runtime.PYTHON_3_7, # 環境変数の設定 environment={'BUCKET_NAME': self.bucket.bucket_name}, function_name='sfn_first_lambda', layers=[scipy_layer], memory_size=128, role=self.role, timeout=core.Duration.seconds(10), )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # SQS queue
    state_change_sqs = Queue(
        self, "state_change_sqs",
        visibility_timeout=core.Duration.seconds(60)
    )

    # DynamoDB tables
    # EC2 state changes
    tb_states = Table(
        self, "ec2_states",
        partition_key=Attribute(name="instance-id", type=AttributeType.STRING),
        sort_key=Attribute(name="time", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY,
        stream=StreamViewType.NEW_IMAGE)

    # EC2 inventory
    tb_inventory = Table(
        self, "ec2_inventory",
        partition_key=Attribute(name="instance-id", type=AttributeType.STRING),
        sort_key=Attribute(name="time", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY,
        stream=StreamViewType.KEYS_ONLY)

    # IAM policies - AWS managed
    basic_exec = ManagedPolicy.from_aws_managed_policy_name(
        "service-role/AWSLambdaBasicExecutionRole")
    sqs_access = ManagedPolicy(self, "LambdaSQSExecution", statements=[
        PolicyStatement(
            effect=Effect.ALLOW,
            actions=[
                "sqs:ReceiveMessage",
                "sqs:DeleteMessage",
                "sqs:GetQueueAttributes"
            ],
            resources=[state_change_sqs.queue_arn]
        )])

    # IAM policies
    pol_ec2_states_ro = ManagedPolicy(self, "pol_EC2StatesReadOnly", statements=[
        PolicyStatement(
            effect=Effect.ALLOW,
            actions=[
                "dynamodb:DescribeStream",
                "dynamodb:GetRecords",
                "dynamodb:GetItem",
                "dynamodb:GetShardIterator",
                "dynamodb:ListStreams"
            ],
            resources=[tb_states.table_arn]
        )])
    pol_ec2_states_rwd = ManagedPolicy(
        self, "pol_EC2StatesWriteDelete",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=[
                    "dynamodb:DeleteItem",
                    "dynamodb:DescribeTable",
                    "dynamodb:PutItem",
                    "dynamodb:Query",
                    "dynamodb:UpdateItem"
                ],
                resources=[tb_states.table_arn]
            )])
    pol_ec2_inventory_full = ManagedPolicy(
        self, "pol_EC2InventoryFullAccess",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=[
                    "dynamodb:DeleteItem",
                    "dynamodb:DescribeTable",
                    "dynamodb:GetItem",
                    "dynamodb:PutItem",
                    "dynamodb:Query",
                    "dynamodb:UpdateItem"
                ],
                resources=[tb_inventory.table_arn]
            )])
    pol_lambda_describe_ec2 = ManagedPolicy(
        self, "pol_LambdaDescribeEC2",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=["ec2:Describe*"],
                resources=["*"]
            )])

    # IAM roles
    rl_event_capture = Role(
        self, 'rl_state_capture',
        assumed_by=ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[basic_exec, sqs_access, pol_ec2_states_rwd]
    )
    rl_event_processor = Role(
        self, 'rl_state_processor',
        assumed_by=ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            basic_exec, pol_ec2_states_ro, pol_ec2_states_rwd,
            pol_ec2_inventory_full, pol_lambda_describe_ec2])

    # event capture lambda
    lambda_event_capture = Function(
        self, "lambda_event_capture",
        handler="event_capture.handler",
        runtime=Runtime.PYTHON_3_7,
        code=Code.asset('event_capture'),
        role=rl_event_capture,
        events=[SqsEventSource(state_change_sqs)],
        environment={"state_table": tb_states.table_name}
    )

    # event processor lambda
    lambda_event_processor = Function(
        self, "lambda_event_processor",
        handler="event_processor.handler",
        runtime=Runtime.PYTHON_3_7,
        code=Code.asset('event_processor'),
        role=rl_event_processor,
        events=[
            DynamoEventSource(
                tb_states,
                starting_position=StartingPosition.LATEST)
        ],
        environment={
            "inventory_table": tb_inventory.table_name,
        }
    )

    # CloudWatch event rule
    event_ec2_change = Rule(
        self, "ec2_state_change",
        description="trigger on ec2 start, stop and terminate instances",
        event_pattern=EventPattern(
            source=["aws.ec2"],
            detail_type=["EC2 Instance State-change Notification"],
            detail={
                "state": ["running", "stopped", "terminated"]
            }
        ),
        targets=[aws_events_targets.SqsQueue(state_change_sqs)]
    )

    # Outputs
    core.CfnOutput(self, "rl_state_capture_arn", value=rl_event_capture.role_arn)
    core.CfnOutput(self, "rl_state_processor_arn", value=rl_event_processor.role_arn)
    core.CfnOutput(self, "tb_states_arn", value=tb_states.table_arn)
    core.CfnOutput(self, "tb_inventory_arn", value=tb_inventory.table_arn)
    core.CfnOutput(self, "sqs_state_change", value=state_change_sqs.queue_arn)
def __init__( self, scope: Construct, id: str, *, vpc: IVpc, cluster: ICluster, service: IEc2Service, ecs_security_group: SecurityGroup, deployment: Deployment, **kwargs, ) -> None: super().__init__(scope, id, **kwargs) Tags.of(self).add("Application", self.application_name) Tags.of(self).add("Deployment", deployment.value) security_group = SecurityGroup( self, "LambdaSG", vpc=vpc, ) lambda_func = Function( self, "ReloadLambda", code=Code.from_asset("./lambdas/bananas-reload"), handler="index.lambda_handler", runtime=Runtime.PYTHON_3_8, timeout=Duration.seconds(120), environment={ "CLUSTER": cluster.cluster_arn, "SERVICE": service.service_arn, }, vpc=vpc, security_groups=[security_group, ecs_security_group], reserved_concurrent_executions=1, ) lambda_func.add_to_role_policy( PolicyStatement( actions=[ "ec2:DescribeInstances", "ecs:DescribeContainerInstances", "ecs:DescribeTasks", "ecs:ListContainerInstances", "ecs:ListServices", "ecs:ListTagsForResource", "ecs:ListTasks", ], resources=[ "*", ], ) ) policy = ManagedPolicy(self, "Policy") policy.add_statements( PolicyStatement( actions=[ "lambda:InvokeFunction", ], resources=[lambda_func.function_arn], ) )
def __init__(
        self,
        scope: Construct,
        id: str,
        elasticsearch_index: ElasticsearchIndexResource,
        dynamodb_table: Table,
        kms_key: Optional[Key] = None,
        *,
        sagemaker_endpoint_name: str = None,
        sagemaker_endpoint_arn: str = None,
        sagemaker_embeddings_key: str = None) -> None:
    super().__init__(scope=scope, id=id)

    elasticsearch_layer = BElasticsearchLayer(scope=self, name=f"{id}ElasticsearchLayer")

    if bool(sagemaker_endpoint_name) ^ bool(sagemaker_embeddings_key):
        raise ValueError(
            'In order to use sentence embedding, all of the following environment variables are required: '
            'SAGEMAKER_ENDPOINT_NAME, SAGEMAKER_EMBEDDINGS_KEY. '
            'Otherwise, provide none of the above.')

    if sagemaker_endpoint_name and not sagemaker_endpoint_arn:
        sagemaker_endpoint_arn = self.__resolve_sagemaker_endpoints_arn('*')

    optional_sagemaker_parameters = {
        'SAGEMAKER_ENDPOINT_NAME': sagemaker_endpoint_name or None,
        'SAGEMAKER_EMBEDDINGS_KEY': sagemaker_embeddings_key or None
    }

    initial_cloner_function = SingletonFunction(
        scope=self,
        id='InitialClonerFunction',
        uuid='e01116a4-f939-43f2-8f5b-cc9f862c9e01',
        lambda_purpose='InitialClonerSingletonLambda',
        code=Code.from_asset(initial_cloner_root),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_8,
        layers=[elasticsearch_layer],
        log_retention=RetentionDays.ONE_MONTH,
        memory_size=128,
        timeout=Duration.minutes(15),
        role=Role(
            scope=self,
            id='InitialClonerFunctionRole',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'LogsPolicy': PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            'logs:CreateLogGroup',
                            'logs:CreateLogStream',
                            'logs:PutLogEvents',
                            'logs:DescribeLogStreams',
                        ],
                        resources=['arn:aws:logs:*:*:*'],
                        effect=Effect.ALLOW,
                    )
                ]),
                'ElasticsearchPolicy': PolicyDocument(statements=[
                    PolicyStatement(
                        actions=[
                            'es:ESHttpDelete',
                            'es:ESHttpGet',
                            'es:ESHttpHead',
                            'es:ESHttpPatch',
                            'es:ESHttpPost',
                            'es:ESHttpPut',
                        ],
                        resources=['*'],
                        effect=Effect.ALLOW,
                    )
                ]),
                'DynamodbPolicy': PolicyDocument(statements=[
                    PolicyStatement(
                        actions=['dynamodb:*'],
                        resources=['*'],
                        effect=Effect.ALLOW,
                    )
                ]),
            },
            description='Role for DynamoDB Initial Cloner Function',
        ),
    )

    if kms_key:
        initial_cloner_function.add_to_role_policy(
            PolicyStatement(
                actions=['kms:Decrypt'],
                resources=[kms_key.key_arn],
                effect=Effect.ALLOW,
            ),
        )

    initial_cloner = CustomResource(
        scope=self,
        id='InitialCloner',
        service_token=initial_cloner_function.function_arn,
        removal_policy=RemovalPolicy.DESTROY,
        properties={
            'DynamodbTableName': dynamodb_table.table_name,
            'ElasticsearchIndexName': elasticsearch_index.index_name,
            'ElasticsearchEndpoint': elasticsearch_index.elasticsearch_domain.domain_endpoint,
        },
        resource_type='Custom::ElasticsearchInitialCloner',
    )
    primary_key_field = initial_cloner.get_att_string('PrimaryKeyField')

    dynamodb_stream_arn = dynamodb_table.table_stream_arn
    if not dynamodb_stream_arn:
        raise Exception('DynamoDB streams must be enabled for the table')

    dynamodb_event_source = DynamoEventSource(
        table=dynamodb_table,
        starting_position=StartingPosition.LATEST,
        enabled=True,
        max_batching_window=Duration.seconds(10),
        bisect_batch_on_error=True,
        parallelization_factor=2,
        batch_size=1000,
        retry_attempts=10,
    )

    cloner_inline_policies = {
        'LogsPolicy': PolicyDocument(statements=[
            PolicyStatement(
                actions=[
                    'logs:CreateLogGroup',
                    'logs:CreateLogStream',
                    'logs:PutLogEvents',
                    'logs:DescribeLogStreams',
                ],
                resources=['arn:aws:logs:*:*:*'],
                effect=Effect.ALLOW,
            )
        ]),
        'ElasticsearchPolicy': PolicyDocument(statements=[
            PolicyStatement(
                actions=[
                    'es:ESHttpDelete',
                    'es:ESHttpGet',
                    'es:ESHttpHead',
                    'es:ESHttpPatch',
                    'es:ESHttpPost',
                    'es:ESHttpPut',
                ],
                resources=[f'{elasticsearch_index.elasticsearch_domain.domain_arn}/*'],
                effect=Effect.ALLOW,
            )
        ]),
        'DynamodbStreamsPolicy': PolicyDocument(statements=[
            PolicyStatement(
                actions=[
                    'dynamodb:DescribeStream',
                    'dynamodb:GetRecords',
                    'dynamodb:GetShardIterator',
                    'dynamodb:ListStreams',
                ],
                resources=[dynamodb_stream_arn],
                effect=Effect.ALLOW,
            )
        ]),
    }

    if sagemaker_endpoint_arn:
        cloner_inline_policies['SagemakerPolicy'] = PolicyDocument(statements=[
            PolicyStatement(
                actions=['sagemaker:InvokeEndpoint'],
                resources=[sagemaker_endpoint_arn],
                effect=Effect.ALLOW)
        ])

    cloner_function = Function(
        scope=self,
        id='ClonerFunction',
        code=Code.from_asset(cloner_root),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_8,
        environment={
            'ES_INDEX_NAME': elasticsearch_index.index_name,
            'ES_DOMAIN_ENDPOINT': elasticsearch_index.elasticsearch_domain.domain_endpoint,
            'PRIMARY_KEY_FIELD': primary_key_field,
            **{
                k: optional_sagemaker_parameters[k]
                for k in optional_sagemaker_parameters
                if all(optional_sagemaker_parameters.values())
            }
        },
        events=[dynamodb_event_source],
        layers=[elasticsearch_layer],
        log_retention=RetentionDays.ONE_MONTH,
        memory_size=128,
        role=Role(
            scope=self,
            id='ClonerFunctionRole',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            inline_policies=cloner_inline_policies,
            description='Role for DynamoDB Cloner Function',
        ),
        timeout=Duration.seconds(30),
    )

    if kms_key:
        cloner_function.add_to_role_policy(
            PolicyStatement(
                actions=['kms:Decrypt'],
                resources=[kms_key.key_arn],
                effect=Effect.ALLOW,
            ))
def __init__(self, scope: Stack, id: str, capacity: Optional[AddCapacityOptions] = None, cluster_name: Optional[str] = None, container_insights: Optional[bool] = None, default_cloud_map_namespace: Optional[ CloudMapNamespaceOptions] = None, vpc: Optional[IVpc] = None, **kwargs) -> None: known_args = dict( scope=scope, id=id, capacity=capacity, cluster_name=cluster_name, container_insights=container_insights, default_cloud_map_namespace=default_cloud_map_namespace, vpc=vpc) unknown_args = kwargs super().__init__(**{**known_args, **unknown_args}) self.__role = Role( scope=scope, id=cluster_name + 'CustomResourceRole', role_name=cluster_name + 'CustomResourceRole', assumed_by=CompositePrincipal( ServicePrincipal("lambda.amazonaws.com"), ServicePrincipal("cloudformation.amazonaws.com")), inline_policies={ cluster_name + 'CustomResourcePolicy': PolicyDocument(statements=[ PolicyStatement(actions=[ "ecs:ListClusters", "ecs:ListContainerInstances", "ecs:ListServices", "ecs:ListTaskDefinitions", "ecs:ListTasks", "ecs:DescribeClusters", "ecs:DescribeContainerInstances", "ecs:DescribeServices", "ecs:DescribeTaskDefinition", "ecs:DescribeTasks", "ecs:CreateCluster", "ecs:DeleteCluster", "ecs:DeleteService", "ecs:DeregisterContainerInstance", "ecs:DeregisterTaskDefinition", "ecs:StopTask", "ecs:UpdateService", ], effect=Effect.ALLOW, resources=['*']), PolicyStatement(actions=[ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], effect=Effect.ALLOW, resources=['*']), ]) }, managed_policies=[]) self.__custom_backend = Function( scope=scope, id=cluster_name + 'Deleter', code=Code.from_asset(path=package_root), handler='index.handler', runtime=Runtime.PYTHON_3_6, description= f'A custom resource backend to delete ecs cluster ({cluster_name}) in the right way.', function_name=cluster_name + 'Deleter', memory_size=128, role=self.__role, timeout=Duration.seconds(900), ) # noinspection PyTypeChecker provider: ICustomResourceProvider = CustomResourceProvider.from_lambda( self.__custom_backend) self.__custom_resource = CustomResource( scope=scope, id=cluster_name + 'CustomResource', provider=provider, removal_policy=RemovalPolicy.DESTROY, properties={'clusterName': cluster_name}, resource_type='Custom::EmptyS3Bucket') # Make sure that custom resource is deleted before lambda function backend. self.__custom_resource.node.add_dependency(self.__custom_backend) # Make sure that custom resource is deleted before the bucket. self.__custom_resource.node.add_dependency(self)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    with open("stack/config.yml", 'r') as stream:
        configs = yaml.safe_load(stream)

    ### S3 core
    images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")
    images_S3_bucket.add_cors_rule(
        allowed_methods=[_s3.HttpMethods.POST],
        allowed_origins=["*"]  # add API gateway web resource URL
    )

    ### SQS core
    image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
    image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE", dead_letter_queue={
        "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
        "queue": image_deadletter_queue
    })

    ### api gateway core
    api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
    api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
    api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
    api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
    api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

    ### landing page function
    get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
        function_name="ICS_GET_LANDING_PAGE",
        runtime=Runtime.PYTHON_3_7,
        handler="main.handler",
        code=Code.asset("./src/landingPage"))

    get_landing_page_integration = LambdaIntegration(
        get_landing_page_function,
        proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }])

    ### cognito
    required_attribute = _cognito.StandardAttribute(required=True)

    users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
        auto_verify=_cognito.AutoVerifiedAttrs(email=True),  # required for self sign-up
        standard_attributes=_cognito.StandardAttributes(email=required_attribute),  # required for self sign-up
        self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

    user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT",
        supported_identity_providers=["COGNITO"],
        allowed_o_auth_flows=["implicit"],
        allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
        user_pool_id=users_pool.user_pool_id,
        callback_ur_ls=[api_gateway_landing_page_resource.url],
        allowed_o_auth_flows_user_pool_client=True,
        explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

    user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN",
        user_pool=users_pool,
        cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

    ### get signed URL function
    get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
        function_name="ICS_GET_SIGNED_URL",
        environment={
            "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
            "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
        },
        runtime=Runtime.PYTHON_3_7,
        handler="main.handler",
        code=Code.asset("./src/getSignedUrl"))

    get_signedurl_integration = LambdaIntegration(
        get_signedurl_function,
        proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
        rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
        name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
        type="COGNITO_USER_POOLS",
        identity_source="method.request.header.Authorization",
        provider_arns=[users_pool.user_pool_arn])

    api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
        authorization_type=AuthorizationType.COGNITO,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }]
    ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

    images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

    ### image massage function
    image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
        function_name="ICS_IMAGE_MASSAGE",
        timeout=core.Duration.seconds(6),
        runtime=Runtime.PYTHON_3_7,
        environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
        handler="main.handler",
        code=Code.asset("./src/imageMassage"))

    images_S3_bucket.grant_write(image_massage_function, "processed/*")
    images_S3_bucket.grant_delete(image_massage_function, "new/*")
    images_S3_bucket.grant_read(image_massage_function, "new/*")

    new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)
    images_S3_bucket.add_event_notification(
        _s3.EventType.OBJECT_CREATED,
        new_image_added_notification,
        _s3.NotificationKeyFilter(prefix="new/")
    )

    image_queue.grant_send_messages(image_massage_function)

    ### image analyzer function
    image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
        function_name="ICS_IMAGE_ANALYSIS",
        runtime=Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(10),
        environment={
            "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
            "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
            "REGION": core.Aws.REGION,
        },
        handler="main.handler",
        code=Code.asset("./src/imageAnalysis"))

    image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
    image_queue.grant_consume_messages(image_massage_function)

    lambda_rekognition_access = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
        resources=["*"]
    )

    image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
    images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

    ### API gateway finalizing
    self.add_cors_options(api_gateway_get_signedurl_resource)
    self.add_cors_options(api_gateway_landing_page_resource)
    self.add_cors_options(api_gateway_image_search_resource)

    ### database
    database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
        secret_name="rds-db-credentials/image-content-search-rds-secret",
        generate_secret_string=_secrets_manager.SecretStringGenerator(
            generate_string_key='password',
            secret_string_template='{"username": "******"}',
            exclude_punctuation=True,
            exclude_characters='/@\" \\\'',
            require_each_included_type=True
        )
    )

    database = _rds.CfnDBCluster(self, "ICS_DATABASE",
        engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
        engine_mode="serverless",
        database_name=configs["Database"]["Name"],
        enable_http_endpoint=True,
        deletion_protection=configs["Database"]["DeletionProtection"],
        master_username=database_secret.secret_value_from_json("username").to_string(),
        master_user_password=database_secret.secret_value_from_json("password").to_string(),
        scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=configs["Database"]["Scaling"]["AutoPause"],
            min_capacity=configs["Database"]["Scaling"]["Min"],
            max_capacity=configs["Database"]["Scaling"]["Max"],
            seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
        ),
    )

    database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(
        core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)

    secret_target = _secrets_manager.CfnSecretTargetAttachment(self, "ICS_DATABASE_SECRET_TARGET",
        target_type="AWS::RDS::DBCluster",
        target_id=database.ref,
        secret_id=database_secret.secret_arn
    )
    secret_target.node.add_dependency(database)

    ### database function
    image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
        role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
        assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
            _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
            _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
        ]
    )

    image_data_function = Function(self, "ICS_IMAGE_DATA",
        function_name="ICS_IMAGE_DATA",
        runtime=Runtime.PYTHON_3_7,
        timeout=core.Duration.seconds(5),
        role=image_data_function_role,
        environment={
            "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
            "CLUSTER_ARN": database_cluster_arn,
            "CREDENTIALS_ARN": database_secret.secret_arn,
            "DB_NAME": database.database_name,
            "REGION": core.Aws.REGION
        },
        handler="main.handler",
        code=Code.asset("./src/imageData")
    )

    image_search_integration = LambdaIntegration(
        image_data_function,
        proxy=True,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])

    api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
        rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
        name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
        type="COGNITO_USER_POOLS",
        identity_source="method.request.header.Authorization",
        provider_arns=[users_pool.user_pool_arn])

    api_gateway_image_search_resource.add_method('POST', image_search_integration,
        authorization_type=AuthorizationType.COGNITO,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }]
    ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)

    lambda_access_search = _iam.PolicyStatement(
        effect=_iam.Effect.ALLOW,
        actions=["translate:TranslateText"],
        resources=["*"]
    )
    image_data_function.add_to_role_policy(lambda_access_search)

    ### custom resource
    lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER',
        on_event_handler=image_data_function
    )
    core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE',
        service_token=lambda_provider.service_token,
        pascal_case_properties=False,
        resource_type="Custom::SchemaCreation",
        properties={
            "source": "Cloudformation"
        }
    )

    ### event bridge
    event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")
    event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
        rule_name="ICS_IMAGE_CONTENT_RULE",
        description="The event from image analyzer to store the data",
        event_bus=event_bus,
        event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
    )
    event_rule.add_target(_event_targets.LambdaFunction(image_data_function))
    event_bus.grant_put_events(image_analyzer_function)
    image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

    ### outputs
    core.CfnOutput(self, 'CognitoHostedUILogin',
        value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(
            user_pool_domain.domain_name,
            core.Aws.REGION,
            user_pool_app_client.ref,
            '+'.join(user_pool_app_client.allowed_o_auth_scopes),
            api_gateway_landing_page_resource.url),
        description='The Cognito Hosted UI Login Page'
    )
def __init__( self, scope: Stack, id: str, on_create_action: Dict[str, Any], on_update_action: Dict[str, Any], on_delete_action: Dict[str, Any], ) -> None: """ Constructor. :param scope: CloudFormation stack in which resources should be placed. :param id: Name (id) or prefix for resources. :param on_create_action: Create action arguments. Read more on: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.create_service :param on_update_action: Update action arguments. Read more on: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.update_service :param on_delete_action: Delete action arguments. Read more on: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.delete_service """ self.__role = Role( scope=scope, id=id + 'Role', role_name=id + 'Role', assumed_by=CompositePrincipal( ServicePrincipal("lambda.amazonaws.com"), ServicePrincipal("cloudformation.amazonaws.com") ), inline_policies={ id + 'Policy': PolicyDocument( statements=[ PolicyStatement( actions=[ 'ecs:createService', 'ecs:updateService', 'ecs:deleteService', 'ecs:describeServices', 'ecs:listServices', 'ecs:updateServicePrimaryTaskSet' ], effect=Effect.ALLOW, resources=['*'] ), PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" ], effect=Effect.ALLOW, resources=['*'] ), ] ) }, managed_policies=[] ) self.__custom_backend = Function( scope=scope, id=id + 'Backend', code=Code.from_asset( path=package_root ), handler='index.handler', runtime=Runtime.PYTHON_3_6, description=f'A custom resource backend to manage ecs {id} service.', function_name=id + 'Backend', memory_size=128, role=self.__role, timeout=Duration.seconds(900), ) # noinspection PyTypeChecker provider: ICustomResourceProvider = CustomResourceProvider.from_lambda(self.__custom_backend) self.__custom_resource = CustomResource( scope=scope, id=id + 'CustomResource', provider=provider, removal_policy=RemovalPolicy.DESTROY, properties={ 'onCreate': on_create_action, 'onUpdate': on_update_action, 'onDelete': on_delete_action }, resource_type='Custom::EcsService' ) # Make sure that custom resource is deleted before lambda function backend. self.__custom_resource.node.add_dependency(self.__custom_backend)
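# A hedged instantiation sketch for the ECS-service custom resource above. The
# class name EcsServiceCustomResource is an assumption (only __init__ is shown
# in the source); the action dicts follow the boto3 ECS create/update/delete
# service arguments linked in the docstring, and all values are placeholders.
EcsServiceCustomResource(
    scope=stack,
    id='BananasService',
    on_create_action={
        'cluster': 'my-cluster',
        'serviceName': 'bananas',
        'taskDefinition': 'bananas-task:1',
        'desiredCount': 1,
    },
    on_update_action={
        'cluster': 'my-cluster',
        'service': 'bananas',
        'taskDefinition': 'bananas-task:2',
    },
    on_delete_action={
        'cluster': 'my-cluster',
        'service': 'bananas',
        'force': True,
    },
)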