def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
    super().__init__(app, id)

    table_name = 'RandomWriterTable'

    with open("lambda-handler.py", encoding="utf8") as fp:
        handler_code = fp.read()

    lambda_fn = lambda_.Function(
        self, "RandomWriter",
        code=lambda_.InlineCode(handler_code),
        handler="index.main",
        timeout=cdk.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_7,
        environment={'TABLE_NAME': table_name},
    )

    # Add our 'Every Minute' scheduling rule for this Lambda (via a CloudWatch Events scheduled Rule)
    rule = events.Rule(
        self, "Rule",
        schedule=events.Schedule.expression("cron(* * * * ? *)"),
    )
    rule.add_target(targets.LambdaFunction(lambda_fn))

    # Build our DynamoDB Table
    dynamodb = Table(
        self, table_name,
        table_name=table_name,
        partition_key=Attribute(name='ID', type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    dynamodb.grant_full_access(lambda_fn)
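# The stack above inlines lambda-handler.py and invokes it as index.main.
# A minimal sketch of such a handler, assuming it simply writes one random
# item keyed by 'ID' per scheduled invocation (the real handler body is not
# shown in the source):
import os
import uuid

import boto3


def main(event, context):
    # Write a single random item on every scheduled run.
    table = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME'])
    table.put_item(Item={'ID': str(uuid.uuid4())})
    return {'status': 'written'}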
def __init__(
    self,
    scope: core.Construct,
    id: str,
    function_name: str,
    handler: str,
    config_bucket: aws_s3.Bucket,
    state_table: aws_dynamodb.Table,
    dependency_layer: aws_lambda.LayerVersion,
    api: aws_apigateway.RestApi,
    endpoint: str,
) -> None:
    super().__init__(scope, id)

    environment = {
        'bridge_env': 'PROD',
        'bridge_config': f's3://{config_bucket.bucket_name}/bridge.json',
        'state_dynamodb_table': state_table.table_name,
    }

    # NOTE: code_asset is referenced but not defined in this snippet; it is
    # presumably an aws_lambda.Code asset created elsewhere in the module.
    self.function = aws_lambda.Function(
        self,
        function_name,
        function_name=function_name,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        layers=[dependency_layer],
        code=code_asset,
        handler=handler,
        timeout=core.Duration.seconds(30),
        retry_attempts=0,
        environment=environment,
    )

    function_resource = api.root.add_resource(endpoint)
    function_resource.add_method(
        'POST', aws_apigateway.LambdaIntegration(handler=self.function))

    config_bucket.grant_read(self.function)
    state_table.grant_write_data(self.function)
def _create_lambda_role(self, role_name: str, table: aws_dynamodb.Table,
                        write_permission: bool) -> aws_iam.Role:
    role = aws_iam.Role(
        self,
        f'{role_name}',
        assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
        inline_policies={
            "RegionValidationPolicy":
            aws_iam.PolicyDocument(statements=[
                aws_iam.PolicyStatement(actions=["ec2:DescribeRegions"],
                                        resources=["*"],
                                        effect=aws_iam.Effect.ALLOW),
            ]),
            "TracingPolicy":
            aws_iam.PolicyDocument(statements=[
                aws_iam.PolicyStatement(
                    actions=["ssm:Describe*", "ssm:Get*", "ssm:List*"],
                    resources=["arn:aws:ssm:*"],
                    effect=aws_iam.Effect.ALLOW,
                ),
            ]),
        },
        # Add CloudWatch logging policy
        managed_policies=[
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole")
        ],
    )

    # Set DynamoDB permissions
    if write_permission:
        table.grant_read_write_data(role)
    else:
        table.grant_read_data(role)

    return role
def _create_tables(self):
    self.shows_table = Table(
        self,
        "shows_table",
        table_name="shows",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.shows_table.add_global_secondary_index(
        partition_key=Attribute(name="tvmaze_id", type=AttributeType.NUMBER),
        index_name="tvmaze_id")

    self.episodes_table = Table(
        self,
        "episodes_table",
        table_name="shows-eps",
        partition_key=Attribute(name="show_id", type=AttributeType.STRING),
        sort_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.episodes_table.add_local_secondary_index(
        sort_key=Attribute(name="id", type=AttributeType.STRING),
        index_name="episode_id")
    self.episodes_table.add_global_secondary_index(
        partition_key=Attribute(name="tvmaze_id", type=AttributeType.NUMBER),
        index_name="tvmaze_id")
def __init__(self, scope, id, cluster: ecs.Cluster,
             tracks_table: dynamodb.Table, processing_queue: sqs.Queue,
             upload_bucket: s3.Bucket, **kwargs):
    super().__init__(scope, id, **kwargs)

    api_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'api'))

    self.api = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, 'http-api-service',
        cluster=cluster,
        task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_asset(directory=api_dir),
            container_port=8080,
            environment={
                'PROCESSING_QUEUE_URL': processing_queue.queue_url,
                'TRACKS_TABLE_NAME': tracks_table.table_name,
                'UPLOAD_BUCKET_NAME': upload_bucket.bucket_name
            }),
        desired_count=2,
        cpu=256,
        memory_limit_mib=512)

    processing_queue.grant_send_messages(
        self.api.service.task_definition.task_role)
    tracks_table.grant_read_write_data(
        self.api.service.task_definition.task_role)
    upload_bucket.grant_put(self.api.service.task_definition.task_role)
def create_lambda(self, scope: core.Stack, lambda_id: str, handler: str,
                  table: dynamo_db.Table):
    fn = _lambda.Function(scope, lambda_id,
                          runtime=_lambda.Runtime.NODEJS_12_X,
                          handler=handler,
                          code=_lambda.Code.from_asset("lambdas"),
                          environment={'TABLE_NAME': table.table_name})
    table.grant_read_write_data(fn)
    return fn
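# Hypothetical call site for the create_lambda helper above, inside a stack;
# the table, construct ids and handler name are illustrative, not from the
# source.
orders_table = dynamo_db.Table(
    self, "OrdersTable",
    partition_key=dynamo_db.Attribute(name="id",
                                      type=dynamo_db.AttributeType.STRING))
create_order_fn = self.create_lambda(self, "CreateOrderHandler",
                                     "createOrder.handler", orders_table)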
def _create_tables(self):
    self.movies_table = Table(
        self,
        "movies_table",
        table_name="movies",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.movies_table.add_global_secondary_index(
        partition_key=Attribute(name="tmdb_id", type=AttributeType.NUMBER),
        index_name="tmdb_id")
def __init__(self, scope: core.Construct, id: str, demo_table: dynamodb.Table,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    bundling_options = core.BundlingOptions(
        image=_lambda.Runtime.NODEJS_12_X.bundling_docker_image,
        user="******",
        command=[
            'bash', '-c',
            'cp /asset-input/* /asset-output/ && cd /asset-output && npm test'
        ])

    source_code = _lambda.Code.from_asset('./lambda',
                                          bundling=bundling_options)

    # Create the Lambda function
    web_lambda = _lambda.Function(
        self,
        "dynamo-lambda-function",
        runtime=_lambda.Runtime.NODEJS_12_X,
        handler="dynamoFunction.handler",
        code=source_code,
        environment=dict(TABLE_NAME=demo_table.table_name))

    # Grant the Lambda permission to read and write the demo table
    demo_table.grant_full_access(web_lambda)

    codedeploy.LambdaDeploymentGroup(
        self,
        "web-lambda-deployment",
        alias=web_lambda.current_version.add_alias("live"),
        deployment_config=codedeploy.LambdaDeploymentConfig.ALL_AT_ONCE)

    gw = _apigw.LambdaRestApi(
        self,
        "Gateway",
        handler=web_lambda,
        description="Endpoint for a simple Lambda-powered web service")

    # Add an output with a well-known name to read it from the integ tests
    self.gw_url = gw.url
def __init__(self, scope: cdk.Construct, construct_id: str, stage: str,
             table: _dynamo.Table, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    initial_data_role = iam.Role(
        self,
        "InitialDataRole",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole")
        ],
    )
    initial_data_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "AWSLambdaInvocation-DynamoDB"))
    initial_data_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonDynamoDBFullAccess"))

    on_event = _lambda.Function(
        self,
        "DataHandler",
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset("lambda"),
        handler="initial_data.lambda_handler",
        timeout=cdk.Duration.minutes(5),
        environment={
            "TABLE_NAME": table.table_name,
            "STAGE": stage
        },
    )
    table.grant_full_access(on_event)

    initial_data_provider = _resources.Provider(
        self,
        "InitialDataProvider",
        on_event_handler=on_event,
        log_retention=logs.RetentionDays.ONE_DAY,
        role=initial_data_role,
    )
    cdk.CustomResource(
        self,
        "InitialDataResource",
        service_token=initial_data_provider.service_token,
    )
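# The Provider framework calls initial_data.lambda_handler with lifecycle
# events (RequestType of Create/Update/Delete). A minimal sketch; the
# seeding logic below is an assumption, only the event contract comes from
# the custom-resource framework:
import os

import boto3


def lambda_handler(event, context):
    if event["RequestType"] == "Create":
        table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
        table.put_item(Item={"id": "seed", "stage": os.environ["STAGE"]})
    # Update and Delete are no-ops in this sketch.
    return {"PhysicalResourceId": "initial-data"}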
def __init__(self, scope: cdk.Construct, construct_id: str, stage: str,
             table: _dynamo.Table, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    lambda_schedule = _events.Schedule.rate(cdk.Duration.days(1))

    reminder_role = iam.Role(
        self,
        "ReminderRole",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole")
        ],
    )
    reminder_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMFullAccess"))

    reminder_lambda = _lambda.Function(
        self,
        "SendReminderHandler",
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset("lambda"),
        handler="send_reminder.lambda_handler",
        timeout=cdk.Duration.minutes(5),
        role=reminder_role,
        environment={
            "TABLE_NAME": table.table_name,
            "STAGE": stage
        },
    )
    table.grant_full_access(reminder_lambda)

    event_lambda_target = _events_targets.LambdaFunction(
        handler=reminder_lambda)

    # Daily CloudWatch Events rule that triggers the reminder Lambda
    _events.Rule(
        self,
        "SendReminders",
        description="Once per day CW event trigger for lambda",
        enabled=True,
        schedule=lambda_schedule,
        targets=[event_lambda_target],
    )
def __init__(self, scope: cdk.Construct, construct_id: str, db_context: str,
             **kwargs) -> None:
    super().__init__(scope, construct_id)

    # Load the DB settings from the CDK context
    db = dict(self.node.try_get_context(db_context))

    # Shorten some of the logic
    billing_mode = (BillingMode.PROVISIONED
                    if db["db_billing_mode"] == "provisioned"
                    else BillingMode.PAY_PER_REQUEST)
    pk = db["db_table_pk"]
    pk_type = (AttributeType.STRING
               if db["db_table_pk_type"] == "string"
               else AttributeType.NUMBER)

    table = Table(
        self,
        db["db_table"],
        table_name=db["db_table"],
        partition_key=Attribute(name=pk, type=pk_type),
        read_capacity=db["db_min_read_capacity"],
        write_capacity=db["db_min_write_capacity"],
        encryption=_ddb.TableEncryption.AWS_MANAGED,
        point_in_time_recovery=True,
        removal_policy=cdk.RemovalPolicy.DESTROY,
        billing_mode=billing_mode,
        time_to_live_attribute=db["db_ttl_attribute"],
    )

    # Add read/write autoscaling at the configured target utilization
    if db["db_billing_mode"] == "provisioned" and db["db_enable_autoscaling"]:
        read_scaling = table.auto_scale_read_capacity(
            min_capacity=db["db_min_read_capacity"],
            max_capacity=db["db_max_read_capacity"],
        )
        read_scaling.scale_on_utilization(
            target_utilization_percent=db["db_target_utilization"],
        )
        write_scaling = table.auto_scale_write_capacity(
            min_capacity=db["db_min_write_capacity"],
            max_capacity=db["db_max_write_capacity"],
        )
        write_scaling.scale_on_utilization(
            target_utilization_percent=db["db_target_utilization"],
        )

    self.table = table
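# Illustrative shape of the cdk.json context entry read via
# try_get_context(db_context); the key names come from the lookups above,
# every value shown is an assumption.
db_context_example = {
    "db_table": "orders",
    "db_table_pk": "id",
    "db_table_pk_type": "string",      # anything other than "string" means NUMBER
    "db_billing_mode": "provisioned",  # anything else means PAY_PER_REQUEST
    "db_enable_autoscaling": True,
    "db_min_read_capacity": 5,
    "db_max_read_capacity": 100,
    "db_min_write_capacity": 5,
    "db_max_write_capacity": 100,
    "db_target_utilization": 70,
    "db_ttl_attribute": "ttl",
}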
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    table_name = "posts2"
    function_name = "cl2"
    email = "*****@*****.**"

    table = Table(
        self,
        "cl_posts",
        table_name=table_name,
        partition_key=Attribute(name="url", type=AttributeType.STRING),
        time_to_live_attribute="ttl",
    )

    function = PythonFunction(
        self,
        "cl_function",
        function_name=function_name,
        entry="src",
        index="app.py",
        runtime=Runtime.PYTHON_3_8,
        environment={
            "cl_email": email,
            "cl_table_name": table_name
        },
        timeout=Duration.seconds(300),
        initial_policy=[
            PolicyStatement(
                actions=["ses:SendEmail", "ses:VerifyEmailIdentity"],
                resources=[
                    f"arn:aws:ses:{self.region}:{self.account}:identity/{email}"
                ],
            ),
            PolicyStatement(
                actions=["dynamodb:BatchGetItem", "dynamodb:BatchWriteItem"],
                resources=[table.table_arn],
            ),
        ],
    )

    with open("events/event.json") as f:
        event = json.load(f)

    Rule(
        self,
        "cl_schedule",
        schedule=Schedule.expression("cron(0 19 * * ? *)"),
        targets=[
            LambdaFunction(function, event=RuleTargetInput.from_object(event))
        ],
    )
def __init__(self, scope: Construct, id: str, table_name: str) -> None:
    super().__init__(scope, id)

    primary_key = Attribute(name="id", type=AttributeType.STRING)
    self.table = Table(
        self,
        "SmolTable",
        billing_mode=BillingMode.PAY_PER_REQUEST,
        partition_key=primary_key,
        point_in_time_recovery=True,
        table_name=table_name,
    )
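# Hypothetical instantiation of the construct above inside a stack; the
# class name SmolTable and the consuming Lambda are assumptions, not from
# the source.
table_construct = SmolTable(self, "SmolTable", table_name="smol-table")
table_construct.table.grant_read_data(reader_lambda)  # wire to a consumer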
def add_get_method(
    self,
    api: aws_apigateway.RestApi,
    resource: aws_apigateway.Resource,
    table: aws_dynamodb.Table,
) -> aws_apigateway.Method:
    list_announcements_lambda = create_function(
        stack=self,
        id="ListAnnouncementLambda",
        settings={
            "handler": "list_announcements.main",
            "runtime": aws_lambda.Runtime.PYTHON_3_8,
            "timeout": core.Duration.minutes(
                self.settings.AWS_LAMBDA_GET_ANNOUNCEMENT_TIMEOUT),
            "retry_attempts":
            self.settings.AWS_LAMBDA_GET_ANNOUNCEMENT_RETRY_ATTEMPTS,
        },
    )
    table.grant_read_data(list_announcements_lambda)
    list_announcements_lambda.add_environment("TABLE_NAME", table.table_name)

    list_announcements_method = resource.add_method(
        "GET",
        integration=aws_apigateway.LambdaIntegration(
            list_announcements_lambda,
            proxy=True,
            integration_responses=[
                aws_apigateway.IntegrationResponse(status_code="200"),
                aws_apigateway.IntegrationResponse(status_code="404"),
            ],
        ),
    )
    self.methods_to_deploy.append(list_announcements_method)
    return list_announcements_method
def __init__(self, scope: core.Construct, id: str, target_table: ddb.Table,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    feed_scanner_lambda = pylambda.PythonFunction(
        self,
        "FeedScannerLambda",
        function_name="CloudsatLHR-Feed-Scanner",
        entry="lambdas/feed_scanner/",
        index="app/index.py",
        runtime=lambda_.Runtime.PYTHON_3_7)
    self.feed_scanner_lambda = feed_scanner_lambda

    target_table.grant_write_data(feed_scanner_lambda)
    target_table.grant_read_data(feed_scanner_lambda)

    five_minute_timer = events.Rule(
        self,
        "FiveMinuteTimer",
        enabled=True,
        schedule=events.Schedule.rate(core.Duration.minutes(5)),
        targets=[targets.LambdaFunction(feed_scanner_lambda)])
    self.five_minute_timer = five_minute_timer
def _create_tables(self):
    self.anime_table = Table(
        self,
        "anime_items",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.anime_table.add_global_secondary_index(
        partition_key=Attribute(name="mal_id", type=AttributeType.NUMBER),
        index_name="mal_id")
    self.anime_table.add_global_secondary_index(
        partition_key=Attribute(name="broadcast_day",
                                type=AttributeType.STRING),
        index_name="broadcast_day")

    self.anime_episodes = Table(
        self,
        "anime_episodes",
        partition_key=Attribute(name="anime_id", type=AttributeType.STRING),
        sort_key=Attribute(name="episode_number", type=AttributeType.NUMBER),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.anime_episodes.add_local_secondary_index(
        sort_key=Attribute(name="id", type=AttributeType.STRING),
        index_name="episode_id")
    self.anime_episodes.add_global_secondary_index(
        partition_key=Attribute(name="anidb_id", type=AttributeType.NUMBER),
        index_name="anidb_id")

    self.anime_params = Table(
        self,
        "anime_params",
        partition_key=Attribute(name="name", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # DynamoDB
    table = Table(
        self,
        "DynamoTableTable",
        partition_key=Attribute(name="pk", type=AttributeType.NUMBER),  # partition key
        sort_key=Attribute(name="sk", type=AttributeType.STRING),  # sort key
        billing_mode=BillingMode.PAY_PER_REQUEST,  # use on-demand capacity
        removal_policy=core.RemovalPolicy.DESTROY,  # delete the table together with the stack (optional)
    )
def __init__(self, scope, id, cluster: ecs.Cluster,
             tracks_table: dynamodb.Table, input_bucket: s3.Bucket,
             output_bucket: s3.Bucket, **kwargs):
    super().__init__(scope, id, **kwargs)

    worker_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'worker'))

    self.service = ecs_patterns.QueueProcessingFargateService(
        self, 'separator-service',
        cluster=cluster,
        cpu=2048,
        memory_limit_mib=8192,
        image=ecs.ContainerImage.from_asset(directory=worker_dir),
        environment={
            'TRACKS_TABLE_NAME': tracks_table.table_name,
            'OUTPUT_BUCKET_NAME': output_bucket.bucket_name
        })

    input_bucket.grant_read(self.service.task_definition.task_role)
    output_bucket.grant_write(self.service.task_definition.task_role)
    tracks_table.grant_read_write_data(self.service.task_definition.task_role)
def create_deploy_status_table(self) -> Resource:
    """Table that tracks the build/deploy status of each component.

    Table schema:
        * Partition key
            * component_name
        * Sort key
            * version
        * Item
            * bucket
            * s3_path
            * component_arn
            * pipeline_status
                * image_creating: container image build in progress
                * image_faild: container image build failed
                * image_exists: container image exists
                * component_exists: the Greengrass component exists
                * component_faild: component registration failed for some reason
                * create_deployment:
            * update_time
            * deployment_status
                * IN_PROGRESS
                * ACTIVE
                * CANCELLED
            * deploy_group
            * job_id

    Returns:
        Resource: DynamoDB Table
    """
    table_name = f"{self.stack_name}_{self.component_id}_" + "deploy_status"
    table = Table(
        self,
        id=table_name,
        table_name=table_name,
        partition_key=Attribute(name="component_name",
                                type=AttributeType.STRING),  # partition key
        sort_key=Attribute(name="version", type=AttributeType.STRING),  # sort key
        removal_policy=RemovalPolicy.DESTROY,  # delete the table together with the stack (optional)
    )
    return table
def __init__(self, scope: Stack):
    super().__init__(scope=scope, id="TestingStack", stack_name="TestingStack")

    table = Table(
        scope=self,
        id="TestingTable",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        stream=StreamViewType.NEW_IMAGE,
        removal_policy=RemovalPolicy.DESTROY,
    )

    domain = Domain(
        scope=self,
        id="TestingElasticsearchDomain",
        version=ElasticsearchVersion.V7_7,
        capacity=CapacityConfig(
            # Use the cheapest instance available.
            data_node_instance_type="t3.small.elasticsearch",
            data_nodes=1,
            master_nodes=None,
        ),
        zone_awareness=ZoneAwarenessConfig(enabled=False),
        ebs=EbsOptions(enabled=True,
                       volume_size=10,
                       volume_type=EbsDeviceVolumeType.GP2),
    )

    elasticsearch_index = ElasticsearchIndexResource(
        scope=self,
        name="TestingElasticsearchIndex",
        elasticsearch_domain=domain,
        index_prefix="testing_index",
    )

    elasticsearch_cloner = ElasticsearchCloner(
        scope=self,
        id="TestingElasticsearchCloner",
        elasticsearch_index=elasticsearch_index,
        dynamodb_table=table,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # =======> DynamoDB
    gsi_name = "HogeGSI"
    table = Table(
        self,
        "DynamoTableGsiTable",
        partition_key=Attribute(name="pk", type=AttributeType.NUMBER),
        sort_key=Attribute(name="sk", type=AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    table.add_global_secondary_index(
        index_name=gsi_name,
        partition_key=Attribute(name="gsi_pk", type=AttributeType.NUMBER),
        sort_key=Attribute(name="gsi_sk", type=AttributeType.STRING),
        projection_type=ProjectionType.KEYS_ONLY,
    )

    # =======> Lambda
    lambda_ = aws_lambda.Function(
        self,
        "LambdaDynamoTableGsi",
        code=aws_lambda.Code.from_asset("lambdas/dynamo_table_gsi"),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        environment={
            "TABLE_NAME": table.table_name,
            "GSI_NAME": gsi_name
        },
    )
    table.grant_read_data(lambda_)  # this also grants read access to the GSI
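# Minimal sketch of lambda_function.lambda_handler querying the GSI defined
# above. Only TABLE_NAME and GSI_NAME come from the stack; the key value is
# illustrative.
import os

import boto3
from boto3.dynamodb.conditions import Key


def lambda_handler(event, context):
    table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
    response = table.query(
        IndexName=os.environ["GSI_NAME"],
        KeyConditionExpression=Key("gsi_pk").eq(1),  # gsi_pk is a NUMBER key
    )
    return response["Items"]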
def __init__(self, scope: core.Construct, id: str, table: dynamodb.Table,
             index_name: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    role = iam.Role(
        self, 'LambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
        ],
    )
    table.grant_read_data(role)

    xray = lambda_.LayerVersion(
        self, 'Xray',
        compatible_runtimes=[
            lambda_.Runtime.PYTHON_3_6,
            lambda_.Runtime.PYTHON_3_7,
            lambda_.Runtime.PYTHON_3_8,
        ],
        code=lambda_.Code.from_asset('src/xray'),
    )

    version_options = lambda_.VersionOptions(
        retry_attempts=0,  # No retries
        provisioned_concurrent_executions=1,  # Avoid cold starts
    )

    list_function = lambda_.Function(
        self, 'List',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset('src/list'),
        handler='list.handler',
        role=role,
        tracing=lambda_.Tracing.ACTIVE,
        current_version_options=version_options,
        layers=[xray],
        environment={
            'TABLE_NAME': table.table_name,
            'INDEX_NAME': index_name,
        })

    query_function = lambda_.Function(
        self, 'Query',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset('src/query'),
        handler='query.handler',
        role=role,
        tracing=lambda_.Tracing.ACTIVE,
        current_version_options=version_options,
        layers=[xray],
        environment={'TABLE_NAME': table.table_name})

    api = apigateway.RestApi(
        self, 'StockHistory',
        endpoint_types=[apigateway.EndpointType.REGIONAL],
        cloud_watch_role=True,
        deploy_options=apigateway.StageOptions(
            logging_level=apigateway.MethodLoggingLevel.ERROR,
            metrics_enabled=True,
            tracing_enabled=True,
        ),
    )

    stock_root_resource = api.root.add_resource('stock')
    stock_id_resource = stock_root_resource.add_resource('{ticker}')

    stock_root_resource.add_method(
        http_method='GET',
        integration=apigateway.LambdaIntegration(
            list_function.current_version,
            proxy=True,
        ),
    )
    stock_id_resource.add_method(
        http_method='GET',
        integration=apigateway.LambdaIntegration(
            query_function.current_version,
            proxy=True,
        ),
        request_parameters={
            'method.request.querystring.start': False,
            'method.request.querystring.end': False,
        },
    )

    self.api = api
class Shows(core.Stack):
    def __init__(self, app: core.App, id: str, domain_name: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        self.domain_name = domain_name
        self.layers = {}
        self.lambdas = {}

        self._create_tables()
        self._create_topic()
        self._create_lambdas_config()
        self._create_layers()
        self._create_lambdas()
        self._create_gateway()

    def _create_tables(self):
        self.shows_table = Table(
            self,
            "shows_table",
            table_name="shows",
            partition_key=Attribute(name="id", type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
        )
        self.shows_table.add_global_secondary_index(
            partition_key=Attribute(name="tvmaze_id",
                                    type=AttributeType.NUMBER),
            index_name="tvmaze_id")

        self.episodes_table = Table(
            self,
            "episodes_table",
            table_name="shows-eps",
            partition_key=Attribute(name="show_id", type=AttributeType.STRING),
            sort_key=Attribute(name="id", type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
        )
        self.episodes_table.add_local_secondary_index(
            sort_key=Attribute(name="id", type=AttributeType.STRING),
            index_name="episode_id")
        self.episodes_table.add_global_secondary_index(
            partition_key=Attribute(name="tvmaze_id",
                                    type=AttributeType.NUMBER),
            index_name="tvmaze_id")

    def _create_topic(self):
        self.show_updates_topic = Topic(
            self,
            "shows_updates",
            topic_name="shows-updates",
        )

    def _create_lambdas_config(self):
        self.lambdas_config = {
            "api-shows_by_id": {
                "layers": ["utils", "databases", "api"],
                "variables": {
                    "SHOWS_DATABASE_NAME": self.shows_table.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(actions=["dynamodb:GetItem"],
                                    resources=[self.shows_table.table_arn])
                ],
                "timeout": 3,
                "memory": 128
            },
            "api-shows": {
                "layers": ["utils", "databases", "api"],
                "variables": {
                    "SHOWS_DATABASE_NAME": self.shows_table.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.shows_table.table_arn}/index/tvmaze_id"
                        ]),
                    PolicyStatement(actions=["dynamodb:UpdateItem"],
                                    resources=[self.shows_table.table_arn]),
                ],
                "timeout": 10,
                "memory": 128
            },
            "api-episodes": {
                "layers": ["utils", "databases", "api"],
                "variables": {
                    "SHOWS_DATABASE_NAME": self.shows_table.table_name,
                    "SHOW_EPISODES_DATABASE_NAME":
                    self.episodes_table.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(actions=["dynamodb:GetItem"],
                                    resources=[self.shows_table.table_arn]),
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.episodes_table.table_arn}/index/tvmaze_id"
                        ]),
                    PolicyStatement(actions=["dynamodb:UpdateItem"],
                                    resources=[self.episodes_table.table_arn]),
                    PolicyStatement(actions=["dynamodb:GetItem"],
                                    resources=[self.episodes_table.table_arn]),
                ],
                "timeout": 10,
                "memory": 128
            },
            "api-episodes_by_id": {
                "layers": ["utils", "databases", "api"],
                "variables": {
                    "SHOW_EPISODES_DATABASE_NAME":
                    self.episodes_table.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(actions=["dynamodb:Query"],
                                    resources=[self.episodes_table.table_arn]),
                ],
                "timeout": 3,
                "memory": 128
            },
            "cron-update_eps": {
                "layers": ["utils", "databases", "api", "publishers"],
                "variables": {
                    "SHOWS_DATABASE_NAME": self.shows_table.table_name,
                    "LOG_LEVEL": "INFO",
                    "UPDATES_TOPIC_ARN": self.show_updates_topic.topic_arn,
                },
                "policies": [
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.shows_table.table_arn}/index/tvmaze_id"
                        ],
                    ),
                    PolicyStatement(
                        actions=["sns:Publish"],
                        resources=[self.show_updates_topic.topic_arn],
                    )
                ],
                "timeout": 60,
                "memory": 1024
            },
        }

    def _create_layers(self):
        if os.path.isdir(BUILD_FOLDER):
            shutil.rmtree(BUILD_FOLDER)
        os.mkdir(BUILD_FOLDER)

        for layer in os.listdir(LAYERS_DIR):
            layer_folder = os.path.join(LAYERS_DIR, layer)
            build_folder = os.path.join(BUILD_FOLDER, layer)
            shutil.copytree(layer_folder, build_folder)

            requirements_path = os.path.join(build_folder, "requirements.txt")
            if os.path.isfile(requirements_path):
                packages_folder = os.path.join(build_folder, "python", "lib",
                                               "python3.8", "site-packages")
                # print(f"Installing layer requirements to target: {os.path.abspath(packages_folder)}")
                subprocess.check_output([
                    "pip", "install", "-r", requirements_path, "-t",
                    packages_folder
                ])
                clean_pycache()

            self.layers[layer] = LayerVersion(
                self,
                layer,
                layer_version_name=f"shows-{layer}",
                code=Code.from_asset(path=build_folder),
                compatible_runtimes=[Runtime.PYTHON_3_8],
            )

    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(self,
                                   f"{name}_role",
                                   assumed_by=ServicePrincipal(
                                       service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"],
                )

        Rule(self,
             "update_eps",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["cron-update_eps"])])

    def _create_gateway(self):
        cert = Certificate(self,
                           "certificate",
                           domain_name=self.domain_name,
                           validation_method=ValidationMethod.DNS)
        domain_name = DomainName(
            self,
            "domain_name",
            certificate=cert,
            domain_name=self.domain_name,
        )

        http_api = HttpApi(
            self,
            "shows_gateway",
            create_default_stage=False,
            api_name="shows",
            cors_preflight=CorsPreflightOptions(
                allow_methods=[HttpMethod.GET, HttpMethod.POST],
                allow_origins=["https://moshan.tv", "https://beta.moshan.tv"],
                allow_headers=["authorization", "content-type"]))

        routes = {
            "get_shows": {
                "method": "GET",
                "route": "/shows",
                "target_lambda": self.lambdas["api-shows"]
            },
            "post_shows": {
                "method": "POST",
                "route": "/shows",
                "target_lambda": self.lambdas["api-shows"]
            },
            "get_shows_by_id": {
                "method": "GET",
                "route": "/shows/{id}",
                "target_lambda": self.lambdas["api-shows_by_id"]
            },
            "get_episodes": {
                "method": "GET",
                "route": "/episodes",
                "target_lambda": self.lambdas["api-episodes"]
            },
            "post_episodes": {
                "method": "POST",
                "route": "/shows/{id}/episodes",
                "target_lambda": self.lambdas["api-episodes"]
            },
            "get_episodes_by_id": {
                "method": "GET",
                "route": "/shows/{id}/episodes/{episode_id}",
                "target_lambda": self.lambdas["api-episodes_by_id"]
            },
        }

        for r in routes:
            integration = HttpIntegration(
                self,
                f"{r}_integration",
                http_api=http_api,
                integration_type=HttpIntegrationType.LAMBDA_PROXY,
                integration_uri=routes[r]["target_lambda"].function_arn,
                method=getattr(HttpMethod, routes[r]["method"]),
                payload_format_version=PayloadFormatVersion.VERSION_2_0,
            )
            CfnRoute(
                self,
                r,
                api_id=http_api.http_api_id,
                route_key=f"{routes[r]['method']} {routes[r]['route']}",
                # authorization_type="AWS_IAM",  # TODO: add back when https://github.com/aws/aws-cdk/pull/14853 gets merged (set this manually for now)
                target="integrations/" + integration.integration_id)

            routes[r]["target_lambda"].add_permission(
                f"{r}_apigateway_invoke",
                principal=ServicePrincipal("apigateway.amazonaws.com"),
                source_arn=f"arn:aws:execute-api:{self.region}:{self.account}:{http_api.http_api_id}/*"
            )

        HttpStage(self,
                  "live",
                  http_api=http_api,
                  auto_deploy=True,
                  stage_name="live",
                  domain_mapping=DomainMappingOptions(domain_name=domain_name))
def __init__(self, scope: core.Construct, id: str, ctx: object,
             ecr_repository: ecr.Repository, kinesis_stream: ks.Stream,
             state_table: ddb.Table, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.ecr_repository = ecr_repository
    self.kinesis_stream = kinesis_stream
    self.state_table = state_table

    service_name = "processor"
    ctx_srv = getattr(ctx.outbound.services.pull, service_name)

    self.vpc = ec2.Vpc.from_vpc_attributes(self, "VPC", **ctx.vpc_props.dict())

    # CloudWatch Logs Group
    self.log_group = cwl.LogGroup(scope=self, id="logs")

    # Create a new ECS cluster for our services
    self.cluster = ecs.Cluster(self, vpc=self.vpc, id=f"{id}_cluster")
    cluster_name_output = core.CfnOutput(scope=self,
                                         id="cluster-name-out",
                                         value=self.cluster.cluster_name,
                                         export_name=f"{id}-cluster-name")
    service_names_output = core.CfnOutput(scope=self,
                                          id="service-names-out",
                                          value=service_name,
                                          export_name=f"{id}-service-names")

    # Create a role for ECS to interact with AWS APIs with standard permissions
    self.ecs_exec_role = iam.Role(
        scope=self,
        id="ecs_logstash-exec_role",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy")
        ])

    # Grant ECS additional permissions to decrypt secrets from Secrets
    # Manager that have been encrypted with our custom key
    if getattr(ctx, "secrets_key_arn", None) is not None:
        self.ecs_exec_role.add_to_policy(
            iam.PolicyStatement(actions=["kms:Decrypt"],
                                effect=iam.Effect.ALLOW,
                                resources=[ctx.secrets_key_arn]))

    # Grant ECS permissions to log to our log group
    self.log_group.grant_write(self.ecs_exec_role)

    # Create a task role to grant permissions for Logstash to interact with AWS APIs
    ecs_task_role = iam.Role(
        scope=self,
        id=f"{service_name}_task_role",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

    # Add permissions for Logstash to send metrics to CloudWatch
    ecs_task_role.add_to_policy(
        iam.PolicyStatement(actions=["cloudwatch:PutMetricData"],
                            effect=iam.Effect.ALLOW,
                            resources=["*"]))

    # Add permissions for Logstash to interact with our Kinesis queue
    self.kinesis_stream.grant_read(ecs_task_role)

    # Remove this when the next version of the Kinesis module is released
    # https://github.com/aws/aws-cdk/pull/6141
    ecs_task_role.add_to_policy(
        iam.PolicyStatement(actions=["kinesis:ListShards"],
                            effect=iam.Effect.ALLOW,
                            resources=[self.kinesis_stream.stream_arn]))

    # Add permissions for Logstash to store Kinesis Consumer Library (KCL)
    # state tracking in DynamoDB
    state_table.grant_full_access(ecs_task_role)

    # Add permissions for Logstash to upload logs to S3 for archive
    bucket_resources = []
    for k, v in ctx_srv.variables.items():
        if k.endswith("_log_bucket"):
            bucket_resources.append('arn:aws:s3:::{0}'.format(v))
            bucket_resources.append('arn:aws:s3:::{0}/*'.format(v))
    ecs_task_role.add_to_policy(
        iam.PolicyStatement(actions=[
            "s3:PutObject", "s3:ListMultipartUploadParts", "s3:ListBucket",
            "s3:AbortMultipartUpload"
        ],
                            effect=iam.Effect.ALLOW,
                            resources=bucket_resources))

    # Task Definition
    task_definition = ecs.FargateTaskDefinition(
        scope=self,
        id=f"{service_name}_task_definition",
        cpu=ctx_srv.size.cpu,
        memory_limit_mib=ctx_srv.size.ram,
        execution_role=self.ecs_exec_role,
        task_role=ecs_task_role,
    )

    log_driver = ecs.LogDriver.aws_logs(log_group=self.log_group,
                                        stream_prefix=service_name)

    # Container Definition
    container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
    container = ecs.ContainerDefinition(
        scope=self,
        id=f"{service_name}_container_definition",
        task_definition=task_definition,
        image=ecs.ContainerImage.from_ecr_repository(self.ecr_repository,
                                                     "latest"),
        logging=log_driver,
        **container_vars)

    # Service Definition
    security_group = ec2.SecurityGroup(scope=self,
                                       id=f"{service_name}_sg",
                                       vpc=self.vpc)

    service = ecs.FargateService(
        scope=self,
        id=f"{service_name}_fargate_service",
        task_definition=task_definition,
        cluster=self.cluster,
        desired_count=getattr(ctx_srv, "desired_count",
                              ctx.default_desired_count),
        service_name=service_name,
        security_group=security_group)

    scaling = service.auto_scale_task_count(
        max_capacity=ctx_srv.scaling.max_capacity,
        min_capacity=ctx_srv.scaling.min_capacity)
    scaling.scale_on_cpu_utilization(
        id="cpu_scaling",
        target_utilization_percent=ctx_srv.scaling.target_utilization_percent,
        scale_in_cooldown=core.Duration.seconds(
            ctx_srv.scaling.scale_in_cooldown_seconds),
        scale_out_cooldown=core.Duration.seconds(
            ctx_srv.scaling.scale_out_cooldown_seconds),
    )
def __init__(
    self,
    scope: Construct,
    construct_id: str,
    *,
    deploy_env: str,
    processing_assets_table: aws_dynamodb.Table,
):  # pylint: disable=too-many-locals
    super().__init__(scope, construct_id)

    if deploy_env == "prod":
        instance_types = [
            aws_ec2.InstanceType("c5.xlarge"),
            aws_ec2.InstanceType("c5.2xlarge"),
            aws_ec2.InstanceType("c5.4xlarge"),
            aws_ec2.InstanceType("c5.9xlarge"),
        ]
    else:
        instance_types = [
            aws_ec2.InstanceType("m5.large"),
            aws_ec2.InstanceType("m5.xlarge"),
        ]

    ec2_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
        "service-role/AmazonEC2ContainerServiceforEC2Role")

    batch_instance_role = aws_iam.Role(
        self,
        "batch-instance-role",
        assumed_by=aws_iam.ServicePrincipal(
            "ec2.amazonaws.com"),  # type: ignore[arg-type]
        managed_policies=[ec2_policy],
    )
    processing_assets_table.grant_read_write_data(
        batch_instance_role)  # type: ignore[arg-type]

    batch_instance_profile = aws_iam.CfnInstanceProfile(
        self,
        "batch-instance-profile",
        roles=[batch_instance_role.role_name],
    )

    batch_launch_template_data = textwrap.dedent(
        """
        MIME-Version: 1.0
        Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="

        --==MYBOUNDARY==
        Content-Type: text/x-shellscript; charset="us-ascii"

        #!/bin/bash
        echo ECS_IMAGE_PULL_BEHAVIOR=prefer-cached >> /etc/ecs/ecs.config

        --==MYBOUNDARY==--
        """
    )
    launch_template_data = aws_ec2.CfnLaunchTemplate.LaunchTemplateDataProperty(
        user_data=Fn.base64(batch_launch_template_data.strip()))

    cloudformation_launch_template = aws_ec2.CfnLaunchTemplate(
        self,
        "batch-launch-template",
        launch_template_name=f"{deploy_env}-datalake-batch-launch-template",
        launch_template_data=launch_template_data,
    )
    assert cloudformation_launch_template.launch_template_name is not None
    launch_template = aws_batch.LaunchTemplateSpecification(
        launch_template_name=cloudformation_launch_template.launch_template_name)

    # Use an existing VPC in the LINZ AWS account.
    # A VPC with these tags is required to exist in the AWS account before
    # this project is deployed; the VPC itself is not deployed by this project.
    vpc = aws_ec2.Vpc.from_lookup(
        self,
        "datalake-vpc",
        tags={
            APPLICATION_NAME_TAG_NAME: APPLICATION_NAME,
            "ApplicationLayer": "networking",
        },
    )

    compute_resources = aws_batch.ComputeResources(
        vpc=vpc,
        minv_cpus=0,
        desiredv_cpus=0,
        maxv_cpus=1000,
        instance_types=instance_types,
        instance_role=batch_instance_profile.instance_profile_name,
        allocation_strategy=aws_batch.AllocationStrategy("BEST_FIT_PROGRESSIVE"),
        launch_template=launch_template,
    )

    batch_service_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
        "service-role/AWSBatchServiceRole")
    service_role = aws_iam.Role(
        self,
        "batch-service-role",
        assumed_by=aws_iam.ServicePrincipal(
            "batch.amazonaws.com"),  # type: ignore[arg-type]
        managed_policies=[batch_service_policy],
    )
    compute_environment = aws_batch.ComputeEnvironment(
        self,
        "compute-environment",
        compute_resources=compute_resources,
        service_role=service_role,  # type: ignore[arg-type]
    )

    self.job_queue = aws_batch.JobQueue(
        scope,
        f"{construct_id}-job-queue",
        compute_environments=[
            aws_batch.JobQueueComputeEnvironment(
                compute_environment=compute_environment,
                order=10  # type: ignore[arg-type]
            ),
        ],
        priority=10,
    )
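# Hypothetical submission of a job to the queue created above via boto3.
# The job definition is assumed to be registered separately; both names
# below are illustrative, not from the source.
import boto3

batch = boto3.client("batch")
batch.submit_job(
    jobName="process-assets",
    jobQueue="datalake-job-queue",        # the deployed JobQueue's name
    jobDefinition="datalake-processing",  # assumed to exist already
)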
def add_post_method(
    self,
    api: aws_apigateway.RestApi,
    resource: aws_apigateway.Resource,
    table: aws_dynamodb.Table,
) -> aws_apigateway.Method:
    create_announcement_lambda = create_function(
        stack=self,
        id="CreateAnnouncementLambda",
        settings={
            "handler": "create_announcement.main",
            "runtime": aws_lambda.Runtime.PYTHON_3_8,
            "timeout": core.Duration.minutes(
                self.settings.AWS_LAMBDA_CREATE_ANNOUNCEMENT_TIMEOUT),
            "retry_attempts":
            self.settings.AWS_LAMBDA_CREATE_ANNOUNCEMENT_RETRY_ATTEMPTS,
        },
    )
    create_announcement_lambda.add_environment("TABLE_NAME", table.table_name)
    table.grant_read_write_data(create_announcement_lambda)

    create_announcement_request_validator = aws_apigateway.RequestValidator(
        self,
        "CreateAnnouncementRequestValidator",
        rest_api=api,
        validate_request_body=True,
        request_validator_name="Create Announcement Request Validator",
    )
    create_announcement_request_model = aws_apigateway.Model(
        self,
        "CreateAnnouncementRequestModel",
        model_name="CreateAnnouncementRequest",
        rest_api=api,
        schema=aws_apigateway.JsonSchema(
            type=aws_apigateway.JsonSchemaType.OBJECT,
            required=["Item"],
            properties={
                "Item":
                aws_apigateway.JsonSchema(
                    type=aws_apigateway.JsonSchemaType.OBJECT,
                    required=["title", "date", "description"],
                    properties={
                        "title":
                        aws_apigateway.JsonSchema(
                            type=aws_apigateway.JsonSchemaType.STRING),
                        "description":
                        aws_apigateway.JsonSchema(
                            type=aws_apigateway.JsonSchemaType.STRING),
                        "date":
                        aws_apigateway.JsonSchema(
                            type=aws_apigateway.JsonSchemaType.STRING,
                            min_length=1,
                            format="date",
                            pattern="^\d{4}-([0]\d|1[0-2])-([0-2]\d|3[01])$",
                        ),
                    },
                )
            },
        ),
    )
    create_announcement_response_success_model = aws_apigateway.Model(
        self,
        "CreateAnnouncementResponseSuccess",
        model_name="CreateAnnouncementResponseSuccess",
        rest_api=api,
        schema=aws_apigateway.JsonSchema(
            type=aws_apigateway.JsonSchemaType.OBJECT,
            required=["id"],
            properties={
                "id":
                aws_apigateway.JsonSchema(
                    type=aws_apigateway.JsonSchemaType.STRING)
            },
        ),
    )
    create_announcement_response_error_model = aws_apigateway.Model(
        self,
        "CreateAnnouncementResponseError",
        model_name="CreateAnnouncementResponseError",
        rest_api=api,
        schema=aws_apigateway.JsonSchema(
            type=aws_apigateway.JsonSchemaType.OBJECT,
            required=["error"],
            properties={
                "error":
                aws_apigateway.JsonSchema(
                    type=aws_apigateway.JsonSchemaType.STRING)
            },
        ),
    )

    create_announcement_method = resource.add_method(
        "POST",
        integration=aws_apigateway.LambdaIntegration(
            create_announcement_lambda,
            proxy=True,
            integration_responses=[
                aws_apigateway.IntegrationResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Origin":
                        "'*'"
                    },
                ),
                aws_apigateway.IntegrationResponse(
                    status_code="404",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Origin":
                        "'*'"
                    },
                ),
            ],
            passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER,
        ),
        request_validator=create_announcement_request_validator,
        request_models={"application/json": create_announcement_request_model},
        method_responses=[
            aws_apigateway.MethodResponse(
                status_code="200",
                response_models={
                    "application/json":
                    create_announcement_response_success_model
                },
                response_parameters={
                    "method.response.header.Access-Control-Allow-Origin": True
                },
            ),
            aws_apigateway.MethodResponse(
                status_code="404",
                response_models={
                    "application/json":
                    create_announcement_response_error_model
                },
                response_parameters={
                    "method.response.header.Access-Control-Allow-Origin": True
                },
            ),
        ],
    )
    self.methods_to_deploy.append(create_announcement_method)
    return create_announcement_method
class Anime(core.Stack):
    def __init__(self, app: core.App, id: str, mal_client_id: str,
                 anidb_client: str, domain_name: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        self.mal_client_id = mal_client_id
        self.anidb_client = anidb_client
        self.domain_name = domain_name
        self.layers = {}
        self.lambdas = {}

        self._create_buckets()
        self._create_tables()
        self._create_queues()
        self._create_lambdas_config()
        self._create_layers()
        self._create_lambdas()
        self._create_gateway()

    def _create_buckets(self):
        self.anidb_titles_bucket = Bucket(
            self,
            "anidb_titles_bucket",
            block_public_access=BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
            ),
            removal_policy=core.RemovalPolicy.DESTROY,
            lifecycle_rules=[
                LifecycleRule(expiration=Duration.days(3)),
            ])

    def _create_tables(self):
        self.anime_table = Table(
            self,
            "anime_items",
            partition_key=Attribute(name="id", type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
        )
        self.anime_table.add_global_secondary_index(
            partition_key=Attribute(name="mal_id", type=AttributeType.NUMBER),
            index_name="mal_id")
        self.anime_table.add_global_secondary_index(
            partition_key=Attribute(name="broadcast_day",
                                    type=AttributeType.STRING),
            index_name="broadcast_day")

        self.anime_episodes = Table(
            self,
            "anime_episodes",
            partition_key=Attribute(name="anime_id",
                                    type=AttributeType.STRING),
            sort_key=Attribute(name="episode_number",
                               type=AttributeType.NUMBER),
            billing_mode=BillingMode.PAY_PER_REQUEST,
        )
        self.anime_episodes.add_local_secondary_index(
            sort_key=Attribute(name="id", type=AttributeType.STRING),
            index_name="episode_id")
        self.anime_episodes.add_global_secondary_index(
            partition_key=Attribute(name="anidb_id",
                                    type=AttributeType.NUMBER),
            index_name="anidb_id")

        self.anime_params = Table(
            self,
            "anime_params",
            partition_key=Attribute(name="name", type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
        )

    def _create_queues(self):
        post_anime_dl = Queue(self, "post_anime_dl")
        self.post_anime_queue = Queue(
            self,
            "anime",
            dead_letter_queue=DeadLetterQueue(max_receive_count=5,
                                              queue=post_anime_dl),
            receive_message_wait_time=Duration.seconds(20))

    def _create_lambdas_config(self):
        self.lambdas_config = {
            "api-anime_by_id": {
                "layers": ["utils", "databases"],
                "variables": {
                    "ANIME_DATABASE_NAME": self.anime_table.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(actions=["dynamodb:GetItem"],
                                    resources=[self.anime_table.table_arn])
                ],
                "timeout": 3,
                "memory": 128
            },
            "api-anime_episodes": {
                "layers": ["utils", "databases"],
                "variables": {
                    "ANIME_EPISODES_DATABASE_NAME":
                    self.anime_episodes.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(actions=["dynamodb:Query"],
                                    resources=[self.anime_episodes.table_arn]),
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.anime_episodes.table_arn}/index/anidb_id"
                        ])
                ],
                "timeout": 3,
                "memory": 512
            },
            "api-anime_episode": {
                "layers": ["utils", "databases"],
                "variables": {
                    "ANIME_EPISODES_DATABASE_NAME":
                    self.anime_episodes.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.anime_episodes.table_arn}/index/episode_id"
                        ])
                ],
                "timeout": 3,
                "memory": 512
            },
            "api-anime": {
                "layers": ["utils", "databases", "api"],
                "variables": {
                    "ANIME_DATABASE_NAME": self.anime_table.table_name,
                    "POST_ANIME_SQS_QUEUE_URL":
                    self.post_anime_queue.queue_url,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.anime_table.table_arn}/index/mal_id"
                        ]),
                    PolicyStatement(
                        actions=["sqs:SendMessage"],
                        resources=[self.post_anime_queue.queue_arn]),
                ],
                "timeout": 10,
                "memory": 128
            },
            "crons-titles_updater": {
                "layers": ["utils", "databases", "api"],
                "variables": {
                    "ANIDB_TITLES_BUCKET":
                    self.anidb_titles_bucket.bucket_name,
                    "LOG_LEVEL": "INFO",
                },
                "concurrent_executions": 1,
                "policies": [
                    PolicyStatement(
                        actions=["s3:ListBucket"],
                        resources=[self.anidb_titles_bucket.bucket_arn]),
                    PolicyStatement(
                        actions=["s3:GetObject", "s3:PutObject"],
                        resources=[
                            self.anidb_titles_bucket.arn_for_objects("*")
                        ])
                ],
                "timeout": 120,
                "memory": 128
            },
            "crons-episodes_updater": {
                "layers": ["utils", "databases"],
                "variables": {
                    "LOG_LEVEL": "DEBUG",
                    "POST_ANIME_SQS_QUEUE_URL":
                    self.post_anime_queue.queue_url,
                    "ANIME_DATABASE_NAME": self.anime_table.table_name,
                },
                "concurrent_executions": 1,
                "policies": [
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.anime_table.table_arn}/index/broadcast_day"
                        ]),
                    PolicyStatement(
                        actions=["sqs:SendMessage"],
                        resources=[self.post_anime_queue.queue_arn]),
                ],
                "timeout": 120,
                "memory": 128
            },
            "sqs_handlers-post_anime": {
                "layers": ["utils", "databases", "api"],
                "variables": {
                    "ANIME_DATABASE_NAME": self.anime_table.table_name,
                    "ANIME_EPISODES_DATABASE_NAME":
                    self.anime_episodes.table_name,
                    "ANIME_PARAMS_DATABASE_NAME":
                    self.anime_params.table_name,
                    "MAL_CLIENT_ID": self.mal_client_id,
                    "ANIDB_TITLES_BUCKET":
                    self.anidb_titles_bucket.bucket_name,
                    "ANIDB_CLIENT": self.anidb_client,
                    "LOG_LEVEL": "INFO",
                },
                "concurrent_executions": 1,
                "policies": [
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.anime_table.table_arn}/index/mal_id"
                        ]),
                    PolicyStatement(actions=["dynamodb:UpdateItem"],
                                    resources=[self.anime_table.table_arn]),
                    PolicyStatement(actions=["dynamodb:BatchWriteItem"],
                                    resources=[self.anime_episodes.table_arn]),
                    PolicyStatement(
                        actions=["dynamodb:UpdateItem", "dynamodb:GetItem"],
                        resources=[self.anime_params.table_arn]),
                    PolicyStatement(
                        actions=["s3:ListBucket"],
                        resources=[self.anidb_titles_bucket.bucket_arn]),
                    PolicyStatement(
                        actions=["s3:GetObject"],
                        resources=[
                            self.anidb_titles_bucket.arn_for_objects("*")
                        ])
                ],
                "timeout": 60,
                "memory": 2048
            },
        }

    def _create_layers(self):
        if os.path.isdir(BUILD_FOLDER):
            shutil.rmtree(BUILD_FOLDER)
        os.mkdir(BUILD_FOLDER)

        for layer in os.listdir(LAYERS_DIR):
            layer_folder = os.path.join(LAYERS_DIR, layer)
            build_folder = os.path.join(BUILD_FOLDER, layer)
            shutil.copytree(layer_folder, build_folder)

            requirements_path = os.path.join(build_folder, "requirements.txt")
            if os.path.isfile(requirements_path):
                packages_folder = os.path.join(build_folder, "python", "lib",
                                               "python3.8", "site-packages")
                # print(f"Installing layer requirements to target: {os.path.abspath(packages_folder)}")
                subprocess.check_output([
                    "pip", "install", "-r", requirements_path, "-t",
                    packages_folder
                ])
                clean_pycache()

            self.layers[layer] = LayerVersion(
                self,
                layer,
                layer_version_name=f"anime-{layer}",
                code=Code.from_asset(path=build_folder),
                compatible_runtimes=[Runtime.PYTHON_3_8],
            )

    def _create_lambdas(self):
        clean_pycache()

        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                lambda_args = {
                    "code": Code.from_asset(root),
                    "handler": "__init__.handle",
                    "runtime": Runtime.PYTHON_3_8,
                    "layers": layers,
                    "function_name": name,
                    "environment": lambda_config["variables"],
                    "role": lambda_role,
                    "timeout": Duration.seconds(lambda_config["timeout"]),
                    "memory_size": lambda_config["memory"],
                }
                if "concurrent_executions" in lambda_config:
                    lambda_args["reserved_concurrent_executions"] = \
                        lambda_config["concurrent_executions"]

                self.lambdas[name] = Function(self, name, **lambda_args)

        self.lambdas["sqs_handlers-post_anime"].add_event_source(
            SqsEventSource(self.post_anime_queue))

        Rule(self,
             "titles_updater",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["crons-titles_updater"])])
        Rule(self,
             "episodes_updater",
             schedule=Schedule.cron(hour="4", minute="10"),
             targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])])

    def _create_gateway(self):
        cert = Certificate(self,
                           "certificate",
                           domain_name=self.domain_name,
                           validation_method=ValidationMethod.DNS)
        domain_name = DomainName(self,
                                 "domain",
                                 domain_name=self.domain_name,
                                 certificate=cert,
                                 security_policy=SecurityPolicy.TLS_1_2)

        http_api = HttpApi(
            self,
            "anime_gateway",
            create_default_stage=False,
            api_name="anime",
            cors_preflight=CorsPreflightOptions(
                allow_methods=[HttpMethod.GET, HttpMethod.POST],
                allow_origins=["https://moshan.tv", "https://beta.moshan.tv"],
                allow_headers=[
                    "authorization", "content-type", "x-mal-client-id"
                ]))

        authorizer = CfnAuthorizer(
            self,
            "cognito",
            api_id=http_api.http_api_id,
            authorizer_type="JWT",
            identity_source=["$request.header.Authorization"],
            name="cognito",
            jwt_configuration=CfnAuthorizer.JWTConfigurationProperty(
                audience=["68v5rahd0sdvrmf7fgbq2o1a9u"],
                issuer="https://cognito-idp.eu-west-1.amazonaws.com/eu-west-1_sJ3Y4kSv6"
            ))

        routes = {
            "get_anime": {
                "method": "GET",
                "route": "/anime",
                "target_lambda": self.lambdas["api-anime"]
            },
            "post_anime": {
                "method": "POST",
                "route": "/anime",
                "target_lambda": self.lambdas["api-anime"]
            },
            "get_anime_by_id": {
                "method": "GET",
                "route": "/anime/{id}",
                "target_lambda": self.lambdas["api-anime_by_id"]
            },
            "get_anime_episodes": {
                "method": "GET",
                "route": "/anime/{id}/episodes",
                "target_lambda": self.lambdas["api-anime_episodes"]
            },
            "post_anime_episode": {
                "method": "POST",
                "route": "/anime/{id}/episodes",
                "target_lambda": self.lambdas["api-anime_episodes"]
            },
            "get_anime_episode": {
                "method": "GET",
                "route": "/anime/{id}/episodes/{episode_id}",
                "target_lambda": self.lambdas["api-anime_episode"]
            },
        }

        for r in routes:
            integration = HttpIntegration(
                self,
                f"{r}_integration",
                http_api=http_api,
                integration_type=HttpIntegrationType.LAMBDA_PROXY,
                integration_uri=routes[r]["target_lambda"].function_arn,
                method=getattr(HttpMethod, routes[r]["method"]),
                payload_format_version=PayloadFormatVersion.VERSION_2_0,
            )
            CfnRoute(self,
                     r,
                     api_id=http_api.http_api_id,
                     route_key=f"{routes[r]['method']} {routes[r]['route']}",
                     authorization_type="JWT",
                     authorizer_id=authorizer.ref,
                     target="integrations/" + integration.integration_id)

            routes[r]["target_lambda"].add_permission(
                f"{r}_apigateway_invoke",
                principal=ServicePrincipal("apigateway.amazonaws.com"),
                source_arn=f"arn:aws:execute-api:{self.region}:{self.account}:{http_api.http_api_id}/*"
            )

        mal_proxy_integration = HttpIntegration(
            self,
            "mal_proxy_integration",
            http_api=http_api,
            integration_type=HttpIntegrationType.HTTP_PROXY,
            integration_uri="https://api.myanimelist.net/v2/{proxy}",
            method=HttpMethod.ANY,
            payload_format_version=PayloadFormatVersion.VERSION_1_0,
        )
        CfnRoute(
            self,
            "mal_proxy_route",
            api_id=http_api.http_api_id,
            route_key="GET /mal_proxy/{proxy+}",
            authorization_type="JWT",
            authorizer_id=authorizer.ref,
            target="integrations/" + mal_proxy_integration.integration_id,
        )

        stage = CfnStage(self,
                         "live",
                         api_id=http_api.http_api_id,
                         auto_deploy=True,
                         default_route_settings=CfnStage.RouteSettingsProperty(
                             throttling_burst_limit=10,
                             throttling_rate_limit=5),
                         stage_name="live")
        HttpApiMapping(self,
                       "mapping",
                       api=http_api,
                       domain_name=domain_name,
                       stage=stage)
class Movies(core.Stack):
    def __init__(self, app: core.App, id: str, domain_name: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        self.domain_name = domain_name
        self.layers = {}
        self.lambdas = {}

        self._create_tables()
        self._create_lambdas_config()
        self._create_layers()
        self._create_lambdas()
        self._create_gateway()

    def _create_tables(self):
        self.movies_table = Table(
            self,
            "movies_table",
            table_name="movies",
            partition_key=Attribute(name="id", type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
        )
        self.movies_table.add_global_secondary_index(
            partition_key=Attribute(name="tmdb_id",
                                    type=AttributeType.NUMBER),
            index_name="tmdb_id")

    def _create_lambdas_config(self):
        self.lambdas_config = {
            "api-movies_by_id": {
                "layers": ["utils", "databases"],
                "variables": {
                    "MOVIES_DATABASE_NAME": self.movies_table.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(actions=["dynamodb:GetItem"],
                                    resources=[self.movies_table.table_arn])
                ],
                "timeout": 3,
                "memory": 128
            },
            "api-movies": {
                "layers": ["utils", "databases"],
                "variables": {
                    "MOVIES_DATABASE_NAME": self.movies_table.table_name,
                    "LOG_LEVEL": "INFO",
                },
                "policies": [
                    PolicyStatement(
                        actions=["dynamodb:Query"],
                        resources=[
                            f"{self.movies_table.table_arn}/index/tmdb_id"
                        ]),
                    PolicyStatement(actions=["dynamodb:UpdateItem"],
                                    resources=[self.movies_table.table_arn]),
                ],
                "timeout": 10,
                "memory": 128
            },
        }

    def _create_layers(self):
        if os.path.isdir(BUILD_FOLDER):
            shutil.rmtree(BUILD_FOLDER)
        os.mkdir(BUILD_FOLDER)

        for layer in os.listdir(LAYERS_DIR):
            layer_folder = os.path.join(LAYERS_DIR, layer)
            build_folder = os.path.join(BUILD_FOLDER, layer)
            shutil.copytree(layer_folder, build_folder)

            requirements_path = os.path.join(build_folder, "requirements.txt")
            if os.path.isfile(requirements_path):
                packages_folder = os.path.join(build_folder, "python", "lib",
                                               "python3.8", "site-packages")
                # print(f"Installing layer requirements to target: {os.path.abspath(packages_folder)}")
                subprocess.check_output([
                    "pip", "install", "-r", requirements_path, "-t",
                    packages_folder
                ])
                clean_pycache()

            self.layers[layer] = LayerVersion(
                self,
                layer,
                layer_version_name=f"movies-{layer}",
                code=Code.from_asset(path=build_folder),
                compatible_runtimes=[Runtime.PYTHON_3_8],
            )

    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"])

    def _create_gateway(self):
        cert = Certificate(self,
                           "certificate",
                           domain_name=self.domain_name,
                           validation_method=ValidationMethod.DNS)
        domain_name = DomainName(self,
                                 "domain",
                                 domain_name=self.domain_name,
                                 certificate=cert,
                                 security_policy=SecurityPolicy.TLS_1_2)

        http_api = HttpApi(
            self,
            "movies_gateway",
            create_default_stage=False,
            api_name="movies",
            cors_preflight=CorsPreflightOptions(
                allow_methods=[HttpMethod.GET, HttpMethod.POST],
                allow_origins=["https://moshan.tv", "https://beta.moshan.tv"],
                allow_headers=["authorization", "content-type"]))

        authorizer = CfnAuthorizer(
            self,
            "cognito",
            api_id=http_api.http_api_id,
            authorizer_type="JWT",
            identity_source=["$request.header.Authorization"],
            name="cognito",
            jwt_configuration=CfnAuthorizer.JWTConfigurationProperty(
                audience=["68v5rahd0sdvrmf7fgbq2o1a9u"],
                issuer="https://cognito-idp.eu-west-1.amazonaws.com/eu-west-1_sJ3Y4kSv6"
            ))

        routes = {
            "get_movies": {
                "method": "GET",
                "route": "/movies",
                "target_lambda": self.lambdas["api-movies"]
            },
            "post_movies": {
                "method": "POST",
                "route": "/movies",
                "target_lambda": self.lambdas["api-movies"]
            },
            "get_movies_by_id": {
                "method": "GET",
                "route": "/movies/{id}",
                "target_lambda": self.lambdas["api-movies_by_id"]
            },
        }

        for r in routes:
            integration = HttpIntegration(
                self,
                f"{r}_integration",
                http_api=http_api,
                integration_type=HttpIntegrationType.LAMBDA_PROXY,
                integration_uri=routes[r]["target_lambda"].function_arn,
                method=getattr(HttpMethod, routes[r]["method"]),
                payload_format_version=PayloadFormatVersion.VERSION_2_0,
            )
            CfnRoute(self,
                     r,
                     api_id=http_api.http_api_id,
                     route_key=f"{routes[r]['method']} {routes[r]['route']}",
                     authorization_type="JWT",
                     authorizer_id=authorizer.ref,
                     target="integrations/" + integration.integration_id)

            routes[r]["target_lambda"].add_permission(
                f"{r}_apigateway_invoke",
                principal=ServicePrincipal("apigateway.amazonaws.com"),
                source_arn=f"arn:aws:execute-api:{self.region}:{self.account}:{http_api.http_api_id}/*"
            )

        stage = CfnStage(self,
                         "live",
                         api_id=http_api.http_api_id,
                         auto_deploy=True,
                         default_route_settings=CfnStage.RouteSettingsProperty(
                             throttling_burst_limit=10,
                             throttling_rate_limit=5),
                         stage_name="live")
        HttpApiMapping(self,
                       "mapping",
                       api=http_api,
                       domain_name=domain_name,
                       stage=stage)
def __init__(self, scope: core.Construct, id: str, table: dynamodb.Table,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    role = iam.Role(
        self, 'LambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
        ],
    )
    table.grant_write_data(role)

    yfinance_layer = lambda_.LayerVersion(
        self, 'YahooFinance',
        code=lambda_.Code.from_asset('src/yfinance'),
        compatible_runtimes=[
            lambda_.Runtime.PYTHON_3_6,
            lambda_.Runtime.PYTHON_3_7,
            lambda_.Runtime.PYTHON_3_8,
        ])

    function = lambda_.Function(
        self, 'Download',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset('src/download'),
        handler='download.handler',
        timeout=core.Duration.minutes(1),
        memory_size=1024,
        role=role,
        layers=[yfinance_layer],
        environment={'TABLE_NAME': table.table_name})

    for job in IMPORT_JOBS:
        target = events_targets.LambdaFunction(
            handler=function,
            event=events.RuleTargetInput.from_object({
                'market': job['market'],
                'tickers': job['tickers'],
                'period': '3d',
            }))
        events.Rule(
            self,
            'DailyImport{}'.format(job['market']),
            targets=[target],
            schedule=events.Schedule.cron(
                year='*',
                month='*',
                week_day='MON-FRI',  # only on days when markets are open
                hour=job['closing_hour'],
                minute='5',  # 5 minutes after closing
            ),
        )
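# Minimal sketch of download.handler, assuming the layer exposes the public
# yfinance API and that the event carries multiple tickers; the item shape
# written to DynamoDB is an assumption.
import os

import boto3
import yfinance as yf


def handler(event, context):
    data = yf.download(" ".join(event["tickers"]), period=event["period"])
    table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
    # Persist the latest close per ticker (illustrative only).
    for ticker in event["tickers"]:
        close = data["Close"][ticker].dropna()
        table.put_item(Item={
            "ticker": ticker,
            "date": str(close.index[-1].date()),
            "close": str(close.iloc[-1]),  # strings avoid Decimal conversion
        })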
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    table_name = 'items'

    items_graphql_api = CfnGraphQLApi(self,
                                      'ItemsApi',
                                      name='items-api',
                                      authentication_type='API_KEY')
    CfnApiKey(self, 'ItemsApiKey', api_id=items_graphql_api.attr_api_id)

    api_schema = CfnGraphQLSchema(self,
                                  'ItemsSchema',
                                  api_id=items_graphql_api.attr_api_id,
                                  definition=f"""\
type {table_name} {{
    {table_name}Id: ID!
    name: String
}}
type Paginated{table_name} {{
    items: [{table_name}!]!
    nextToken: String
}}
type Query {{
    all(limit: Int, nextToken: String): Paginated{table_name}!
    getOne({table_name}Id: ID!): {table_name}
}}
type Mutation {{
    save(name: String!): {table_name}
    delete({table_name}Id: ID!): {table_name}
}}
type Schema {{
    query: Query
    mutation: Mutation
}}""")

    items_table = Table(
        self,
        'ItemsTable',
        table_name=table_name,
        partition_key=Attribute(name=f'{table_name}Id',
                                type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        stream=StreamViewType.NEW_IMAGE,
        # The default removal policy is RETAIN, which means that cdk destroy
        # will not attempt to delete the new table, and it will remain in your
        # account until manually deleted. By setting the policy to DESTROY,
        # cdk destroy will delete the table (even if it has data in it).
        removal_policy=core.RemovalPolicy.DESTROY  # NOT recommended for production code
    )

    items_table_role = Role(
        self,
        'ItemsDynamoDBRole',
        assumed_by=ServicePrincipal('appsync.amazonaws.com'))
    items_table_role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess'))

    data_source = CfnDataSource(
        self,
        'ItemsDataSource',
        api_id=items_graphql_api.attr_api_id,
        name='ItemsDynamoDataSource',
        type='AMAZON_DYNAMODB',
        dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
            table_name=items_table.table_name, aws_region=self.region),
        service_role_arn=items_table_role.role_arn)

    get_one_resolver = CfnResolver(
        self,
        'GetOneQueryResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Query',
        field_name='getOne',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "GetItem",
    "key": {{
        "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
    }}
}}""",
        response_mapping_template="$util.toJson($ctx.result)")
    get_one_resolver.add_depends_on(api_schema)

    get_all_resolver = CfnResolver(
        self,
        'GetAllQueryResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Query',
        field_name='all',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "Scan",
    "limit": $util.defaultIfNull($ctx.args.limit, 20),
    "nextToken": $util.toJson($util.defaultIfNullOrEmpty($ctx.args.nextToken, null))
}}""",
        response_mapping_template="$util.toJson($ctx.result)")
    get_all_resolver.add_depends_on(api_schema)

    save_resolver = CfnResolver(
        self,
        'SaveMutationResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Mutation',
        field_name='save',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "PutItem",
    "key": {{
        "{table_name}Id": {{ "S": "$util.autoId()" }}
    }},
    "attributeValues": {{
        "name": $util.dynamodb.toDynamoDBJson($ctx.args.name)
    }}
}}""",
        response_mapping_template="$util.toJson($ctx.result)")
    save_resolver.add_depends_on(api_schema)

    delete_resolver = CfnResolver(
        self,
        'DeleteMutationResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Mutation',
        field_name='delete',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "DeleteItem",
    "key": {{
        "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
    }}
}}""",
        response_mapping_template="$util.toJson($ctx.result)")
    delete_resolver.add_depends_on(api_schema)