def _create_tables(self):
    self.movies_table = Table(
        self,
        "movies_table",
        table_name="movies",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.movies_table.add_global_secondary_index(
        partition_key=Attribute(name="tmdb_id", type=AttributeType.NUMBER),
        index_name="tmdb_id",
    )

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # DynamoDB
    table = Table(
        self,
        "DynamoTableTable",
        partition_key=Attribute(name="pk", type=AttributeType.NUMBER),  # partition key
        sort_key=Attribute(name="sk", type=AttributeType.STRING),  # sort key
        billing_mode=BillingMode.PAY_PER_REQUEST,  # use on-demand capacity
        removal_policy=core.RemovalPolicy.DESTROY,  # delete the table together with the stack (optional)
    )

def create_deploy_status_table(self) -> Resource:
    """Table that tracks the creation status of each component.

    Table Schema
        * Partition_key
            * component_name
        * Sort_key
            * version
        * Item
            * bucket
            * s3_path
            * component_arn
            * pipeline_status
                * image_creating: container image build in progress
                * image_faild: container image build failed
                * image_exists: container image exists
                * component_exists: the GG component exists
                * component_faild: component registration failed for some reason
                * create_deployment:
            * update_time
            * deployment_status
                * IN_PROGRESS
                * ACTIVE
                * CANCELLED
            * deploy_group
            * job_id

    Returns:
        Resource: DynamoDB Table
    """
    table_name = f"{self.stack_name}_{self.component_id}_" + "deploy_status"
    table = Table(
        self,
        id=table_name,
        table_name=table_name,
        partition_key=Attribute(name="component_name", type=AttributeType.STRING),  # partition key
        sort_key=Attribute(name="version", type=AttributeType.STRING),  # sort key
        removal_policy=RemovalPolicy.DESTROY,  # delete the table together with the stack (optional)
    )
    return table

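# A minimal sketch (not from the original source) of one deploy-status item
# under the schema documented above; the attribute names follow the
# docstring, the concrete values are illustrative assumptions.
example_deploy_status_item = {
    "component_name": "com.example.detector",  # partition key (hypothetical)
    "version": "1.0.0",                        # sort key (hypothetical)
    "bucket": "my-artifact-bucket",
    "s3_path": "components/detector/1.0.0/",
    "component_arn": "arn:aws:greengrass:...",
    "pipeline_status": "image_creating",
    "update_time": "2021-01-01T00:00:00Z",
    "deployment_status": "IN_PROGRESS",
    "deploy_group": "dev",
    "job_id": "job-0001",
}
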
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    table_name = "posts2"
    function_name = "cl2"
    email = "*****@*****.**"

    table = Table(
        self,
        "cl_posts",
        table_name=table_name,
        partition_key=Attribute(name="url", type=AttributeType.STRING),
        time_to_live_attribute="ttl",
    )

    function = PythonFunction(
        self,
        "cl_function",
        function_name=function_name,
        entry="src",
        index="app.py",
        runtime=Runtime.PYTHON_3_8,
        environment={"cl_email": email, "cl_table_name": table_name},
        timeout=Duration.seconds(300),
        initial_policy=[
            PolicyStatement(
                actions=["ses:SendEmail", "ses:VerifyEmailIdentity"],
                resources=[
                    f"arn:aws:ses:{self.region}:{self.account}:identity/{email}"
                ],
            ),
            PolicyStatement(
                actions=["dynamodb:BatchGetItem", "dynamodb:BatchWriteItem"],
                resources=[table.table_arn],
            ),
        ],
    )

    with open("events/event.json") as f:
        event = json.load(f)

    Rule(
        self,
        "cl_schedule",
        schedule=Schedule.expression("cron(0 19 * * ? *)"),
        targets=[
            LambdaFunction(function, event=RuleTargetInput.from_object(event))
        ],
    )

def __init__(self, scope: Construct, id: str, table_name: str) -> None:
    super().__init__(scope, id)

    primary_key = Attribute(name="id", type=AttributeType.STRING)
    self.table = Table(
        self,
        "SmolTable",
        billing_mode=BillingMode.PAY_PER_REQUEST,
        partition_key=primary_key,
        point_in_time_recovery=True,
        table_name=table_name,
    )

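# A minimal usage sketch, assuming the construct above is exposed as a
# `SmolTable` class; the stack class and table name here are hypothetical,
# not from the original source.
class SmolStack(Stack):
    def __init__(self, scope: Construct, id: str) -> None:
        super().__init__(scope, id)
        smol = SmolTable(self, "SmolTable", table_name="smol-table")
        # The underlying Table is reachable via `smol.table`, e.g. to grant
        # access to a function: smol.table.grant_read_data(some_function)
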
def _create_tables(self):
    self.shows_table = Table(
        self,
        "shows_table",
        table_name="shows",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.shows_table.add_global_secondary_index(
        partition_key=Attribute(name="tvmaze_id", type=AttributeType.NUMBER),
        index_name="tvmaze_id",
    )

    self.episodes_table = Table(
        self,
        "episodes_table",
        table_name="shows-eps",
        partition_key=Attribute(name="show_id", type=AttributeType.STRING),
        sort_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.episodes_table.add_local_secondary_index(
        sort_key=Attribute(name="id", type=AttributeType.STRING),
        index_name="episode_id",
    )
    self.episodes_table.add_global_secondary_index(
        partition_key=Attribute(name="tvmaze_id", type=AttributeType.NUMBER),
        index_name="tvmaze_id",
    )

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # =======> DynamoDB
    gsi_name = "HogeGSI"
    table = Table(
        self,
        "DynamoTableGsiTable",
        partition_key=Attribute(name="pk", type=AttributeType.NUMBER),
        sort_key=Attribute(name="sk", type=AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    table.add_global_secondary_index(
        index_name=gsi_name,
        partition_key=Attribute(name="gsi_pk", type=AttributeType.NUMBER),
        sort_key=Attribute(name="gsi_sk", type=AttributeType.STRING),
        projection_type=ProjectionType.KEYS_ONLY,
    )

    # =======> Lambda
    lambda_ = aws_lambda.Function(
        self,
        "LambdaDynamoTableGsi",
        code=aws_lambda.Code.asset("lambdas/dynamo_table_gsi"),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        environment={"TABLE_NAME": table.table_name, "GSI_NAME": gsi_name},
    )
    table.grant_read_data(lambda_)  # this also grants read access on the GSI

def __init__(self, scope: cdk.Construct, construct_id: str, db_context: str, **kwargs) -> None:
    super().__init__(scope, construct_id)

    # setting the db context
    db = dict(self.node.try_get_context(db_context))

    # Shortening some of the logic
    billing_mode = (
        BillingMode.PROVISIONED
        if db["db_billing_mode"] == "provisioned"
        else BillingMode.PAY_PER_REQUEST
    )
    pk = db["db_table_pk"]
    pk_type = (
        AttributeType.STRING
        if db["db_table_pk_type"] == "string"
        else AttributeType.NUMBER
    )

    table = Table(
        self,
        db["db_table"],
        table_name=db["db_table"],
        partition_key=Attribute(name=pk, type=pk_type),
        read_capacity=db["db_min_read_capacity"],
        write_capacity=db["db_min_write_capacity"],
        encryption=_ddb.TableEncryption.AWS_MANAGED,
        point_in_time_recovery=True,
        removal_policy=cdk.RemovalPolicy.DESTROY,
        billing_mode=billing_mode,
        time_to_live_attribute=db["db_ttl_attribute"],
    )

    # Add read/write autoscaling enabled at X% utilization
    if db["db_billing_mode"] == "provisioned" and db["db_enable_autoscaling"]:
        read_scaling = table.auto_scale_read_capacity(
            min_capacity=db["db_min_read_capacity"],
            max_capacity=db["db_max_read_capacity"],
        )
        read_scaling.scale_on_utilization(
            target_utilization_percent=db["db_target_utilization"],
        )
        write_scaling = table.auto_scale_write_capacity(
            min_capacity=db["db_min_write_capacity"],
            max_capacity=db["db_max_write_capacity"],
        )
        write_scaling.scale_on_utilization(
            target_utilization_percent=db["db_target_utilization"],
        )

    self.table = table

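# A sketch of the cdk.json context block this stack expects to find under
# `db_context`; the key names come from the lookups above, the values are
# illustrative assumptions.
#
# "dev_db": {
#     "db_table": "my-table",
#     "db_table_pk": "pk",
#     "db_table_pk_type": "string",
#     "db_billing_mode": "provisioned",
#     "db_min_read_capacity": 5,
#     "db_max_read_capacity": 100,
#     "db_min_write_capacity": 5,
#     "db_max_write_capacity": 100,
#     "db_enable_autoscaling": true,
#     "db_target_utilization": 70,
#     "db_ttl_attribute": "ttl"
# }
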
def _create_tables(self):
    self.anime_table = Table(
        self,
        "anime_items",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.anime_table.add_global_secondary_index(
        partition_key=Attribute(name="mal_id", type=AttributeType.NUMBER),
        index_name="mal_id",
    )
    self.anime_table.add_global_secondary_index(
        partition_key=Attribute(name="broadcast_day", type=AttributeType.STRING),
        index_name="broadcast_day",
    )

    self.anime_episodes = Table(
        self,
        "anime_episodes",
        partition_key=Attribute(name="anime_id", type=AttributeType.STRING),
        sort_key=Attribute(name="episode_number", type=AttributeType.NUMBER),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )
    self.anime_episodes.add_local_secondary_index(
        sort_key=Attribute(name="id", type=AttributeType.STRING),
        index_name="episode_id",
    )
    self.anime_episodes.add_global_secondary_index(
        partition_key=Attribute(name="anidb_id", type=AttributeType.NUMBER),
        index_name="anidb_id",
    )

    self.anime_params = Table(
        self,
        "anime_params",
        partition_key=Attribute(name="name", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
    )

def __init__(self, scope: Stack):
    super().__init__(scope=scope, id="TestingStack", stack_name="TestingStack")

    table = Table(
        scope=self,
        id="TestingTable",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        stream=StreamViewType.NEW_IMAGE,
        removal_policy=RemovalPolicy.DESTROY,
    )

    domain = Domain(
        scope=self,
        id="TestingElasticsearchDomain",
        version=ElasticsearchVersion.V7_7,
        capacity=CapacityConfig(
            # Use the cheapest instance available.
            data_node_instance_type="t3.small.elasticsearch",
            data_nodes=1,
            master_nodes=None,
        ),
        zone_awareness=ZoneAwarenessConfig(enabled=False),
        ebs=EbsOptions(
            enabled=True,
            volume_size=10,
            volume_type=EbsDeviceVolumeType.GP2,
        ),
    )

    elasticsearch_index = ElasticsearchIndexResource(
        scope=self,
        name="TestingElasticsearchIndex",
        elasticsearch_domain=domain,
        index_prefix="testing_index",
    )

    elasticsearch_cloner = ElasticsearchCloner(
        scope=self,
        id="TestingElasticsearchCloner",
        elasticsearch_index=elasticsearch_index,
        dynamodb_table=table,
    )

def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # The code that defines your stack goes here
    table_name = "trainers"

    trainers_graphql_api = CfnGraphQLApi(
        self,
        'trainersApi',
        name="trainers-api",
        authentication_type='API_KEY',
    )
    CfnApiKey(
        self,
        'TrainersApiKey',
        api_id=trainers_graphql_api.attr_api_id,
    )
    api_schema = CfnGraphQLSchema(
        self,
        "TrainersSchema",
        api_id=trainers_graphql_api.attr_api_id,
        definition=data_schema,
    )

    trainers_table = Table(
        self,
        'TrainersTable',
        table_name=table_name,
        partition_key=Attribute(
            name='id',
            type=AttributeType.STRING,
        ),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        stream=StreamViewType.NEW_IMAGE,
        # The default removal policy is RETAIN, which means that cdk
        # destroy will not attempt to delete the new table, and it will
        # remain in your account until manually deleted. By setting the
        # policy to DESTROY, cdk destroy will delete the table (even if it
        # has data in it)
        removal_policy=core.RemovalPolicy.DESTROY,  # NOT recommended for production code
    )

    trainers_table_role = Role(
        self,
        'TrainersDynamoDBRole',
        assumed_by=ServicePrincipal('appsync.amazonaws.com'),
    )
    trainers_table_role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess')
    )

    data_source = CfnDataSource(
        self,
        'TrainersDataSource',
        api_id=trainers_graphql_api.attr_api_id,
        name='TrainersDynamoDataSource',
        type='AMAZON_DYNAMODB',
        dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
            table_name=trainers_table.table_name,
            aws_region=self.region,
        ),
        service_role_arn=trainers_table_role.role_arn,
    )

    get_Trainer_resolver = CfnResolver(
        self,
        'GetOneQueryResolver',
        api_id=trainers_graphql_api.attr_api_id,
        type_name='Query',
        field_name='getTrainer',
        data_source_name=data_source.name,
        request_mapping_template=get_trainer,
        response_mapping_template="$util.toJson($ctx.result)",
    )
    get_Trainer_resolver.add_depends_on(api_schema)

    get_all_trainers_resolver = CfnResolver(
        self,
        'GetAllQueryResolver',
        api_id=trainers_graphql_api.attr_api_id,
        type_name='Query',
        field_name='allTrainers',
        data_source_name=data_source.name,
        request_mapping_template=all_trainers,
        response_mapping_template="$util.toJson($ctx.result)",
    )
    get_all_trainers_resolver.add_depends_on(api_schema)

    create_trainers_resolver = CfnResolver(
        self,
        'CreateTrainerMutationResolver',
        api_id=trainers_graphql_api.attr_api_id,
        type_name='Mutation',
        field_name='createTrainer',
        data_source_name=data_source.name,
        request_mapping_template=create_trainer,
        response_mapping_template="$util.toJson($ctx.result)",
    )
    create_trainers_resolver.add_depends_on(api_schema)

    update_trainers_resolver = CfnResolver(
        self,
        'UpdateMutationResolver',
        api_id=trainers_graphql_api.attr_api_id,
        type_name="Mutation",
        field_name="updateTrainers",
        data_source_name=data_source.name,
        request_mapping_template=update_trainer,
        response_mapping_template="$util.toJson($ctx.result)",
    )
    update_trainers_resolver.add_depends_on(api_schema)

    delete_trainer_resolver = CfnResolver(
        self,
        'DeleteMutationResolver',
        api_id=trainers_graphql_api.attr_api_id,
        type_name='Mutation',
        field_name='deleteTrainer',
        data_source_name=data_source.name,
        request_mapping_template=delete_trainer,
        response_mapping_template="$util.toJson($ctx.result)",
    )
    delete_trainer_resolver.add_depends_on(api_schema)

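# The mapping templates referenced above (get_trainer, all_trainers,
# create_trainer, update_trainer, delete_trainer) are imported from
# elsewhere in the project. A minimal sketch of what get_trainer might
# contain, modeled on the standard AppSync DynamoDB GetItem template shown
# in the items-api example below; this is an assumption, not the original:
get_trainer = """
{
    "version": "2017-02-28",
    "operation": "GetItem",
    "key": {
        "id": $util.dynamodb.toDynamoDBJson($ctx.args.id)
    }
}"""
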
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    table_name = 'items'

    items_graphql_api = CfnGraphQLApi(
        self,
        'ItemsApi',
        name='items-api',
        authentication_type='API_KEY',
    )
    CfnApiKey(
        self,
        'ItemsApiKey',
        api_id=items_graphql_api.attr_api_id,
    )
    api_schema = CfnGraphQLSchema(
        self,
        'ItemsSchema',
        api_id=items_graphql_api.attr_api_id,
        definition=f"""\
type {table_name} {{
    {table_name}Id: ID!
    name: String
}}
type Paginated{table_name} {{
    items: [{table_name}!]!
    nextToken: String
}}
type Query {{
    all(limit: Int, nextToken: String): Paginated{table_name}!
    getOne({table_name}Id: ID!): {table_name}
}}
type Mutation {{
    save(name: String!): {table_name}
    delete({table_name}Id: ID!): {table_name}
}}
type Schema {{
    query: Query
    mutation: Mutation
}}""",
    )

    items_table = Table(
        self,
        'ItemsTable',
        table_name=table_name,
        partition_key=Attribute(name=f'{table_name}Id', type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        stream=StreamViewType.NEW_IMAGE,
        # The default removal policy is RETAIN, which means that cdk
        # destroy will not attempt to delete the new table, and it will
        # remain in your account until manually deleted. By setting the
        # policy to DESTROY, cdk destroy will delete the table (even if it
        # has data in it)
        removal_policy=core.RemovalPolicy.DESTROY,  # NOT recommended for production code
    )

    items_table_role = Role(
        self,
        'ItemsDynamoDBRole',
        assumed_by=ServicePrincipal('appsync.amazonaws.com'),
    )
    items_table_role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess')
    )

    data_source = CfnDataSource(
        self,
        'ItemsDataSource',
        api_id=items_graphql_api.attr_api_id,
        name='ItemsDynamoDataSource',
        type='AMAZON_DYNAMODB',
        dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
            table_name=items_table.table_name,
            aws_region=self.region,
        ),
        service_role_arn=items_table_role.role_arn,
    )

    get_one_resolver = CfnResolver(
        self,
        'GetOneQueryResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Query',
        field_name='getOne',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "GetItem",
    "key": {{
        "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
    }}
}}""",
        response_mapping_template="$util.toJson($ctx.result)",
    )
    get_one_resolver.add_depends_on(api_schema)

    get_all_resolver = CfnResolver(
        self,
        'GetAllQueryResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Query',
        field_name='all',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "Scan",
    "limit": $util.defaultIfNull($ctx.args.limit, 20),
    "nextToken": $util.toJson($util.defaultIfNullOrEmpty($ctx.args.nextToken, null))
}}""",
        response_mapping_template="$util.toJson($ctx.result)",
    )
    get_all_resolver.add_depends_on(api_schema)

    save_resolver = CfnResolver(
        self,
        'SaveMutationResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Mutation',
        field_name='save',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "PutItem",
    "key": {{
        "{table_name}Id": {{ "S": "$util.autoId()" }}
    }},
    "attributeValues": {{
        "name": $util.dynamodb.toDynamoDBJson($ctx.args.name)
    }}
}}""",
        response_mapping_template="$util.toJson($ctx.result)",
    )
    save_resolver.add_depends_on(api_schema)

    delete_resolver = CfnResolver(
        self,
        'DeleteMutationResolver',
        api_id=items_graphql_api.attr_api_id,
        type_name='Mutation',
        field_name='delete',
        data_source_name=data_source.name,
        request_mapping_template=f"""\
{{
    "version": "2017-02-28",
    "operation": "DeleteItem",
    "key": {{
        "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
    }}
}}""",
        response_mapping_template="$util.toJson($ctx.result)",
    )
    delete_resolver.add_depends_on(api_schema)

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.prefix = 'infra-smartnumbers'.lower()
    self.root_dir = str(Path(__file__).parents[1])
    print(self.root_dir)
    self.log_retention = aws_logs.RetentionDays.ONE_WEEK

    # ////////////////////////////////////////////////////////////////////////////////////////////
    """ S3 Buckets """
    # output bucket
    bucket_name = self.prefix + '-output-bucket'
    output_bucket = s3.Bucket(
        self,
        bucket_name,
        bucket_name=bucket_name,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    # ////////////////////////////////////////////////////////////////////////////////////////////
    """ Dynamo Tables """
    self.tables = []

    calls_table_name = self.prefix + '-calls'
    calls_table = ddb.Table(
        self,
        calls_table_name,
        table_name=calls_table_name,
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        sort_key=Attribute(name="number", type=AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY,
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
    )
    self.tables.append(calls_table_name)

    operators_table_name = self.prefix + '-operators'
    operators_table = ddb.Table(
        self,
        operators_table_name,
        table_name=operators_table_name,
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        sort_key=Attribute(name="prefix", type=AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY,
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
    )
    self.tables.append(operators_table_name)

    # ////////////////////////////////////////////////////////////////////////////////////////////
    """ Lambdas """
    self.lambdas = []

    api_receiver_lambda = create_lambda(self, 'api_receiver')
    self.lambdas.append(api_receiver_lambda)
    calls_table.grant_read_write_data(api_receiver_lambda)
    operators_table.grant_read_write_data(api_receiver_lambda)

    matcher_call_operator_lambda = create_lambda(self, 'matcher_call_operator')
    self.lambdas.append(matcher_call_operator_lambda)
    calls_table.grant_read_write_data(matcher_call_operator_lambda)
    operators_table.grant_read_data(matcher_call_operator_lambda)
    output_bucket.grant_read_write(matcher_call_operator_lambda)

    # ////////////////////////////////////////////////////////////////////////////////////////////
    """ API Gateways """
    api_endpoint = apigw.LambdaRestApi(
        self,
        self.prefix + '-gateway',
        handler=api_receiver_lambda,
        proxy=False,
    )

    apigateway_integration_input = apigw.LambdaIntegration(api_receiver_lambda)
    apigateway_endpoint_input = api_endpoint.root.add_resource('input_data')
    apigateway_endpoint_input.add_method('POST', apigateway_integration_input)

    apigateway_integration_input = apigw.LambdaIntegration(matcher_call_operator_lambda)
    apigateway_endpoint_trigger = api_endpoint.root.add_resource('event_trigger')
    apigateway_endpoint_trigger.add_method('POST', apigateway_integration_input)

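# `create_lambda` is a project-local helper that is not shown in this
# snippet. A plausible sketch, assuming each function's code lives in a
# directory named after it and the stack's prefix and log retention are
# reused; the module alias, handler path, and runtime are assumptions:
def create_lambda(stack, name):
    return aws_lambda.Function(
        stack,
        stack.prefix + '-' + name,
        function_name=stack.prefix + '-' + name,
        runtime=aws_lambda.Runtime.PYTHON_3_8,  # assumed runtime
        handler='handler.handler',  # assumed entry point
        code=aws_lambda.Code.from_asset(
            str(Path(stack.root_dir) / 'lambdas' / name)),  # assumed layout
        log_retention=stack.log_retention,
    )
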
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # SQS queue
    state_change_sqs = Queue(
        self,
        "state_change_sqs",
        visibility_timeout=core.Duration.seconds(60),
    )

    # DynamoDB tables
    # EC2 state changes
    tb_states = Table(
        self,
        "ec2_states",
        partition_key=Attribute(name="instance-id", type=AttributeType.STRING),
        sort_key=Attribute(name="time", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY,
        stream=StreamViewType.NEW_IMAGE,
    )
    # EC2 inventory
    tb_inventory = Table(
        self,
        "ec2_inventory",
        partition_key=Attribute(name="instance-id", type=AttributeType.STRING),
        sort_key=Attribute(name="time", type=AttributeType.STRING),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY,
        stream=StreamViewType.KEYS_ONLY,
    )

    # IAM policies - AWS managed
    basic_exec = ManagedPolicy.from_aws_managed_policy_name(
        "service-role/AWSLambdaBasicExecutionRole")
    sqs_access = ManagedPolicy(
        self,
        "LambdaSQSExecution",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=[
                    "sqs:ReceiveMessage",
                    "sqs:DeleteMessage",
                    "sqs:GetQueueAttributes",
                ],
                resources=[state_change_sqs.queue_arn],
            )
        ],
    )

    # IAM policies
    pol_ec2_states_ro = ManagedPolicy(
        self,
        "pol_EC2StatesReadOnly",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=[
                    "dynamodb:DescribeStream",
                    "dynamodb:GetRecords",
                    "dynamodb:GetItem",
                    "dynamodb:GetShardIterator",
                    "dynamodb:ListStreams",
                ],
                resources=[tb_states.table_arn],
            )
        ],
    )
    pol_ec2_states_rwd = ManagedPolicy(
        self,
        "pol_EC2StatesWriteDelete",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=[
                    "dynamodb:DeleteItem",
                    "dynamodb:DescribeTable",
                    "dynamodb:PutItem",
                    "dynamodb:Query",
                    "dynamodb:UpdateItem",
                ],
                resources=[tb_states.table_arn],
            )
        ],
    )
    pol_ec2_inventory_full = ManagedPolicy(
        self,
        "pol_EC2InventoryFullAccess",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=[
                    "dynamodb:DeleteItem",
                    "dynamodb:DescribeTable",
                    "dynamodb:GetItem",
                    "dynamodb:PutItem",
                    "dynamodb:Query",
                    "dynamodb:UpdateItem",
                ],
                resources=[tb_inventory.table_arn],
            )
        ],
    )
    pol_lambda_describe_ec2 = ManagedPolicy(
        self,
        "pol_LambdaDescribeEC2",
        statements=[
            PolicyStatement(
                effect=Effect.ALLOW,
                actions=["ec2:Describe*"],
                resources=["*"],
            )
        ],
    )

    # IAM roles
    rl_event_capture = Role(
        self,
        'rl_state_capture',
        assumed_by=ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[basic_exec, sqs_access, pol_ec2_states_rwd],
    )
    rl_event_processor = Role(
        self,
        'rl_state_processor',
        assumed_by=ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            basic_exec,
            pol_ec2_states_ro,
            pol_ec2_states_rwd,
            pol_ec2_inventory_full,
            pol_lambda_describe_ec2,
        ],
    )

    # event capture lambda
    lambda_event_capture = Function(
        self,
        "lambda_event_capture",
        handler="event_capture.handler",
        runtime=Runtime.PYTHON_3_7,
        code=Code.asset('event_capture'),
        role=rl_event_capture,
        events=[SqsEventSource(state_change_sqs)],
        environment={"state_table": tb_states.table_name},
    )

    # event processor lambda
    lambda_event_processor = Function(
        self,
        "lambda_event_processor",
        handler="event_processor.handler",
        runtime=Runtime.PYTHON_3_7,
        code=Code.asset('event_processor'),
        role=rl_event_processor,
        events=[
            DynamoEventSource(tb_states, starting_position=StartingPosition.LATEST)
        ],
        environment={
            "inventory_table": tb_inventory.table_name,
        },
    )

    # CloudWatch event rule
    event_ec2_change = Rule(
        self,
        "ec2_state_change",
        description="trigger on ec2 start, stop and terminate instances",
        event_pattern=EventPattern(
            source=["aws.ec2"],
            detail_type=["EC2 Instance State-change Notification"],
            detail={"state": ["running", "stopped", "terminated"]},
        ),
        targets=[aws_events_targets.SqsQueue(state_change_sqs)],
    )

    # Outputs
    core.CfnOutput(self, "rl_state_capture_arn", value=rl_event_capture.role_arn)
    core.CfnOutput(self, "rl_state_processor_arn", value=rl_event_processor.role_arn)
    core.CfnOutput(self, "tb_states_arn", value=tb_states.table_arn)
    core.CfnOutput(self, "tb_inventory_arn", value=tb_inventory.table_arn)
    core.CfnOutput(self, "sqs_state_change", value=state_change_sqs.queue_arn)

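# For reference, a minimal sketch of the EventBridge event the rule above
# matches (the standard EC2 state-change notification shape; the instance
# id is illustrative):
#
# {
#     "source": "aws.ec2",
#     "detail-type": "EC2 Instance State-change Notification",
#     "detail": {
#         "instance-id": "i-0123456789abcdef0",
#         "state": "running"
#     }
# }
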
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    graphql_api = CfnGraphQLApi(
        self,
        'WeatherApi',
        name='weather-api',
        authentication_type='API_KEY',
    )
    CfnApiKey(
        self,
        'WeatherApiKey',
        api_id=graphql_api.attr_api_id,
    )
    api_schema = CfnGraphQLSchema(
        self,
        'WeatherSchema',
        api_id=graphql_api.attr_api_id,
        definition="""
            type Destination {
                id: ID!
                description: String!
                state: String!
                city: String!
                zip: String!
                conditions: Weather!
            }

            type Mutation {
                addDestination(
                    id: ID,
                    description: String!,
                    state: String!,
                    city: String!,
                    zip: String!
                ): Destination!
            }

            type Query {
                getWeather(city: String!): Weather
                # Get a single value of type 'Post' by primary key.
                getDestination(id: ID!, zip: String): Destination
                getAllDestinations: [Destination]
                getDestinationsByState(state: String!): [Destination]
            }

            type Subscription {
                newDestination: Destination
                @aws_subscribe(mutations: ["addDestination"])
            }

            type Weather {
                description: String
                current: String
                maxTemp: String
                minTemp: String
            }

            schema {
                query: Query
                mutation: Mutation
                subscription: Subscription
            }
        """,
    )

    table_name = 'destinations'
    table = Table(
        self,
        'DestinationsTable',
        table_name=table_name,
        partition_key=Attribute(
            name="id",
            type=AttributeType.STRING,
        ),
        billing_mode=BillingMode.PAY_PER_REQUEST,
        stream=StreamViewType.NEW_IMAGE,
    )

    table_role = Role(
        self,
        'DestinationsDynamoDBRole',
        assumed_by=ServicePrincipal('appsync.amazonaws.com'),
    )
    table_role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess')
    )

    data_source = CfnDataSource(
        self,
        'DestinationsDataSource',
        api_id=graphql_api.attr_api_id,
        name='DestinationsDynamoDataSource',
        type='AMAZON_DYNAMODB',
        dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
            table_name=table.table_name,
            aws_region=self.region,
        ),
        service_role_arn=table_role.role_arn,
    )

    lambdaFn = Function(
        self,
        "GetWeather",
        code=Code.asset(os.getcwd() + "/lambdas/weather/"),
        handler="weather.get",
        timeout=core.Duration.seconds(900),
        memory_size=128,
        runtime=Runtime.NODEJS_10_X,
        environment={'APPID': os.getenv('APPID')},
    )

    lambda_role = Role(
        self,
        'WeatherLambdaRole',
        assumed_by=ServicePrincipal('appsync.amazonaws.com'),
    )
    lambda_role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name('AWSLambdaFullAccess')
    )

    lambda_source = CfnDataSource(
        self,
        'WeatherDataSource',
        api_id=graphql_api.attr_api_id,
        name='WeatherCondition',
        type='AWS_LAMBDA',
        lambda_config=CfnDataSource.LambdaConfigProperty(
            lambda_function_arn=lambdaFn.function_arn),
        service_role_arn=lambda_role.role_arn,
    )

    self.add_resolvers(
        graphql_api,
        api_schema,
        data_source=data_source,
        lambda_source=lambda_source,
    )

def __init__(self, scope: core.Construct, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    this_dir = path.dirname(__file__)

    handler = lmb.Function(
        self,
        'Handler',
        runtime=lmb.Runtime.PYTHON_3_7,
        handler='handler.handler',
        code=lmb.Code.from_asset(path.join(this_dir, 'lambda')),
    )
    alias = lmb.Alias(
        self,
        "HandlerAlias",
        alias_name="Current",
        version=handler.current_version,
    )

    gw = apigw.LambdaRestApi(
        self,
        'Gateway',
        description='Endpoint for a simple Lambda-powered web service',
        handler=alias,
        endpoint_types=[EndpointType.REGIONAL],
    )

    failure_alarm = cloudwatch.Alarm(
        self,
        "FailureAlarm",
        alarm_name=self.stack_name + '-' + '500Alarm',
        metric=cloudwatch.Metric(
            metric_name="5XXError",
            namespace="AWS/ApiGateway",
            dimensions={"ApiName": "Gateway"},
            statistic="Sum",
            period=core.Duration.minutes(1),
        ),
        threshold=1,
        evaluation_periods=1,
    )
    alarm500topic = sns.Topic(
        self,
        "Alarm500Topic",
        topic_name=self.stack_name + '-' + 'Alarm500TopicSNS',
    )
    alarm500topic.add_subscription(
        subscriptions.EmailSubscription("*****@*****.**"))
    failure_alarm.add_alarm_action(cw_actions.SnsAction(alarm500topic))

    codedeploy.LambdaDeploymentGroup(
        self,
        "DeploymentGroup",
        alias=alias,
        deployment_config=codedeploy.LambdaDeploymentConfig.CANARY_10_PERCENT_10_MINUTES,
        alarms=[failure_alarm],
    )

    # Create a DynamoDB table
    table_name = self.stack_name + '-' + 'HelloCdkTable'
    table = dynamodb.Table(
        self,
        "TestTable",
        table_name=table_name,
        partition_key=Attribute(name="id", type=dynamodb.AttributeType.STRING),
    )

    table_name_id = cr.PhysicalResourceId.of(table.table_name)
    on_create_action = AwsSdkCall(
        action='putItem',
        service='DynamoDB',
        physical_resource_id=table_name_id,
        parameters={
            'Item': {
                'id': {'S': 'HOLA_CREATE'},
                'date': {'S': datetime.today().strftime('%Y-%m-%d')},
                'epoch': {'N': str(int(time.time()))},
            },
            'TableName': table_name,
        },
    )
    on_update_action = AwsSdkCall(
        action='putItem',
        service='DynamoDB',
        physical_resource_id=table_name_id,
        parameters={
            'Item': {
                'id': {'S': 'HOLA_UPDATE'},
                'date': {'S': datetime.today().strftime('%Y-%m-%d')},
                'epoch': {'N': str(int(time.time()))},
            },
            'TableName': table_name,
        },
    )
    cr.AwsCustomResource(
        self,
        "TestTableCustomResource",
        on_create=on_create_action,
        on_update=on_update_action,
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
    )

    # OUTPUT
    self.url_output = core.CfnOutput(self, 'Url', value=gw.url)

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    dynamodb_table = _ddb.Table(
        self,
        id="lab2-cm-ddb",
        table_name="lab2-cm-order-status",
        partition_key=Attribute(name='ID', type=AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY,  # NOT for production
    )

    eb = _eb.EventBus(
        self,
        id="lab2-cm-eventbus",
        event_bus_name="lab2-cm-eventbus",
    )

    lambda_role = _iam.Role(
        self,
        id='lab2-cm-role',
        assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'),
    )

    dynamodb_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
    dynamodb_policy_statement.add_actions("dynamodb:*")
    dynamodb_policy_statement.add_resources(dynamodb_table.table_arn)
    lambda_role.add_to_policy(dynamodb_policy_statement)

    eventbridge_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
    eventbridge_policy_statement.add_actions("events:*")
    eventbridge_policy_statement.add_resources(eb.event_bus_arn)
    lambda_role.add_to_policy(eventbridge_policy_statement)

    cloudwatch_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
    cloudwatch_policy_statement.add_actions("logs:CreateLogGroup")
    cloudwatch_policy_statement.add_actions("logs:CreateLogStream")
    cloudwatch_policy_statement.add_actions("logs:PutLogEvents")
    cloudwatch_policy_statement.add_actions("logs:DescribeLogStreams")
    cloudwatch_policy_statement.add_resources("*")
    lambda_role.add_to_policy(cloudwatch_policy_statement)

    fn_lambda_invoice_service = aws_lambda.Function(
        self,
        "lab2-cm-invoiceService",
        code=aws_lambda.AssetCode("../lambda-functions/invoice-service/"),
        handler="app.lambda_handler",
        tracing=aws_lambda.Tracing.ACTIVE,
        timeout=core.Duration.seconds(30),
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
    )
    fn_lambda_invoice_service.add_environment("TABLE_NAME", dynamodb_table.table_name)

    fn_lambda_fulfilment_service = aws_lambda.Function(
        self,
        "lab2-cm-fulfilmentService",
        code=aws_lambda.AssetCode("../lambda-functions/fulfilment-service/"),
        handler="app.lambda_handler",
        tracing=aws_lambda.Tracing.ACTIVE,
        timeout=core.Duration.seconds(30),
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
    )
    fn_lambda_fulfilment_service.add_environment("TABLE_NAME", dynamodb_table.table_name)
    fn_lambda_fulfilment_service.add_environment("EVENTBUS_NAME", eb.event_bus_name)

    fn_lambda_forecasting_service = aws_lambda.Function(
        self,
        "lab2-cm-forecastingService",
        code=aws_lambda.AssetCode("../lambda-functions/forecasting-service/"),
        handler="app.lambda_handler",
        tracing=aws_lambda.Tracing.ACTIVE,
        timeout=core.Duration.seconds(30),
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
    )
    fn_lambda_forecasting_service.add_environment("TABLE_NAME", dynamodb_table.table_name)

    fn_lambda_order_service = aws_lambda.Function(
        self,
        "lab2-cm-orderService",
        code=aws_lambda.AssetCode("../lambda-functions/order-service/"),
        handler="app.lambda_handler",
        timeout=core.Duration.seconds(30),
        tracing=aws_lambda.Tracing.ACTIVE,
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
    )
    fn_lambda_order_service.add_environment("TABLE_NAME", dynamodb_table.table_name)
    fn_lambda_order_service.add_environment("EVENTBUS_NAME", eb.event_bus_name)

    fn_lambda_logistic_service = aws_lambda.Function(
        self,
        "lab2-cm-logisticService",
        code=aws_lambda.AssetCode("../lambda-functions/logistic-service/"),
        handler="app.lambda_handler",
        timeout=core.Duration.seconds(30),
        tracing=aws_lambda.Tracing.ACTIVE,
        role=lambda_role,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
    )
    fn_lambda_logistic_service.add_environment("TABLE_NAME", dynamodb_table.table_name)

    eb_order_created_pattern = _eb.EventPattern(
        detail_type=["order_created"],
    )
    eb_fulfilment_completed_pattern = _eb.EventPattern(
        detail_type=["fulfilment_completed"],
    )
    eb_order_created_rule = _eb.Rule(
        self,
        id="lab2-cm-eventRule-order-created",
        description="Order created event",
        enabled=True,
        event_bus=eb,
        event_pattern=eb_order_created_pattern,
        rule_name="lab2-OrderCreated",
        targets=[
            _ebt.LambdaFunction(handler=fn_lambda_invoice_service),
            _ebt.LambdaFunction(handler=fn_lambda_fulfilment_service),
            _ebt.LambdaFunction(handler=fn_lambda_forecasting_service),
        ],
    )
    eb_fulfilment_completed_rule = _eb.Rule(
        self,
        id="lab2-cm-eventRule-fulfilment-completed",
        description="Fulfilment completed event",
        enabled=True,
        event_bus=eb,
        event_pattern=eb_fulfilment_completed_pattern,
        rule_name="lab2-FulfilmentCompleted",
        targets=[_ebt.LambdaFunction(handler=fn_lambda_logistic_service)],
    )

    api = _ag.RestApi(
        self,
        id='lab2-cm-api-gateway',
    )
    api_lambda_integration = _ag.LambdaIntegration(fn_lambda_order_service)
    api.root.add_resource('order').add_method('GET', api_lambda_integration)

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    """
    Create Lambda Layer

    The packages should be stored in `python/lib/python3.7/site-packages`
    which translates to `/opt/python/lib/python3.7/site-packages` in AWS Lambda

    Refer here: https://stackoverflow.com/a/58702328/7999204
    """
    python_deps_layer = LayerVersion(
        self,
        "PythonDepsLayer",
        code=Code.from_asset("./python-deps-layer"),
        compatible_runtimes=[PYTHON_RUNTIME],
        description="A layer that contains Python Dependencies",
    )

    """
    Create DynamoDB Tables
    """
    poll_table = Table(
        self,
        "PollTable",
        partition_key=Attribute(name="id", type=AttributeType.STRING),
        sort_key=Attribute(name="SK", type=AttributeType.STRING),
        read_capacity=10,
        write_capacity=10,
        stream=StreamViewType.NEW_IMAGE,
    )

    # DynamoDB Lambda consumer worker
    aggregate_votes_function = Function(
        self,
        "AggregateVotesLambda",
        handler="ddb_stream.aggregate_vote_table",
        runtime=PYTHON_RUNTIME,
        code=Code.asset("./backend"),
        layers=[python_deps_layer],
        timeout=core.Duration.seconds(30),
    )
    aggregate_votes_function.add_environment("POLL_TABLE", poll_table.table_name)

    # DynamoDB Stream (Lambda Event Source)
    poll_table.grant_stream_read(aggregate_votes_function)
    poll_table.grant_read_write_data(aggregate_votes_function)
    ddb_aggregate_votes_event_source = DynamoEventSource(
        poll_table, starting_position=StartingPosition.LATEST
    )
    aggregate_votes_function.add_event_source(ddb_aggregate_votes_event_source)

    # DynamoDB main_page GSI
    poll_table.add_global_secondary_index(
        partition_key=Attribute(name="PK2", type=AttributeType.STRING),
        projection_type=ProjectionType.INCLUDE,
        index_name=MAIN_PAGE_GSI,
        non_key_attributes=["date", "question", "result"],
    )

    """
    Create AWS Cognito User Pool
    """
    self.users = UserPool(self, "vote-user")

    """
    HTTP API API Gateway with CORS
    """
    api = HttpApi(
        self,
        "VoteHttpApi",
        cors_preflight={
            "allow_headers": ["*"],
            "allow_methods": [
                HttpMethod.GET,
                HttpMethod.HEAD,
                HttpMethod.OPTIONS,
                HttpMethod.POST,
            ],
            "allow_origins": ["*"],
            "max_age": core.Duration.days(10),
        },
    )

    """
    HTTP API Lambda functions
    """
    get_all_votes_function = api_lambda_function(
        self,
        "GetAllVoteLambda",
        "api.get_all_votes",
        api,
        "/vote",
        GET,
        [python_deps_layer],
        [poll_table],
    )
    poll_table.grant_read_data(get_all_votes_function)

    get_vote_function = api_lambda_function(
        self,
        "GetVoteLambda",
        "api.get_vote_by_id",
        api,
        "/vote/{vote_id}",
        GET,
        [python_deps_layer],
        [poll_table],
    )
    poll_table.grant_read_data(get_vote_function)

    create_poll_function = api_lambda_function(
        self,
        "CreatePollLambda",
        "api.create_poll",
        api,
        "/vote",
        POST,
        [python_deps_layer],
        [poll_table],
    )
    poll_table.grant_write_data(create_poll_function)

    post_vote_function = api_lambda_function(
        self,
        "PostVoteLambda",
        "api.vote",
        api,
        "/vote/{vote_id}",
        POST,
        [python_deps_layer],
        [poll_table],
    )

    """
    Create SQS Queues
    """
    voting_queue = Queue(self, "voting-queue")

    # SQS consumer worker
    voting_to_ddb_function = Function(
        self,
        "VotingToDDBLambda",
        handler="sqs_worker.insert_to_vote_db_table",
        runtime=PYTHON_RUNTIME,
        code=Code.asset("./backend"),
        layers=[python_deps_layer],
    )
    voting_to_ddb_function.add_environment("POLL_TABLE", poll_table.table_name)

    # SQS Queue to Lambda trigger mapping
    voting_to_ddb_event_source = SqsEventSource(voting_queue)
    voting_to_ddb_function.add_event_source(voting_to_ddb_event_source)
    poll_table.grant_read_write_data(voting_to_ddb_function)
    voting_queue.grant_send_messages(post_vote_function)
    post_vote_function.add_environment("VOTING_QUEUE_URL", voting_queue.queue_url)

    core.CfnOutput(self, "api-domain", value=api.url)

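# `api_lambda_function` is a project-local helper that is not shown here.
# A plausible sketch, inferred from the call sites above: it creates a
# Function, wires it to an HTTP API route, and exports the table name; the
# body and the LambdaProxyIntegration import (from
# aws_cdk.aws_apigatewayv2_integrations) are assumptions:
def api_lambda_function(stack, id, handler, api, route, method, layers, tables):
    function = Function(
        stack,
        id,
        handler=handler,
        runtime=PYTHON_RUNTIME,
        code=Code.asset("./backend"),
        layers=layers,
    )
    # The handlers above read POLL_TABLE; assume the first table is it.
    function.add_environment("POLL_TABLE", tables[0].table_name)
    api.add_routes(
        path=route,
        methods=[method],
        integration=LambdaProxyIntegration(handler=function),
    )
    return function
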
def __init__(self, scope: core.Construct, construct_id: str, db_context: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # setting the db context
    db = dict(self.node.try_get_context(db_context))

    # Shortening some of the logic
    billing_mode = (
        BillingMode.PROVISIONED
        if db["db_billing_mode"] == "provisioned"
        else BillingMode.PAY_PER_REQUEST
    )
    pk = db["db_table_pk"]
    pk_type = (
        AttributeType.STRING
        if db["db_table_pk_type"] == "string"
        else AttributeType.NUMBER
    )
    sk = None if db["db_table_sk"] == "" else db["db_table_sk"]
    sk_type = (
        AttributeType.STRING
        if db["db_table_sk_type"] == "string"
        else AttributeType.NUMBER
    )
    gsi_projection_type = (
        ProjectionType.ALL
        if db["db_gsi_projection"] == "all"
        else ProjectionType.KEYS_ONLY
    )
    lsi_projection_type = (
        ProjectionType.ALL
        if db["db_lsi_projection"] == "all"
        else ProjectionType.KEYS_ONLY
    )

    if sk:
        table = Table(
            self,
            db["db_table"],
            table_name=db["db_table"],
            partition_key=Attribute(name=pk, type=pk_type),
            sort_key=Attribute(name=sk, type=sk_type),
            read_capacity=db["db_min_read_capacity"],
            write_capacity=db["db_min_write_capacity"],
            encryption=_ddb.TableEncryption.AWS_MANAGED,
            point_in_time_recovery=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            billing_mode=billing_mode,
        )
    else:
        table = Table(
            self,
            db["db_table"],
            table_name=db["db_table"],
            partition_key=Attribute(name=pk, type=pk_type),
            read_capacity=db["db_min_read_capacity"],
            write_capacity=db["db_min_write_capacity"],
            encryption=_ddb.TableEncryption.AWS_MANAGED,
            point_in_time_recovery=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            billing_mode=billing_mode,
        )

    # Add read/write autoscaling enabled at X% utilization
    if db["db_billing_mode"] == "provisioned" and db["db_enable_autoscaling"]:
        read_scaling = table.auto_scale_read_capacity(
            min_capacity=db["db_min_read_capacity"],
            max_capacity=db["db_max_read_capacity"],
        )
        read_scaling.scale_on_utilization(
            target_utilization_percent=db["db_target_utilization"],
        )
        write_scaling = table.auto_scale_write_capacity(
            min_capacity=db["db_min_write_capacity"],
            max_capacity=db["db_max_write_capacity"],
        )
        write_scaling.scale_on_utilization(
            target_utilization_percent=db["db_target_utilization"],
        )

    # Add a reverse GSI (sk/pk swapped) with the configured projection
    if db["db_reverse_index"] and sk:
        table.add_global_secondary_index(
            partition_key=Attribute(name=sk, type=sk_type),
            sort_key=_ddb.Attribute(name=pk, type=pk_type),
            read_capacity=db["db_min_read_capacity"],
            write_capacity=db["db_min_write_capacity"],
            index_name='reverseIndex',
            projection_type=gsi_projection_type,
        )
        table.auto_scale_global_secondary_index_read_capacity(
            index_name='reverseIndex',
            min_capacity=db["db_min_read_capacity"],
            max_capacity=db["db_max_read_capacity"],
        )
        table.auto_scale_global_secondary_index_write_capacity(
            index_name='reverseIndex',
            min_capacity=db["db_min_write_capacity"],
            max_capacity=db["db_max_write_capacity"],
        )
    else:
        print("No Reverse indexes created")

    # Add an LSI with the configured projection
    if db["db_add_lsi"]:
        table.add_local_secondary_index(
            index_name='LSI1',
            projection_type=lsi_projection_type,
            sort_key=Attribute(name='LSISK', type=AttributeType.STRING),
        )

    self.table = table