Example #1
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        inner_sfn_pass_state = sfn.Pass(self, 'PassState')

        inner_sfn = sfn.StateMachine(self, 'InnerStepFunction',
            definition=inner_sfn_pass_state,
            timeout=Duration.minutes(60)
        )

        task1 = tasks.StepFunctionsStartExecution(self, "StepFunction1",
          state_machine=inner_sfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector={
                "Output.$": "$.Output"
          }
        )

        task2 = tasks.StepFunctionsStartExecution(self, "StepFunction2",
          state_machine=inner_sfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector={
                "Output.$": "$.Output"
          }
        )

        task3 = tasks.StepFunctionsStartExecution(self, "StepFunction3",
          state_machine=inner_sfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector={
                "Output.$": "$.Output"
          }
        )

        outer_sfn = sfn.StateMachine(self, "OuterStepFunction",
                definition=task1.next(task2).next(task3),
                timeout=Duration.minutes(60)
        )

        CfnOutput(self, "StepFunctionArn",
            value = outer_sfn.state_machine_arn,
            export_name = 'OuterStepFunctionArn',
            description = 'Outer Step Function arn')
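
A minimal sketch of the imports this snippet assumes (AWS CDK v2; alias names inferred from usage, and the enclosing Stack subclass is not shown):

from constructs import Construct
from aws_cdk import CfnOutput, Duration, Stack
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk import aws_stepfunctions_tasks as tasks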
Example #2
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # define the table that maps short codes to URLs.
        table = aws_dynamodb.Table(self,
                                   "Table",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=10,
                                   write_capacity=5)

        # define the API gateway request handler. all API requests will go to the same function.
        handler = aws_lambda.Function(
            self,
            "UrlShortenerFunction",
            code=aws_lambda.Code.from_asset("./lambda"),
            handler="handler.main",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_7)

        # pass the table name to the handler through an environment variable and grant
        # the handler read/write permissions on the table.
        handler.add_environment('TABLE_NAME', table.table_name)
        table.grant_read_write_data(handler)

        # define the API endpoint and associate the handler
        api = aws_apigateway.LambdaRestApi(self,
                                           "UrlShortenerApi",
                                           handler=handler)

        # map go.waltersco.co to this api gateway endpoint
        # the domain name is a shared resource that can be accessed through the API in WaltersCoStack
        # NOTE: you can comment this out if you want to bypass the domain name mapping
        self.map_waltersco_subdomain('go', api)
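
Assumed imports for this snippet (a sketch; map_waltersco_subdomain is defined on the base class in WaltersCoStack, which is not shown):

from constructs import Construct
from aws_cdk import Duration
from aws_cdk import aws_apigateway, aws_dynamodb, aws_lambda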
Example #3
 def cname_record(self, record_name, hosted_zone, alb_dns):
     # alb_dns: DNS name of the target load balancer (assumed to be
     # created elsewhere in the stack and passed in here)
     _route53.CnameRecord(
         self, 'Route53Cname',
         domain_name=alb_dns,
         record_name=record_name,
         zone=hosted_zone,
         ttl=Duration.minutes(1)
     )
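
A sketch of the imports this helper assumes:

from aws_cdk import Duration
from aws_cdk import aws_route53 as _route53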
Example #4
 def _setup_mysql_serverless(self) -> None:
     port = 3306
     database = "test"
     schema = "test"
     aurora_mysql = rds.ServerlessCluster(
         self,
         "aws-data-wrangler-aurora-cluster-mysql-serverless",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_mysql(
             version=rds.AuroraMysqlEngineVersion.VER_5_7_12,
         ),
         cluster_identifier="mysql-serverless-cluster-wrangler",
         default_database_name=database,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         scaling=rds.ServerlessScalingOptions(
             auto_pause=Duration.minutes(5),
             min_capacity=rds.AuroraCapacityUnit.ACU_1,
             max_capacity=rds.AuroraCapacityUnit.ACU_1,
         ),
         backup_retention=Duration.days(1),
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT),
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         enable_data_api=True,
     )
     secret = secrets.Secret(
         self,
         "aws-data-wrangler-mysql-serverless-secret",
         secret_name="aws-data-wrangler/mysql-serverless",
         description="MySQL serverless credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "mysql",
                     "host": aurora_mysql.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_mysql.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "MysqlServerlessSecretArn", value=secret.secret_arn)
     CfnOutput(self, "MysqlServerlessClusterArn", value=aurora_mysql.cluster_arn)
     CfnOutput(self, "MysqlServerlessAddress", value=aurora_mysql.cluster_endpoint.hostname)
     CfnOutput(self, "MysqlServerlessPort", value=str(port))
     CfnOutput(self, "MysqlServerlessDatabase", value=database)
     CfnOutput(self, "MysqlServerlessSchema", value=schema)
Example #5
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        function = aws_lambda.Function(
            self,
            "SmileFunction",
            function_name="SmileFunction",
            code=aws_lambda.Code.from_asset("package"),
            handler="handler.main",
            timeout=Duration.minutes(5),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            tracing=aws_lambda.Tracing.ACTIVE,
        )
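
Assumed imports for this fragment (a sketch):

from constructs import Construct
from aws_cdk import Duration
from aws_cdk import aws_lambda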
Example #6
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create dynamo table
        demo_table = aws_dynamodb.Table(
            self,
            "demo_table",
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING))

        # create producer lambda function
        producer_lambda = aws_lambda.Function(
            self,
            "producer_lambda_function",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.from_asset("./lambda/producer"))

        producer_lambda.add_environment("TABLE_NAME", demo_table.table_name)

        # grant permission to lambda to write to demo table
        demo_table.grant_write_data(producer_lambda)

        # create consumer lambda function
        consumer_lambda = aws_lambda.Function(
            self,
            "consumer_lambda_function",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.from_asset("./lambda/consumer"))

        consumer_lambda.add_environment("TABLE_NAME", demo_table.table_name)

        # grant permission to lambda to read from demo table
        demo_table.grant_read_data(consumer_lambda)

        # create a Cloudwatch Event rule
        one_minute_rule = aws_events.Rule(
            self,
            "one_minute_rule",
            schedule=aws_events.Schedule.rate(Duration.minutes(1)),
        )

        # Add target to Cloudwatch Event
        one_minute_rule.add_target(
            aws_events_targets.LambdaFunction(producer_lambda))
        one_minute_rule.add_target(
            aws_events_targets.LambdaFunction(consumer_lambda))
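
Assumed imports (a sketch; note that PYTHON_3_6 is deprecated, so a newer runtime may be required by current CDK releases):

from constructs import Construct
from aws_cdk import Duration
from aws_cdk import aws_dynamodb, aws_events, aws_events_targets, aws_lambda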
Example #7
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self._account_id = os.environ["CDK_DEFAULT_ACCOUNT"]
        self._region = os.environ["CDK_DEFAULT_REGION"]

        self._queue = _sqs.Queue(
            self,
            "ApigwV2SqsLambdaQueue",
            visibility_timeout=Duration.seconds(300),
        )

        self._sqs_event_source = SqsEventSource(self._queue)

        self._fn = _lambda.Function(
            self,
            'SqsMessageHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='app.handler',
            code=_lambda.Code.from_asset(path='src'),
            timeout=Duration.minutes(3),
            memory_size=128,
            environment={
                'REGION': self._region,
                'ACCOUNT_ID': self._account_id
            },
        )

        self._fn.add_event_source(self._sqs_event_source)

        self._http_api = self._create_apigw_v2()

        self._integration_role = self._create_apigw_to_sqs_role()

        self._send_msg_route = self._create_sqs_send_msg_route()

        # Enable Auto Deploy
        self._stage = self._create_stage()

        # Outputs
        CfnOutput(self,
                  "API Endpoint",
                  description="API Endpoint",
                  value=self._http_api.attr_api_endpoint)
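
The helper methods _create_apigw_v2, _create_apigw_to_sqs_role, _create_sqs_send_msg_route and _create_stage are defined elsewhere in this class and are not shown. Assumed imports (a sketch):

import os
from constructs import Construct
from aws_cdk import CfnOutput, Duration
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_sqs as _sqs
from aws_cdk.aws_lambda_event_sources import SqsEventSource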
Example #8
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        detect_sentiment_task = sfn_tasks.CallAwsService(
            self,
            "DetectSentiment",
            service="comprehend",
            action="detectSentiment",
            iam_resources=["*"],
            parameters={
                # JsonPath.string_at makes "Text" a runtime path reference;
                # a plain "$.text" string would be passed to Comprehend literally
                "Text": sfn.JsonPath.string_at("$.text"),
                "LanguageCode": "en"
            })

        definition = detect_sentiment_task
        state_machine = sfn.StateMachine(self,
                                         "DetectSentimentStateMachine",
                                         definition=definition,
                                         timeout=Duration.minutes(5))

        CfnOutput(scope=self,
                  id='StateMachineArn',
                  value=state_machine.state_machine_arn)
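
Assumed imports (a sketch):

from constructs import Construct
from aws_cdk import CfnOutput, Duration
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk import aws_stepfunctions_tasks as sfn_tasks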
Example #9
    def __init__(self, scope: Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        rg_property = network_fw.CfnRuleGroup.RuleGroupProperty(
            rule_variables=None,
            rules_source=network_fw.CfnRuleGroup.RulesSourceProperty(
                stateless_rules_and_custom_actions=network_fw.CfnRuleGroup.StatelessRulesAndCustomActionsProperty(
                    stateless_rules=[
                        network_fw.CfnRuleGroup.StatelessRuleProperty(
                            priority=10,
                            rule_definition=network_fw.CfnRuleGroup.RuleDefinitionProperty(
                                actions=["aws:drop"],
                                match_attributes=network_fw.CfnRuleGroup.MatchAttributesProperty(
                                    destinations=[
                                        network_fw.CfnRuleGroup.AddressProperty(
                                            address_definition="127.0.0.1/32")
                                    ])))
                    ])))

        nf_rule_group = network_fw.CfnRuleGroup(
            scope=self,
            id='GuardDutyNetworkFireWallRuleGroup',
            capacity=100,
            rule_group_name='guardduty-network-firewall',
            type='STATELESS',
            description='Guard Duty network firewall rule group',
            tags=[CfnTag(key='Name', value='cfn.rule-group.stack')],
            rule_group=rg_property)
        """ https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rule-dlq.html#dlq-considerations """
        dlq_statemachine = sqs.Queue(self,
                                     'DLQStateMachine',
                                     queue_name='dlq_state_machine')

        guardduty_firewall_ddb = ddb.Table(
            scope=self,
            id='GuarddutyFirewallDDB',
            table_name='GuardDutyFirewallDDBTable',
            removal_policy=RemovalPolicy.DESTROY,
            partition_key=ddb.Attribute(name='HostIp',
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
        """ IAM role for ddb permission """
        nf_iam_role = iam.Role(
            self,
            'DDBRole',
            role_name=f'ddb-nf-role-{env.region}',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["arn:aws:logs:*:*:*"],
                                actions=[
                                    "logs:CreateLogGroup",
                                    "logs:CreateLogStream", "logs:PutLogEvents"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    guardduty_firewall_ddb.table_arn,
                                    f"{guardduty_firewall_ddb.table_arn}/*"
                                ],
                                actions=[
                                    "dynamodb:PutItem", "dynamodb:GetItem",
                                    "dynamodb:Scan"
                                ]))

        nf_iam_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[nf_rule_group.ref, f"{nf_rule_group.ref}/*"],
                actions=[
                    "network-firewall:DescribeRuleGroup",
                    "network-firewall:UpdateRuleGroup"
                ]))

        record_ip_in_db = _lambda.Function(
            self,
            'RecordIpInDB',
            function_name='record-ip-in-ddb',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='addIPToDDB.handler',
            environment=dict(ACLMETATABLE=guardduty_firewall_ddb.table_name),
            role=nf_iam_role)
        """
        https://docs.amazonaws.cn/en_us/eventbridge/latest/userguide/eb-event-patterns-content-based-filtering.html
        """
        record_ip_task = step_fn_task.LambdaInvoke(
            self,
            'RecordIpDDBTask',
            lambda_function=record_ip_in_db,
            payload=step_fn.TaskInput.from_object({
                "comment": "Relevant fields from the GuardDuty / Security Hub finding",
                "HostIp.$": "$.detail.findings[0].ProductFields.aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4",
                "Timestamp.$": "$.detail.findings[0].ProductFields.aws/guardduty/service/eventLastSeen",
                "FindingId.$": "$.id",
                "AccountId.$": "$.account",
                "Region.$": "$.region"
            }),
            result_path='$',
            payload_response_only=True)

        firewall_update_rule = _lambda.Function(
            scope=self,
            id='GuardDutyUpdateNetworkFirewallRule',
            function_name='guardduty-update-networkfirewall-rule-group',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='updateNetworkFireWall.handler',
            environment=dict(
                FIREWALLRULEGROUP=nf_rule_group.ref,
                RULEGROUPPRI='30000',
                CUSTOMACTIONNAME='GuardDutytoFirewall',
                CUSTOMACTIONVALUE='guardduty-update-networkfirewall-rule-group'),
            role=nf_iam_role)

        firewall_update_rule_task = step_fn_task.LambdaInvoke(
            self,
            'FirewallUpdateRuleTask',
            lambda_function=firewall_update_rule,
            input_path='$',
            result_path='$',
            payload_response_only=True)

        firewall_no_update_job = step_fn.Pass(self, 'No Firewall change')
        notify_failure_job = step_fn.Fail(self,
                                          'NotifyFailureJob',
                                          cause='Any Failure',
                                          error='Unknown')

        send_to_slack = _lambda.Function(
            scope=self,
            id='SendAlertToSlack',
            function_name='guardduty-networkfirewall-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendSMSToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_slack_task = step_fn_task.LambdaInvoke(
            scope=self,
            id='LambdaToSlackDemo',
            lambda_function=send_to_slack,
            input_path='$',
            result_path='$')

        is_new_ip = step_fn.Choice(self, "New IP?")
        is_block_succeed = step_fn.Choice(self, "Block sucessfully?")

        definition = step_fn.Chain \
            .start(record_ip_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=notify_failure_job)) \
            .next(is_new_ip
                  .when(step_fn.Condition.boolean_equals('$.NewIP', True),
                        firewall_update_rule_task
                            .add_retry(errors=["States.TaskFailed"],
                                       interval=Duration.seconds(2),
                                       max_attempts=2
                                       )
                            .add_catch(errors=["States.ALL"], handler=notify_failure_job)
                            .next(
                                is_block_succeed
                                    .when(step_fn.Condition.boolean_equals('$.Result', False), notify_failure_job)
                                    .otherwise(send_slack_task)
                            )
                        )
                  .otherwise(firewall_no_update_job)
                  )

        guardduty_state_machine = step_fn.StateMachine(
            self,
            'GuarddutyStateMachine',
            definition=definition,
            timeout=Duration.minutes(5),
            state_machine_name='guardduty-state-machine')

        event.Rule(
            scope=self,
            id='EventBridgeCatchIPv4',
            description="Security Hub - GuardDuty findings with remote IP",
            rule_name='guardduty-catch-ipv4',
            event_pattern=event.EventPattern(
                account=['123456789012'],
                detail_type=["GuardDuty Finding"],
                source=['aws.securityhub'],
                detail={
                    "findings": {
                        "ProductFields": {
                            "aws/guardduty/service/action/networkConnectionAction/remoteIpDetails/ipAddressV4":
                            [{
                                "exists": True
                            }]
                        }
                    }
                }),
            targets=[
                event_target.SfnStateMachine(
                    machine=guardduty_state_machine,
                    dead_letter_queue=dlq_statemachine)
            ])
        """ Send other findings to slack """
        send_finding_to_slack = _lambda.Function(
            self,
            'SendFindingToSlack',
            function_name='send-finding-to-slack',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="sendFindingToSlack.handler",
            code=_lambda.Code.from_asset('lambda_fns'))

        send_findings_task = step_fn_task.LambdaInvoke(
            self,
            'SendFindingToSlackTask',
            lambda_function=send_finding_to_slack,
            payload=step_fn.TaskInput.from_object({
                "comment": "Other fields from the GuardDuty / Security Hub finding",
                "severity.$": "$.detail.findings[0].Severity.Label",
                "Account_ID.$": "$.account",
                "Finding_ID.$": "$.id",
                "Finding_Type.$": "$.detail.findings[0].Types",
                "Region.$": "$.region",
                "Finding_description.$": "$.detail.findings[0].Description"
            }),
            result_path='$')

        slack_failure_job = step_fn.Fail(self,
                                         'SlackNotifyFailureJob',
                                         cause='Any Failure',
                                         error='Unknown')

        finding_definition = step_fn.Chain \
            .start(send_findings_task
                   .add_retry(errors=["States.TaskFailed"],
                              interval=Duration.seconds(2),
                              max_attempts=2)
                   .add_catch(errors=["States.ALL"], handler=slack_failure_job))

        sechub_findings_state_machine = step_fn.StateMachine(
            self,
            'SecHubFindingsStateMachine',
            definition=finding_definition,
            timeout=Duration.minutes(5),
            state_machine_name='sechub-finding-state-machine')

        event.Rule(scope=self,
                   id='EventBridgeFindings',
                   description="Security Hub - GuardDuty findings others",
                   rule_name='others-findings',
                   event_pattern=event.EventPattern(
                       account=['123456789012'],
                       source=['aws.securityhub'],
                       detail_type=['Security Hub Findings - Imported'],
                       detail={"severity": [5, 8]}),
                   targets=[
                       event_target.SfnStateMachine(
                           machine=sechub_findings_state_machine,
                           dead_letter_queue=dlq_statemachine)
                   ])
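
This example assumes imports along these lines (a sketch; the account ID in the event patterns is a placeholder):

from constructs import Construct
from aws_cdk import CfnTag, Duration, RemovalPolicy
from aws_cdk import aws_dynamodb as ddb
from aws_cdk import aws_events as event
from aws_cdk import aws_events_targets as event_target
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_networkfirewall as network_fw
from aws_cdk import aws_sqs as sqs
from aws_cdk import aws_stepfunctions as step_fn
from aws_cdk import aws_stepfunctions_tasks as step_fn_task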
Example #10
    def __init__(self, app: App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # Lambda Handlers Definitions

        submit_lambda = _lambda.Function(
            self,
            'submitLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/submit'))

        status_lambda = _lambda.Function(
            self,
            'statusLambda',
            handler='lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_9,
            code=_lambda.Code.from_asset('lambdas/status'))

        # Step functions Definition

        submit_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Submit Job",
            lambda_function=submit_lambda,
            output_path="$.Payload",
        )

        wait_job = _aws_stepfunctions.Wait(
            self,
            "Wait 30 Seconds",
            time=_aws_stepfunctions.WaitTime.duration(Duration.seconds(30)))

        status_job = _aws_stepfunctions_tasks.LambdaInvoke(
            self,
            "Get Status",
            lambda_function=status_lambda,
            output_path="$.Payload",
        )

        fail_job = _aws_stepfunctions.Fail(self,
                                           "Fail",
                                           cause='AWS Batch Job Failed',
                                           error='DescribeJob returned FAILED')

        succeed_job = _aws_stepfunctions.Succeed(
            self, "Succeeded", comment='AWS Batch Job succeeded')

        # Create Chain

        definition = submit_job.next(wait_job)\
            .next(status_job)\
            .next(_aws_stepfunctions.Choice(self, 'Job Complete?')
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'FAILED'), fail_job)
                  .when(_aws_stepfunctions.Condition.string_equals('$.status', 'SUCCEEDED'), succeed_job)
                  .otherwise(wait_job))

        # Create state machine
        sm = _aws_stepfunctions.StateMachine(
            self,
            "StateMachine",
            definition=definition,
            timeout=Duration.minutes(5),
        )
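
Assumed imports (a sketch):

from aws_cdk import App, Duration
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_stepfunctions as _aws_stepfunctions
from aws_cdk import aws_stepfunctions_tasks as _aws_stepfunctions_tasks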
Example #11
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Parameters
        notification_email_address = CfnParameter(
            self,
            "notification_email_address",
            type="String",
            min_length=7,
            description=
            "The E-mail address subscribed to notifications when an S3 bucket is detected as open to the public."
        )
        profiling = CfnParameter(
            self,
            "profiling",
            type="String",
            allowed_values=["TRUE", "FALSE"],
            default="FALSE",
            description=
            "Enable Profiling on Lambda functions: TRUE or FALSE. Default: FALSE"
        )
        tracing = CfnParameter(
            self,
            "tracing",
            type="String",
            allowed_values=["TRUE", "FALSE"],
            default="FALSE",
            description=
            "Enable tracing on Lambda functions: TRUE or FALSE. Default: FALSE"
        )
        trusted_advisor_refresh_minutes = CfnParameter(
            self,
            "trusted_advisor_refresh_minutes",
            type="Number",
            default=6,
            min_value=5,
            max_value=1440,
            description=
            "Number of minutes to schedule a trusted advisor refresh. Default: 6"
        )
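        # NOTE: CfnParameter values are deploy-time tokens, so the synth-time
        # comparisons below only behave as intended when the values are known
        # at synth time (e.g. supplied via CDK context).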
        enable_profiling = profiling.value_as_string == 'TRUE'
        enable_tracing = aws_lambda.Tracing.ACTIVE
        if tracing.value_as_string != 'TRUE':
            enable_tracing = aws_lambda.Tracing.DISABLED

        # Layers
        dependencies_layer = aws_lambda.LayerVersion(
            self,
            "dependenciesLayer",
            code=aws_lambda.Code.from_asset(
                "lambda_functions/dependencies_layer/"),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
        )
        # create SNS target
        email_notification_topic = sns.Topic(
            self,
            'taEmailNotificationTopic',
            display_name='taEmailNotificationTopic',
            topic_name='taEmailNotificationTopic')
        # add subscription
        sns.Subscription(self,
                         'emailSubscription',
                         protocol=sns.SubscriptionProtocol.EMAIL,
                         endpoint=notification_email_address.value_as_string,
                         topic=email_notification_topic)

        default_event_bus = events.EventBus.from_event_bus_name(
            self, 'default', 'default')
        ta_event_pattern = events.EventPattern(
            source=['aws.trustedadvisor'],
            detail_type=['Trusted Advisor Check Item Refresh Notification'],
            detail={
                'check-name': ['Amazon S3 Bucket Permissions'],
                'status': ['WARN', 'ERROR']
            })
        # Lambda function to trigger when TA check flagged
        ta_check_s3_open_lambda_function_code = aws_lambda.AssetCode(
            'lambda_functions/s3openbucket')
        ta_check_s3_open_lambda_function = aws_lambda.Function(
            self,
            'ta_s3_open_bucket',
            code=ta_check_s3_open_lambda_function_code,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='s3openbucket.lambda_handler',
            description='Function Triggered from Trusted Advisor '
            'to Block public access to an S3 Bucket',
            function_name='ta-check-s3-open-lambda-function',
            memory_size=128,
            profiling=enable_profiling,
            tracing=enable_tracing,
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
            timeout=Duration.seconds(10),
            environment={'topic_arn': email_notification_topic.topic_arn},
            initial_policy=[
                aws_iam.PolicyStatement(actions=[
                    's3:GetBucketPolicy', 's3:DeleteBucketPolicy',
                    's3:PutBucketPolicy', 's3:GetAccountPublicAccessBlock',
                    's3:GetBucketPublicAccessBlock',
                    's3:PutAccountPublicAccessBlock',
                    's3:PutBucketPublicAccessBlock', 's3:GetBucketAcl',
                    's3:GetObjectAcl', 's3:PutBucketAcl', 's3:PutObjectAcl'
                ],
                                        effect=aws_iam.Effect.ALLOW,
                                        resources=['*']),
                aws_iam.PolicyStatement(
                    actions=['SNS:Publish'],
                    effect=aws_iam.Effect.ALLOW,
                    resources=[email_notification_topic.topic_arn])
            ])
        events.Rule(
            self,
            's3PublicBucketRule',
            description=
            'Blocks Public access on an S3 bucket once detected by Trusted Advisor',
            event_pattern=ta_event_pattern,
            event_bus=default_event_bus,
            targets=[targets.LambdaFunction(ta_check_s3_open_lambda_function)])
        # Refresh TA check every X minutes
        # Lambda function to trigger when TA check flagged
        ta_refresh_lambda_function_code = aws_lambda.AssetCode(
            'lambda_functions/refreshTrustedAdvisorCheck')
        ta_refresh_lambda_function = aws_lambda.Function(
            self,
            'refresh_ta_check',
            code=ta_refresh_lambda_function_code,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='refreshTrustedAdvisorCheck.lambda_handler',
            description='Refreshes Trusted Advisor checks',
            function_name='ta-refresh-ta-check-lambda-function',
            memory_size=128,
            profiling=enable_profiling,
            tracing=enable_tracing,
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
            timeout=Duration.seconds(5),
            initial_policy=[
                aws_iam.PolicyStatement(actions=[
                    'support:DescribeTrustedAdvisorChecks',
                    'support:RefreshTrustedAdvisorCheck',
                    'support:DescribeTrustedAdvisorCheckResult'
                ],
                                        effect=aws_iam.Effect.ALLOW,
                                        resources=['*'])
            ])
        ta_refresh_lambda_function.add_layers(dependencies_layer)
        events.Rule(
            self,
            'refreshTAS3BucketPermissionsRule',
            schedule=events.Schedule.rate(
                Duration.minutes(
                    trusted_advisor_refresh_minutes.value_as_number)),
            rule_name='refreshTAS3BucketPermissionsRule',
            targets=[targets.LambdaFunction(ta_refresh_lambda_function)])
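
Assumed imports (a sketch):

from constructs import Construct
from aws_cdk import CfnParameter, Duration
from aws_cdk import aws_events as events
from aws_cdk import aws_events_targets as targets
from aws_cdk import aws_iam, aws_lambda, aws_logs
from aws_cdk import aws_sns as sns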
Example #12
    def create_docker_image_buildproject(self) -> Resource:
        """CodeBuild project that builds the inference app's Docker image for the Greengrass component.

        Returns:
            Resource: the CodeBuild project
        """

        codebuild_name = f"{self.stack_name}_{self.component_id}_build_component"
        role_name = self.get_role_name("codebuild")
        codebuild_role = aws_iam.Role(
            self,
            id=role_name,
            assumed_by=aws_iam.ServicePrincipal("codebuild.amazonaws.com"),
            role_name=role_name,
            path="/service-role/")
        codebuild_role.attach_inline_policy(
            aws_iam.Policy(
                self,
                "DefaultCodeBuildPermissions",
                document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup", "logs:CreateLogStream",
                            "logs:PutLogEvents"
                        ],
                        resources=[
                            f"arn:aws:logs:{Aws.REGION}:{Aws.ACCOUNT_ID}:log-group:/aws/codebuild/{codebuild_name}",
                            f"arn:aws:logs:{Aws.REGION}:{Aws.ACCOUNT_ID}:log-group:/aws/codebuild/{codebuild_name}:*"
                        ]),
                    aws_iam.PolicyStatement(
                        actions=[
                            "codebuild:CreateReportGroup",
                            "codebuild:CreateReport", "codebuild:UpdateReport",
                            "codebuild:BatchPutTestCases",
                            "codebuild:BatchPutCodeCoverages"
                        ],
                        resources=[
                            f"arn:aws:codebuild:{Aws.REGION}:{Aws.ACCOUNT_ID}:report-group/{codebuild_name}-*"
                        ]),
                    aws_iam.PolicyStatement(
                        actions=[
                            "s3:PutObject", "s3:GetObject",
                            "s3:GetObjectVersion", "s3:GetBucketAcl",
                            "s3:GetBucketLocation"
                        ],
                        resources=[
                            "arn:aws:s3:::ml-model-build-input-us-east-1/*"
                        ]),
                    aws_iam.PolicyStatement(
                        actions=["ecr:GetAuthorizationToken"],
                        resources=["*"]),
                    aws_iam.PolicyStatement(
                        actions=[
                            "ecr:BatchCheckLayerAvailability",
                            "ecr:GetDownloadUrlForLayer",
                            "ecr:GetRepositoryPolicy",
                            "ecr:DescribeRepositories", "ecr:ListImages",
                            "ecr:DescribeImages", "ecr:BatchGetImage",
                            "ecr:InitiateLayerUpload", "ecr:UploadLayerPart",
                            "ecr:CompleteLayerUpload", "ecr:PutImage"
                        ],
                        resources=[
                            self._component_ecr.repository_arn,
                            self._component_base_ecr.repository_arn
                        ]),
                    aws_iam.PolicyStatement(
                        actions=["codecommit:GitPull"],
                        resources=[
                            self._component_source_repository.repository_arn
                        ])
                ])))

        buildspecfile = os.path.join(
            os.path.dirname(__file__), "buildspec", "componentimage.yaml")
        with open(buildspecfile, "r") as yml:
            buildspec = yaml.safe_load(yml)

        code_build = aws_codebuild.Project(
            self,
            id=codebuild_name,
            project_name=codebuild_name,
            build_spec=aws_codebuild.BuildSpec.from_object(buildspec),
            environment=aws_codebuild.BuildEnvironment(
                privileged=True,
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_4_0),
            description='Builds the inference app component image for Greengrass',
            timeout=Duration.minutes(60),
            role=codebuild_role)

        return code_build
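
This method belongs to a Stack that defines self.stack_name, self.component_id, get_role_name and the ECR/CodeCommit repository attributes referenced above. Assumed imports (a sketch):

import os
import yaml
from aws_cdk import Aws, Duration, Resource
from aws_cdk import aws_codebuild, aws_iam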
Example #13
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Glue job execution IAM Role
        glue_job_role = iam.Role(
            self,
            'Glue-Job-Role',
            assumed_by=iam.ServicePrincipal('glue.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSGlueServiceRole')
            ])

        S3_BUCKET_NAME = "MyCdkGlueJobBucket"

        # S3 Bucket to host glue scripts
        bucket = s3.Bucket(self,
                           S3_BUCKET_NAME,
                           versioned=True,
                           removal_policy=RemovalPolicy.DESTROY,
                           auto_delete_objects=True,
                           block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # asset to sync local scripts folder with S3 bucket
        asset = s3deploy.Source.asset("./resources/glue-scripts")

        # Sync local scripts with S3 bucket
        s3deploy.BucketDeployment(self,
                                  "DeployGlueJobScripts",
                                  sources=[asset],
                                  destination_bucket=bucket,
                                  destination_key_prefix="glue-python-scripts")

        # Grant read write access for glue execution IAM role for S3 bucket
        bucket.grant_read_write(glue_job_role)

        script_location = f's3://{bucket.bucket_name}/glue-python-scripts/hello.py'

        # Python-shell Glue job
        job = glue.CfnJob(self,
                          'Glue-job',
                          name='cdk-test-glue-python-job',
                          role=glue_job_role.role_arn,
                          command=glue.CfnJob.JobCommandProperty(
                              name='pythonshell',
                              python_version='3',
                              script_location=script_location))

        # Glue StartJobRun task for the Step Functions state machine (integration_pattern = .sync)
        glue_task = sfn_tasks.GlueStartJobRun(
            self,
            "Task",
            glue_job_name=job.name,
            integration_pattern=sfn.IntegrationPattern.RUN_JOB,
            arguments=sfn.TaskInput.from_object(
                {"--message": sfn.JsonPath.string_at("$.message")}),
            timeout=Duration.minutes(6),
            notify_delay_after=Duration.minutes(6))

        # Step Functions state machine definition
        definition = glue_task
        state_machine = sfn.StateMachine(self,
                                         "GlueJobStateMachine",
                                         definition=definition,
                                         timeout=Duration.minutes(10))

        # CDK Outputs
        CfnOutput(scope=self,
                  id='StateMachineArn',
                  value=state_machine.state_machine_arn)
        CfnOutput(scope=self, id='GlueJobName', value=job.name)
        CfnOutput(scope=self, id='S3BucketName', value=bucket.bucket_name)
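
Assumed imports (a sketch):

from constructs import Construct
from aws_cdk import CfnOutput, Duration, RemovalPolicy
from aws_cdk import aws_glue as glue
from aws_cdk import aws_iam as iam
from aws_cdk import aws_s3 as s3
from aws_cdk import aws_s3_deployment as s3deploy
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk import aws_stepfunctions_tasks as sfn_tasks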
Example #14
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #the S3 bucket where CloudFront Access Logs will be stored
        cf_access_logs = s3.Bucket(self, "LogBucket")

        #S3 bucket where Athena will put the results
        athena_results = s3.Bucket(self, "AthenaResultsBucket")

        #create an Athena database
        glue_database_name = "serverlessland_database"
        my_database = glue.CfnDatabase(
            self,
            id=glue_database_name,
            catalog_id=self.account,
            database_input=glue.CfnDatabase.DatabaseInputProperty(
                description=f"Glue database '{glue_database_name}'",
                name=glue_database_name,
            )
        )

        #define a table with the structure of CloudFront Logs https://docs.aws.amazon.com/athena/latest/ug/cloudfront-logs.html
        athena_table = glue.CfnTable(self,
            id='cfaccesslogs',
            catalog_id=self.account,
            database_name=glue_database_name,
            table_input=glue.CfnTable.TableInputProperty(
                name='cf_access_logs',
                description='CloudFront access logs',
                table_type='EXTERNAL_TABLE',
                parameters={
                    'skip.header.line.count': '2',
                },
                storage_descriptor=glue.CfnTable.StorageDescriptorProperty(
                    location="s3://"+cf_access_logs.bucket_name+"/",
                    input_format='org.apache.hadoop.mapred.TextInputFormat',
                    output_format='org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',
                    compressed=False,
                    serde_info=glue.CfnTable.SerdeInfoProperty(
                        serialization_library='org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe',
                        parameters={
                            'field.delim': '\t'
                        }
                    ),
                    columns=[
                        glue.CfnTable.ColumnProperty(name='date', type='date'),
                        glue.CfnTable.ColumnProperty(name='time', type='string'),
                        glue.CfnTable.ColumnProperty(name='location', type='string'),
                        glue.CfnTable.ColumnProperty(name='bytes', type='bigint'),
                        glue.CfnTable.ColumnProperty(name='request_ip', type='string'),
                        glue.CfnTable.ColumnProperty(name='method', type='string'),
                        glue.CfnTable.ColumnProperty(name='host', type='string'),
                        glue.CfnTable.ColumnProperty(name='uri', type='string'),
                        glue.CfnTable.ColumnProperty(name='status', type='string'),
                        glue.CfnTable.ColumnProperty(name='referer', type='string'),
                        glue.CfnTable.ColumnProperty(name='user_agent', type='string'),
                        glue.CfnTable.ColumnProperty(name='query_string', type='string'),
                        glue.CfnTable.ColumnProperty(name='cookie', type='string'),
                        glue.CfnTable.ColumnProperty(name='result_type', type='string'),
                        glue.CfnTable.ColumnProperty(name='request_id', type='string'),
                        glue.CfnTable.ColumnProperty(name='host_header', type='string'),
                        glue.CfnTable.ColumnProperty(name='request_protocol', type='string'),
                        glue.CfnTable.ColumnProperty(name='request_bytes', type='bigint'),
                        glue.CfnTable.ColumnProperty(name='time_taken', type='float'),
                        glue.CfnTable.ColumnProperty(name='xforwarded_for', type='string'),
                        glue.CfnTable.ColumnProperty(name='ssl_protocol', type='string'),
                        glue.CfnTable.ColumnProperty(name='ssl_cipher', type='string'),
                        glue.CfnTable.ColumnProperty(name='response_result_type', type='string'),
                        glue.CfnTable.ColumnProperty(name='http_version', type='string'),
                        glue.CfnTable.ColumnProperty(name='fle_status', type='string'),
                        glue.CfnTable.ColumnProperty(name='fle_encrypted_fields', type='int'),
                        glue.CfnTable.ColumnProperty(name='c_port', type='int'),
                        glue.CfnTable.ColumnProperty(name='time_to_first_byte', type='float'),
                        glue.CfnTable.ColumnProperty(name='x_edge_detailed_result_type', type='string'),
                        glue.CfnTable.ColumnProperty(name='sc_content_type', type='string'),
                        glue.CfnTable.ColumnProperty(name='sc_content_len', type='string'),
                        glue.CfnTable.ColumnProperty(name='sc_range_start', type='bigint'),
                        glue.CfnTable.ColumnProperty(name='sc_range_end', type='bigint')
                    ]
                ),
            )
        )

        #submit the query and wait for the results
        start_query_execution_job = tasks.AthenaStartQueryExecution(self, "Start Athena Query",
            query_string="SELECT uri FROM cf_access_logs limit 10",
            integration_pattern=sf.IntegrationPattern.RUN_JOB, #executes the command in SYNC mode
            query_execution_context=tasks.QueryExecutionContext(
                database_name=glue_database_name
            ),
            result_configuration=tasks.ResultConfiguration(
                output_location=s3.Location(
                    bucket_name=athena_results.bucket_name,
                    object_key="results"
                )
            )
        )

        #get the results
        get_query_results_job = tasks.AthenaGetQueryResults(self, "Get Query Results",
            query_execution_id=sf.JsonPath.string_at("$.QueryExecution.QueryExecutionId"),
            result_path=sf.JsonPath.string_at("$.GetQueryResults"),
        )

        #prepare the query to see if more results are available (up to 1000 can be retrieved)
        prepare_next_params = sf.Pass(self, "Prepare Next Query Params",
            parameters={
                "QueryExecutionId.$": "$.StartQueryParams.QueryExecutionId",
                "NextToken.$": "$.GetQueryResults.NextToken"
            },
            result_path=sf.JsonPath.string_at("$.StartQueryParams")
        )

        #check to see if more results are available
        has_more_results = sf.Choice(self, "Has More Results?").when(
                    sf.Condition.is_present("$.GetQueryResults.NextToken"),
                    prepare_next_params.next(get_query_results_job)
                ).otherwise(sf.Succeed(self, "Done"))


        # Do something with each result; add your own logic here
        map_state = sf.Map(self, "Map State",
            max_concurrency=1,
            input_path=sf.JsonPath.string_at("$.GetQueryResults.ResultSet.Rows[1:]"),
            result_path=sf.JsonPath.DISCARD
        )
        map_state.iterator(sf.Pass(self, "DoSomething"))


        # Step function to orchestrate Athena query and retrieving the results
        workflow = sf.StateMachine(self, "AthenaQuery",
            definition=start_query_execution_job.next(get_query_results_job).next(map_state).next(has_more_results),
            timeout=Duration.minutes(60)
        )

        CfnOutput(self, "Logs",
            value=cf_access_logs.bucket_name, export_name='LogsBucket')

        CfnOutput(self, "SFName",
            value=workflow.state_machine_name, export_name='SFName')

        CfnOutput(self, "SFArn",
            value = workflow.state_machine_arn,
            export_name = 'StepFunctionArn',
            description = 'Step Function arn')
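
Assumed imports for this example (a sketch; the Glue catalog ID is read from the enclosing Stack via self.account):

from constructs import Construct
from aws_cdk import CfnOutput, Duration
from aws_cdk import aws_glue as glue
from aws_cdk import aws_s3 as s3
from aws_cdk import aws_stepfunctions as sf
from aws_cdk import aws_stepfunctions_tasks as tasks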
Example #15
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "MyQueue",
                           visibility_timeout=Duration.seconds(300))

        # Create the AWS Lambda function to subscribe to Amazon SQS queue
        # The source code is in './lambda' directory
        lambda_function = _lambda.Function(
            self,
            "MyLambdaFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="send_callback_token.handler",
            code=_lambda.Code.from_asset("lambda"),
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambda_function.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)

        step_function_definition = tasks.SqsSendMessage(
            self,
            "SendToSQS",
            integration_pattern=sf.IntegrationPattern.WAIT_FOR_TASK_TOKEN,
            queue=queue,
            message_body=sf.TaskInput.from_object({
                "input.$": "$",
                "token": sf.JsonPath.task_token
            }))
        step_function = sf.StateMachine(self,
                                        "StepFunction",
                                        definition=step_function_definition,
                                        timeout=Duration.minutes(60))

        # Grant the Lambda function permission to send the task token back to the Step Function
        step_function.grant_task_response(lambda_function)

        CfnOutput(self,
                  "FunctionName",
                  value=lambda_function.function_name,
                  export_name='FunctionName',
                  description='Function name')

        CfnOutput(self,
                  "QueueName",
                  value=queue.queue_name,
                  export_name='QueueName',
                  description='SQS queue name')

        CfnOutput(self,
                  "QueueArn",
                  value=queue.queue_arn,
                  export_name='QueueArn',
                  description='SQS queue ARN')

        CfnOutput(self,
                  "QueueUrl",
                  value=queue.queue_url,
                  export_name='QueueUrl',
                  description='SQS queue URL')

        CfnOutput(self,
                  "StepFunctionArn",
                  value=step_function.state_machine_arn,
                  export_name='StepFunctionArn',
                  description='Step Function ARN')
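
Assumed imports (a sketch):

from constructs import Construct
from aws_cdk import CfnOutput, Duration, RemovalPolicy
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_logs as logs
from aws_cdk import aws_sqs as _sqs
from aws_cdk import aws_stepfunctions as sf
from aws_cdk import aws_stepfunctions_tasks as tasks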
Example #16
    def __init__(self, app: App, id: str, props, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # pipeline requires versioned bucket
        bucket = aws_s3.Bucket(
            self,
            "SourceBucket",
            bucket_name=f"{props['namespace'].lower()}-{Aws.ACCOUNT_ID}",
            versioned=True,
            removal_policy=RemovalPolicy.DESTROY)
        # ssm parameter to get bucket name later
        bucket_param = aws_ssm.StringParameter(
            self,
            "ParameterB",
            parameter_name=f"{props['namespace']}-bucket",
            string_value=bucket.bucket_name,
            description='cdk pipeline bucket')
        # ecr repo to push docker container into
        ecr = aws_ecr.Repository(self,
                                 "ECR",
                                 repository_name=f"{props['namespace']}",
                                 removal_policy=RemovalPolicy.DESTROY)
        # codebuild project meant to run in pipeline
        cb_docker_build = aws_codebuild.PipelineProject(
            self,
            "DockerBuild",
            project_name=f"{props['namespace']}-Docker-Build",
            build_spec=aws_codebuild.BuildSpec.from_source_filename(
                filename='pipeline_delivery/docker_build_buildspec.yml'),
            environment=aws_codebuild.BuildEnvironment(privileged=True),
            # pass the ecr repo uri into the codebuild project so codebuild knows where to push
            environment_variables={
                'ecr': aws_codebuild.BuildEnvironmentVariable(value=ecr.repository_uri),
                'tag': aws_codebuild.BuildEnvironmentVariable(value='cdk')
            },
            description='Pipeline for CodeBuild',
            timeout=Duration.minutes(60),
        )
        # codebuild iam permissions to read write s3
        bucket.grant_read_write(cb_docker_build)

        # codebuild permissions to interact with ecr
        ecr.grant_pull_push(cb_docker_build)

        CfnOutput(
            self,
            "ECRURI",
            description="ECR URI",
            value=ecr.repository_uri,
        )
        CfnOutput(self,
                  "S3Bucket",
                  description="S3 Bucket",
                  value=bucket.bucket_name)

        self.output_props = props.copy()
        self.output_props['bucket'] = bucket
        self.output_props['cb_docker_build'] = cb_docker_build
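
Assumed imports (a sketch; props is a plain dict carrying at least a 'namespace' key):

from aws_cdk import App, Aws, CfnOutput, Duration, RemovalPolicy
from aws_cdk import aws_codebuild, aws_ecr, aws_s3, aws_ssm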