Example No. 1
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ################################################################################
        # Create a Lambda function to process the CodeBuild state change events
        # and send out appropriate Slack messages

        # Permissions for the Lambda
        lambda_role = _iam.Role(
            self,
            id='UmccriseCodeBuildSlackLambdaRole',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMReadOnlyAccess'),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonEC2ContainerRegistryReadOnly')
            ])

        # The Lambda function itself
        function = _lambda.Function(
            self,
            id='UmccriseCodeBuildSlackLambda',
            handler='notify_slack.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambdas/slack'),
            environment={
                'SLACK_HOST': 'hooks.slack.com',
                'SLACK_CHANNEL': props['slack_channel'],
                'ECR_NAME': props['ecr_name'],
                'AWS_ACCOUNT':
                props['aws_account']  # TODO: get from kwargs (env)
            },
            role=lambda_role)

        ################################################################################
        # Create a reference to the UMCCRise CodeBuild project
        # TODO: should probably use cross-stack resource references
        cb_project = cb.Project.from_project_name(
            self,
            id='UmccriseCodeBuildProject',
            project_name=props['codebuild_project_name'])

        ################################################################################
        # Create an SNS topic to receive CodeBuild state change events
        sns_topic = _sns.Topic(self,
                               id='UmccriseCodeBuildSnsTopic',
                               display_name='UmccriseCodeBuildSnsTopic',
                               topic_name='UmccriseCodeBuildSnsTopic')
        sns_topic.grant_publish(cb_project)
        sns_topic.add_subscription(_sns_subs.LambdaSubscription(function))

        # Send state change events to SNS topic
        cb_project.on_state_change(
            id='UmccriseCodebuildStateChangeRule',
            rule_name='UmccriseCodebuildStateChangeRule',
            target=targets.SnsTopic(sns_topic))
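
The excerpt omits its imports; a plausible CDK v1 import block, inferred from the aliases used above (the exact alias names are assumptions), would be:

from aws_cdk import core
import aws_cdk.aws_iam as _iam
import aws_cdk.aws_lambda as _lambda
import aws_cdk.aws_codebuild as cb
import aws_cdk.aws_sns as _sns
import aws_cdk.aws_sns_subscriptions as _sns_subs
import aws_cdk.aws_events_targets as targets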
Example No. 2
    def __init__(self, app: core.App, id: str, notification_emails: list, **kwargs):
        super().__init__(app, id, **kwargs)

        # [ SNS ] Topic:
        #
        # - The error topic for all issues.

        topic = sns.Topic(self, 'Topic', display_name='Pipeline Alert')

        # [ SNS ] Target:
        #
        # - Wraps the topic as an EventBridge rule target, exported below for other stacks.

        sns_target = targets.SnsTopic(topic)

        # [ SNS ] Subscription:
        #
        # - Takes all emails in the list and creates email subscriptions for each.

        for email in notification_emails:
            topic.add_subscription(
                sns_subscriptions.EmailSubscription(email_address=email))

        # [ SNS ] Export:
        #
        # - Expose the rule target on the stack so other constructs can attach it to their rules.

        self.sns_target = sns_target
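
A hedged usage sketch for this stack (the class name AlertTopicStack and the address are placeholders, not from the original):

app = core.App()
AlertTopicStack(app, 'alert-topic', notification_emails=['ops@example.com'])
app.synth()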
Example No. 3
    def __init__(self, scope: core.Construct, id: str,  bank_account_service: lambda_.Function,
                 stage: Optional[str] = 'prod', **kwargs) -> None:
        super().__init__(scope, id+'-'+stage, **kwargs)

        # create SNS topic
        topic = sns.Topic(self, "BankTopic", display_name="SMSOutbound", topic_name="SMSOutbound")
        topic.add_subscription(subs.EmailSubscription(email_address="*****@*****.**"))

        # create the EventBridge bus and rules
        bus_name = 'banking-demo-events-'+stage
        bus = events.EventBus(self, id, event_bus_name=bus_name)
        events.Rule(self, "HUMAN_REVIEWED_APPLICATION", event_bus=bus, event_pattern=events.EventPattern(
            detail_type=["HUMAN_REVIEWED_APPLICATION"]), rule_name="HUMAN_REVIEWED_APPLICATION", enabled=True,
                    targets=[
                        targets.SnsTopic(topic)
                    ])
        events.Rule(self, "APPLICATION_SUBMITTED", event_bus=bus, event_pattern=events.EventPattern(
            detail_type=["APPLICATION_SUBMITTED"]), rule_name="APPLICATION_SUBMITTED", enabled=True)
        events.Rule(self, "APPLICATION_APPROVED", event_bus=bus, event_pattern=events.EventPattern(
            detail_type=["APPLICATION_APPROVED"]), rule_name="APPLICATION_APPROVED", enabled=True,
                         targets=[
                             targets.LambdaFunction(lambda_.Function.from_function_arn(
                                 self, "func", bank_account_service.function_arn))
                         ])

        self._event_bus_arn = bus.event_bus_arn
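
For context, a producer would publish matching events onto this custom bus; a hedged boto3 sketch (the source name and detail payload are illustrative, not from the original):

import json
import boto3

events_client = boto3.client('events')
events_client.put_events(Entries=[{
    'EventBusName': 'banking-demo-events-prod',
    'Source': 'banking.demo',  # any non-'aws.' source; the rules above match on detail-type only
    'DetailType': 'APPLICATION_APPROVED',
    'Detail': json.dumps({'applicationId': '12345'}),
}])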
Example No. 4
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # SNS Topic
        MySnsTopic = sns.Topic(self, "MySnsTopic")

        # Custom EventBridge Bus
        custom_bus = events.EventBus(self,
                                     "bus",
                                     event_bus_name="test-bus-cdk")

        # EventBridge Rule
        rule = events.Rule(self, "rule", event_bus=custom_bus)

        # Event Pattern to filter events
        rule.add_event_pattern(source=["my-application"],
                               detail_type=["message"])

        # SNS topic as target for EventBridge Rule
        rule.add_target(targets.SnsTopic(MySnsTopic))

        # CDK Outputs
        CfnOutput(self,
                  "SNS topic name",
                  description="SNS topic name",
                  value=MySnsTopic.topic_name)
        CfnOutput(self,
                  "SNS topic ARN",
                  description="SNS topic ARN",
                  value=MySnsTopic.topic_arn)
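
This example targets CDK v2 (Construct and CfnOutput come from the top-level package); a minimal app entry point for it might look like the following, with the stack class name assumed:

from aws_cdk import App

app = App()
EventBridgeSnsStack(app, 'eventbridge-sns-demo')
app.synth()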
Example No. 5
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        
        #template = cfn_inc.CfnInclude(self, id='Template', template_file='template.yaml')
        # The code that defines your stack goes here
        bucket_names = 'config-1' + str(core.Aws.ACCOUNT_ID)
        sns_topic = _sns.Topic(self, id='topic-config', topic_name='config-topic')
        sns_topic.add_subscription(subscriptions.EmailSubscription("*****@*****.**"))
        bucket = s3.Bucket(self, id='s3cdkbuckets', bucket_name=bucket_names, versioned=True)
        bucket_arn2 = str(bucket.bucket_arn) + "/AWSLogs/" + str(core.Aws.ACCOUNT_ID) + "/Config/*"
        bucket_policy = bucket.add_to_resource_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[bucket.bucket_arn],
            actions=["s3:GetBucketAcl"],
            sid="AWSConfigBucketPermissionsCheck",
            principals=[iam.ServicePrincipal("config.amazonaws.com")]))
        bucket_policy2 = bucket.add_to_resource_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[bucket_arn2],
            actions=["s3:PutObject"],
            sid="AWSConfigBucketDelivery",
            principals=[iam.ServicePrincipal("config.amazonaws.com")],
            conditions={"StringEquals": {"s3:x-amz-acl": "bucket-owner-full-control"}}))
        recorder = config.CfnConfigurationRecorder(self,
                id='recorder',
                role_arn='arn:aws:iam::306646308112:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig',
                recording_group=None)
        channel = config.CfnDeliveryChannel(self,
                id='channel',
                s3_bucket_name=bucket.bucket_name,
                sns_topic_arn=sns_topic.topic_arn)
        # (a time.sleep() at synth time has no effect on deployment ordering;
        # the rules below depend on the recorder via add_depends_on instead)
        srule = config.CfnConfigRule(self,
                id='rule1',
                source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="REQUIRED_TAGS"),  
                input_parameters={"tag1Key":"tagVal"})
        srule2 = config.CfnConfigRule(self, id='rule2',
                 source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="S3_BUCKET_LEVEL_PUBLIC_ACCESS_PROHIBITED"))
        srule3 = config.CfnConfigRule(self, id='rule3',
                 source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="VPC_SG_OPEN_ONLY_TO_AUTHORIZED_PORTS"))
        srule.add_depends_on(recorder)
        srule2.add_depends_on(recorder)
        srule3.add_depends_on(recorder)
        event_rule = _events.Rule(self, id='event_rule', event_pattern={
            "source": ["aws.config"],
            "detail": {
                "messageType": ["ConfigurationItemChangeNotification"],
                "newEvaluationResult": {
                    "compliance_type": ["NON_COMPLIANT"]
                }
            }
        })
        event_rule.add_target(targets.SnsTopic(sns_topic))
Example No. 6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create lambda function
        lambda_pipeline_alerts = _lambda.Function(
            self,
            id="lambda_pipeline_alerts_asset",
            function_name='lambda_pipeline_alerts_asset',
            code=_lambda.Code.asset("lambda_pipeline_alerts_asset"),
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler")
        # add Env Vars
        lambda_pipeline_alerts.add_environment(
            'SLACK_WEB_HOOK_URL',
            'https://hooks.slack.com/services/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'
        )
        lambda_pipeline_alerts.add_environment('SLACK_CHANNEL',
                                               '#tech-pay-deploys')

        # Create sns topic for the pipeline events
        sns_topic_pipeline_alerts = _sns.Topic(self,
                                               id='sns_pipeline_alerts',
                                               display_name='pipelines-events',
                                               topic_name='pipelines-events')
        # add lambda to sns subscription
        sns_topic_pipeline_alerts.add_subscription(
            _sns_subscription.LambdaSubscription(lambda_pipeline_alerts))

        # Create the event rule
        event_rule = _events.Rule(
            self,
            id='pipeline_alerts',
            rule_name='pipeline_alerts',
            description='Cloud Watch Event Rule to check pipeline events')

        # CloudWatch event configuration
        event_source = ["aws.codepipeline"]
        event_detail_type = ["CodePipeline Pipeline Execution State Change"]
        event_detail = {"state": ["FAILED"]}

        # add event pattern to send to target
        event_rule.add_event_pattern(detail=event_detail,
                                     detail_type=event_detail_type,
                                     source=event_source)

        # add target
        pipeline_name = _events.EventField.from_path('$.detail.pipeline')

        event_rule.add_target(
            _targets.SnsTopic(
                sns_topic_pipeline_alerts,
                message=_events.RuleTargetInput.from_text(
                    f':rotating_light:The Pipeline `{pipeline_name}` has failed.:rotating_light:'
                )))
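
The handler behind index.lambda_handler is not shown; a minimal sketch of a handler that relays the SNS-delivered message to the Slack webhook (all of this is an assumption, not the original code):

import json
import os
import urllib.request

def lambda_handler(event, context):
    # The EventBridge rule renders the alert text and SNS delivers it
    # inside Records[0].Sns.Message
    text = event['Records'][0]['Sns']['Message']
    payload = {'channel': os.environ['SLACK_CHANNEL'], 'text': text}
    req = urllib.request.Request(
        os.environ['SLACK_WEB_HOOK_URL'],
        data=json.dumps(payload).encode('utf-8'),
        headers={'Content-Type': 'application/json'})
    urllib.request.urlopen(req)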
Example No. 7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ZachRepositoryName = self.__class__.__name__
        ZachRepository = ecr.Repository(self, id=ZachRepositoryName)
        ZachRepository.add_lifecycle_rule(
            max_image_count=100,
            rule_priority=50,
            tag_status=ecr.TagStatus.TAGGED,
            tag_prefix_list=['sit', 'uat', 'dev'])
        ZachRepository.add_lifecycle_rule(
            max_image_age=core.Duration.days(720), rule_priority=100)
        ZachRepository.grant_pull_push(
            iam.Role.from_role_arn(
                self,
                id="EC2FullPrivilege",
                role_arn="arn:aws:iam::098380756085:role/Zach_EC2FullPrivilege"
            ))
        ZachRepository.grant_pull(
            iam.Role.from_role_arn(
                self,
                id="EC2ReadPrivilege",
                role_arn="arn:aws:iam::098380756085:role/Zach_EC2ReadPrivilege"
            ))
        ZachRepositoryEvent = ZachRepository.on_cloud_trail_event(
            id="DockerTrail",
            event_pattern=event.EventPattern(
                account=[
                    self.node.try_get_context("account")
                    or os.getenv('account') or '098380756085'
                ],
                region=['ap-southeast-1']))

        ZachSNStopic = self.SnsExporter(ZachRepositoryName)
        ZachRepositoryEvent.add_target(eventarget.SnsTopic(topic=ZachSNStopic))

        core.CfnOutput(self,
                       id=ZachRepositoryName + "ARN",
                       value=ZachRepository.repository_arn)
        core.CfnOutput(self,
                       id=ZachRepositoryName + "URI",
                       value=ZachRepository.repository_uri)
        core.CfnOutput(self,
                       id=ZachRepositoryName + "RuleARN",
                       value=ZachRepositoryEvent.rule_arn)
        core.CfnOutput(self,
                       id=ZachRepositoryName + "RuleName",
                       value=ZachRepositoryEvent.rule_name)
        core.CfnOutput(self,
                       id=ZachRepositoryName + "TopicARN",
                       value=ZachSNStopic.topic_arn)
        core.CfnOutput(self,
                       id=ZachRepositoryName + "TopicName",
                       value=ZachSNStopic.topic_name)
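
SnsExporter is a helper method defined elsewhere in this class; its call site implies it returns an SNS topic, so a hedged sketch (assuming aws_cdk.aws_sns is imported as sns, in line with the other aliases) could be:

    def SnsExporter(self, topic_name):
        # Create a topic named after the repository and return it so the
        # CloudTrail rule can target it
        return sns.Topic(self, id=topic_name + 'Topic',
                         topic_name=topic_name, display_name=topic_name)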
Example No. 8
 def __init__(self, scope: core.Construct, id: str, params: dict, **kwargs):  #pylint: disable=W0622
     super().__init__(scope, id, **kwargs)
     _pipeline = _codepipeline.Pipeline.from_pipeline_arn(
         self, 'pipeline', params["pipeline"])
     _source_account = params.get('source', {}).get('account_id')
     _provider = params.get('source', {}).get('provider')
     if _source_account and _provider == 'codecommit':
         _event = _events.Rule(
             self,
             'trigger_{0}'.format(params["name"]),
             description=
             "Triggers {0} on changes in source CodeCommit repository".
             format(params["name"]),
             event_pattern=_events.EventPattern(
                 resources=[
                     "arn:aws:codecommit:{0}:{1}:{2}".format(
                         ADF_DEPLOYMENT_REGION,
                         params['source']['account_id'],
                         params['source']['repo_name'])
                 ],
                 source=["aws.codecommit"],
                 detail_type=['CodeCommit Repository State Change'],
                 detail={
                     "event": ["referenceCreated", "referenceUpdated"],
                     "referenceType": ["branch"],
                     "referenceName": ["master"]
                 }))
         _event.add_target(_targets.CodePipeline(pipeline=_pipeline))
     if params.get('topic_arn'):
         _topic = _sns.Topic.from_topic_arn(self, 'topic_arn',
                                            params["topic_arn"])
         _event = _events.Rule(
             self,
             'pipeline_state_{0}'.format(params["name"]),
             description=
             "{0} | Trigger notifications based on pipeline state changes".
             format(params["name"]),
             enabled=True,
             event_pattern=_events.EventPattern(
                 detail={
                     "state": ["FAILED", "STARTED", "SUCCEEDED"],
                     "pipeline":
                     ["{0}{1}".format(ADF_PIPELINE_PREFIX, params["name"])]
                 },
                 detail_type=[
                     "CodePipeline Pipeline Execution State Change"
                 ],
                 source=["aws.codepipeline"]))
         _event.add_target(
             _targets.SnsTopic(
                 topic=_topic,
                 message=_events.RuleTargetInput.from_text(
                     "The pipeline {0} from account {1} has {2} at {3}.".
                     format(
                         _events.EventField.from_path(
                             '$.detail.pipeline'
                         ),  # Need to parse and get the pipeline: "$.detail.pipeline" state: "$.detail.state"
                         _events.EventField.account,
                         _events.EventField.from_path('$.detail.state'),
                         _events.EventField.time))))
     if params.get('completion_trigger'):
         # There might be other types of completion triggers later, eg lambda..
         for index, pipeline in enumerate(params['completion_trigger'].get(
                 'pipelines', [])):
             _event = _events.Rule(
                 self,
                 'completion_{0}'.format(pipeline),
                 description="Triggers {0} on completion of {1}".format(
                     pipeline, params['pipeline']),
                 enabled=True,
                 event_pattern=_events.EventPattern(
                     detail={
                         "state": ["SUCCEEDED"],
                         "pipeline": [
                             "{0}{1}".format(ADF_PIPELINE_PREFIX,
                                             params["name"])
                         ]
                     },
                     detail_type=[
                         "CodePipeline Pipeline Execution State Change"
                     ],
                     source=["aws.codepipeline"]))
             _completion_pipeline = _codepipeline.Pipeline.from_pipeline_arn(
                 self, 'pipeline-{0}'.format(index),
                 "arn:aws:codepipeline:{0}:{1}:{2}".format(
                     ADF_DEPLOYMENT_REGION, ADF_DEPLOYMENT_ACCOUNT_ID,
                     "{0}{1}".format(ADF_PIPELINE_PREFIX, pipeline)))
             _event.add_target(
                 _targets.CodePipeline(pipeline=_completion_pipeline))
     if params.get('schedule'):
         _event = _events.Rule(
             self,
             'schedule_{0}'.format(params['name']),
             description="Triggers {0} on a schedule of {1}".format(
                 params['name'], params['schedule']),
             enabled=True,
             schedule=_events.Schedule.expression(params['schedule']))
         _target_pipeline = _targets.CodePipeline(pipeline=_pipeline)
         _event.add_target(_target_pipeline)
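
The params dict consumed above is assembled elsewhere in ADF; judging from the key accesses, its shape is roughly the following (all values are placeholders):

params = {
    'name': 'sample-pipeline',
    'pipeline': 'arn:aws:codepipeline:eu-west-1:111111111111:sample-pipeline',
    'source': {
        'provider': 'codecommit',
        'account_id': '111111111111',
        'repo_name': 'sample-repo',
    },
    'topic_arn': 'arn:aws:sns:eu-west-1:111111111111:pipeline-events',
    'completion_trigger': {'pipelines': ['downstream-pipeline']},
    'schedule': 'rate(7 days)',
}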
Example No. 9
    def __init__(self, scope: core.Construct, construct_id: str, cert_arn: str,
                 hosted_zone_id: str, domain_name: str, **kwargs) -> None:
        """
        :param cert_arn: ARN of certificate to use
        :param hosted_zone_id: ID of hosted zone to use
        :param domain_name: Domain name to use
        """
        super().__init__(scope, construct_id, **kwargs)

        ##################################
        # WEBSITE HOSTING INFRASTRUCTURE #
        ##################################

        # Grab hosted zone for the website to contain our records and an SSL certificate for HTTPS. These two have to
        # be grabbed from existing resources instead of created here because CloudFormation will time out waiting for a
        # newly-created cert to validate.
        self.hosted_zone = PublicHostedZone.from_public_hosted_zone_id(
            self, "personal-site-hosted-zone", hosted_zone_id)
        self.cert = Certificate.from_certificate_arn(self,
                                                     "personal-site-cert",
                                                     cert_arn)

        # Add an S3 bucket to host the website content
        self.website_bucket = Bucket(self,
                                     "personal-site-bucket",
                                     bucket_name=domain_name,
                                     removal_policy=RemovalPolicy.DESTROY,
                                     public_read_access=True,
                                     website_index_document="index.html",
                                     website_error_document="index.html")

        # Create a cloudfront distribution for the site
        self.distribution = Distribution(
            self,
            "personal-site-cf-distribution",
            default_behavior={
                "origin": S3Origin(self.website_bucket),
                "allowed_methods": AllowedMethods.ALLOW_GET_HEAD_OPTIONS,
                "viewer_protocol_policy":
                ViewerProtocolPolicy.REDIRECT_TO_HTTPS
            },
            certificate=self.cert,
            minimum_protocol_version=SecurityPolicyProtocol.TLS_V1_2_2019,
            enable_ipv6=True,
            domain_names=[domain_name, f"www.{domain_name}"])

        # Point traffic to base and www.base to the cloudfront distribution, for both IPv4 and IPv6
        ARecord(self,
                "personal-site-a-record",
                zone=self.hosted_zone,
                record_name=f"{domain_name}.",
                target=RecordTarget.from_alias(
                    CloudFrontTarget(self.distribution)))
        ARecord(self,
                "personal-site-a-record-www",
                zone=self.hosted_zone,
                target=RecordTarget.from_alias(
                    CloudFrontTarget(self.distribution)),
                record_name=f"www.{domain_name}.")
        AaaaRecord(self,
                   "personal-site-aaaa-record",
                   zone=self.hosted_zone,
                   record_name=f"{domain_name}.",
                   target=RecordTarget.from_alias(
                       CloudFrontTarget(self.distribution)))
        AaaaRecord(self,
                   "personal-site-aaaa-record-www",
                   zone=self.hosted_zone,
                   target=RecordTarget.from_alias(
                       CloudFrontTarget(self.distribution)),
                   record_name=f"www.{domain_name}.")

        #############################
        # WEBSITE CD INFRASTRUCTURE #
        #############################

        # CodeBuild project to build the website
        self.code_build_project = \
            Project(self, "personal-site-builder",
                    project_name="PersonalWebsite",
                    description="Builds & deploys a personal static website on changes from GitHub",
                    source=Source.git_hub(
                        owner="c7c8",
                        repo="crmyers.dev",
                        clone_depth=1,
                        branch_or_ref="master",
                        webhook_filters=[
                            FilterGroup.in_event_of(EventAction.PUSH, EventAction.PULL_REQUEST_MERGED).and_branch_is(
                                "master")]),
                    artifacts=Artifacts.s3(bucket=self.website_bucket, include_build_id=False,
                                           package_zip=False,
                                           path="/"),
                    build_spec=BuildSpec.from_object_to_yaml({
                        "version": "0.2",
                        "phases": {
                            "install": {
                                "runtime-versions": {
                                    "nodejs": 10,
                                }
                            },
                            "pre_build": {
                                "commands": ["npm install"]
                            },
                            "build": {
                                "commands": [
                                    "npm run-script build &&",
                                    f"aws cloudfront create-invalidation --distribution-id={self.distribution.distribution_id} --paths '/*'"
                                ]
                            }
                        },
                        "artifacts": {
                            "files": ["./*"],
                            "name": ".",
                            "discard-paths": "no",
                            "base-directory": "dist/crmyers-dev"
                        }
                    }))
        self.code_build_project.role.add_to_policy(
            PolicyStatement(
                effect=Effect.ALLOW,
                resources=[
                    f"arn:aws:cloudfront::{self.account}:distribution/{self.distribution.distribution_id}"
                ],
                actions=['cloudfront:CreateInvalidation']))

        # Set up an SNS topic for text message notifications
        self.deployment_topic = Topic(self,
                                      'personal-site-deployment-topic',
                                      topic_name='WebsiteDeployments',
                                      display_name='Website Deployments')
        self.deployment_topic.add_subscription(SmsSubscription("+19255968684"))
        self.code_build_project.on_build_failed(
            "BuildFailed",
            target=targets.SnsTopic(self.deployment_topic,
                                    message=RuleTargetInput.from_text(
                                        "Build for crmyers.dev FAILED")))
        self.code_build_project.on_build_succeeded(
            "BuildSucceeded",
            target=targets.SnsTopic(self.deployment_topic,
                                    message=RuleTargetInput.from_text(
                                        "Build for crmyers.dev SUCCEEDED")))
Example No. 10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open('./props/tasksetting.json', 'r') as f1:
            py_json1 = json.load(f1)
            ts = json.dumps(py_json1)

        # with open('./props/mappingrule.json', 'r') as f2:
        #     py_json2 = json.load(f2)
        #     mr = json.dumps(py_json2)

        with open('./props/config.json', 'r') as f2:
            configuration = json.load(f2)

        def getMappingrules(self, table_list):
            rules = []
            for index, value in enumerate(table_list, 1):
                rules.append({
                    "rule-type": "selection",
                    "rule-id": str(index),
                    "rule-name": str(index),
                    "object-locator": {
                        "schema-name": value['schemaName'],
                        "table-name": value['tableName']
                    },
                    "rule-action": "include",
                    "filters": []
                })
            mapping_rules = {"rules": rules}
            return json.dumps(mapping_rules)

        # The code that defines your stack goes here
        S3Accessrole = _iam.Role(
            self,
            'dmsrole',
            assumed_by=_iam.ServicePrincipal('dms.amazonaws.com'),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonS3FullAccess')
            ])

        raw_bucket = s3.Bucket(self,
                               'rawbucket',
                               bucket_name='rawbucket-datalake-cdk-oregon')
        raw_bucket.add_lifecycle_rule(
            enabled=configuration['s3LifecycleRule']['enabled'],
            expiration=core.Duration.days(
                configuration['s3LifecycleRule']['expiration']))

        #my_table = ddb.Table(self, id ='dunamoTable', table_name = 'testcdktable',
        #partition_key = ddb.Attribute(name ='lastname',type = ddb.AttributeType.STRING) )

        dl_dms = _dms.CfnReplicationInstance(
            self,
            'dmsreplication',
            replication_instance_class=configuration['DMS_instance_setting']
            ['instance_class'],
            replication_instance_identifier='datalake-instance-cdk',
            allocated_storage=configuration['DMS_instance_setting']
            ['allocated_storage'])

        source_endpoint = _dms.CfnEndpoint(
            self,
            'sourceendpoint',
            endpoint_type='source',
            engine_name=configuration['engineName'],
            database_name=configuration['databaseName'],
            username=configuration['username'],
            password=configuration['password'],
            port=configuration['port'],
            server_name=configuration['serverName'],
        )

        target_endpoint = _dms.CfnEndpoint(
            self,
            'targetendpoint',
            endpoint_type='target',
            engine_name='s3',
            s3_settings={
                'bucketName': raw_bucket.bucket_name,
                'serviceAccessRoleArn': S3Accessrole.role_arn
            },
            extra_connection_attributes='dataFormat=parquet')

        dms_task = _dms.CfnReplicationTask(
            self,
            'data2lake-task',
            migration_type='full-load-and-cdc',
            replication_instance_arn=dl_dms.ref,
            source_endpoint_arn=source_endpoint.ref,
            target_endpoint_arn=target_endpoint.ref,
            replication_task_settings=ts,
            table_mappings=getMappingrules(self, configuration['tableList']))

        my_table = ddb.Table(self,
                             id='dynamoTable',
                             table_name='ControllerTable',
                             partition_key=ddb.Attribute(
                                 name='path', type=ddb.AttributeType.STRING),
                             billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        datalake_bucket = s3.Bucket(self,
                                    'datalakebucket',
                                    bucket_name='datalake-bucket-cdk-oregon')

        glue_role = _iam.Role(
            self,
            'gluerole',
            assumed_by=_iam.ServicePrincipal('glue.amazonaws.com'),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSGlueServiceRole')
            ])

        raw_bucket.grant_read(glue_role)
        datalake_bucket.grant_read_write(glue_role)

        # Lake Formation settings
        # If the managed policy 'AWSLakeFormationDataAdmin' is attached to your own IAM
        # user, extend that policy to allow "lakeformation:PutDataLakeSettings" so the
        # data lake settings below can be applied by the CDK.
        lake_admin_setting = _lakeformation.CfnDataLakeSettings(
            self,
            'data-lake-GrantAdmin',
            admins=[
                _lakeformation.CfnDataLakeSettings.DataLakePrincipalProperty(
                    data_lake_principal_identifier=configuration[
                        'executiveArn'])
            ])

        glue_database = _glue.Database(self,
                                       'gluedatabase',
                                       database_name='data_lake_gluedb')

        glue_database.node.add_dependency(lake_admin_setting)

        glue_role_permission_inLakeFormation = _lakeformation.CfnPermissions(
            self,
            'permission-glueRole',
            data_lake_principal=_lakeformation.CfnPermissions.
            DataLakePrincipalProperty(
                data_lake_principal_identifier=glue_role.role_arn),
            resource=_lakeformation.CfnPermissions.ResourceProperty(
                database_resource=_lakeformation.CfnPermissions.
                DatabaseResourceProperty(name=glue_database.database_name)),
            permissions=['ALL'])

        crawler = _glue.CfnCrawler(
            self,
            'datalakecrawler',
            name='Crawler-datalake-cdk',
            role=glue_role.role_arn,
            targets={
                's3Targets': [{
                    'path':
                    's3://' + datalake_bucket.bucket_name + '/datalake/'
                }]
            },
            database_name='data_lake_gluedb',
            configuration=
            "{\"Version\":1.0,\"CrawlerOutput\":{\"Partitions\":{\"AddOrUpdateBehavior\":\"InheritFromTable\"},\"Tables\":{\"AddOrUpdateBehavior\":\"MergeNewColumns\"}}}"
        )

        initialload_script = S3Assets.Asset(self,
                                            'initial-load-code',
                                            path='./Gluejob/InitialLoad.py')
        incrementalload_script = S3Assets.Asset(
            self, 'incremental-load-code', path='./Gluejob/IncrementalLoad.py')

        initialload_script.grant_read(glue_role)
        incrementalload_script.grant_read(glue_role)
        my_table.grant_full_access(glue_role)

        initial_load_job = _glue.CfnJob(
            self,
            'initial-job',
            name='InitialLoad-cdk',
            command=_glue.CfnJob.JobCommandProperty(
                name='glueetl',
                python_version='3',
                script_location='s3://' + initialload_script.s3_bucket_name +
                '/' + initialload_script.s3_object_key),
            role=glue_role.role_arn,
            default_arguments={
                '--prefix': str(configuration['tableList']),
                '--bucket': raw_bucket.bucket_name,
                '--datalake_bucket': datalake_bucket.bucket_name,
                '--datalake_prefix': 'datalake/',
                '--region': CdkpyStack.of(self).region,
                '--controller_table_name': my_table.table_name
            },
            allocated_capacity=configuration['glue_job_setting']
            ['job_capacity'],
            execution_property=_glue.CfnJob.ExecutionPropertyProperty(
                max_concurrent_runs=configuration['glue_job_setting']
                ['max_concurrent_run_JobExecution']))

        incremental_load_job = _glue.CfnJob(
            self,
            'increment-job',
            name='IncrementalLoad-cdk',
            command=_glue.CfnJob.JobCommandProperty(
                name='glueetl',
                script_location='s3://' +
                incrementalload_script.s3_bucket_name + '/' +
                incrementalload_script.s3_object_key,
                python_version='3'),
            role=glue_role.role_arn,
            default_arguments={
                '--prefix': str(configuration['tableList']),
                '--bucket': raw_bucket.bucket_name,
                '--datalake_bucket': datalake_bucket.bucket_name,
                '--datalake_prefix': 'datalake/',
                '--region': CdkpyStack.of(self).region,
                '--controller_table_name': my_table.table_name
            },
            allocated_capacity=2,
            execution_property=_glue.CfnJob.ExecutionPropertyProperty(
                max_concurrent_runs=1))

        job_trigger = _glue.CfnTrigger(
            self,
            'datalake-glue-trigger',
            type='SCHEDULED',
            schedule=configuration['job_trigger_schedule'],
            start_on_creation=False,
            actions=[
                _glue.CfnTrigger.ActionProperty(job_name='IncrementalLoad-cdk')
            ])

        dl_sns = _sns.Topic(self, 'datalake_sns', display_name='data-lake-sns')

        endpoint_email = configuration['emailSubscriptionList']

        for emails in endpoint_email:
            dl_sns.add_subscription(_subscrption.EmailSubscription(emails))

        #Another way to subscribe: dl_subscription = _sns.Subscription(self,'email-subscrption',topic = dl_sns,endpoint='*****@*****.**',protocol= _sns.SubscriptionProtocol.EMAIL)

        glue_events_target = _events_targets.SnsTopic(dl_sns)

        glue_events_rule = _events.Rule(
            self,
            'gluejobevents-datalake',
            description='Using for tracking the failed glue job of data lake',
            rule_name='dl-gluejob-event',
            event_pattern=_events.EventPattern(
                source=['aws.glue'],
                detail_type=['Glue Job State Change'],
                detail={
                    "jobName": [initial_load_job.name],
                    "state": ["FAILED"]
                }),
            targets=[glue_events_target])

        dms_subscription = _dms.CfnEventSubscription(
            self,
            'dmsevents-datalake',
            sns_topic_arn=dl_sns.topic_arn,
            subscription_name='datalake-dmsevents',
            source_type='replication-task',
            event_categories=['failure'])
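
The stack reads ./props/config.json at synth time; from the keys referenced above, the file's expected shape is roughly this (every value here is illustrative):

{
    "s3LifecycleRule": {"enabled": true, "expiration": 90},
    "DMS_instance_setting": {"instance_class": "dms.t3.medium", "allocated_storage": 50},
    "engineName": "mysql",
    "databaseName": "sourcedb",
    "username": "admin",
    "password": "REPLACE_ME",
    "port": 3306,
    "serverName": "source-db.example.com",
    "tableList": [{"schemaName": "sales", "tableName": "orders"}],
    "executiveArn": "arn:aws:iam::111111111111:user/datalake-admin",
    "glue_job_setting": {"job_capacity": 5, "max_concurrent_run_JobExecution": 1},
    "job_trigger_schedule": "cron(0 2 * * ? *)",
    "emailSubscriptionList": ["ops@example.com"]
}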
Example No. 11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = create_notification_email_parameter(self)
        template_zip_name = create_template_zip_name_parameter(self)
        template_file_name = create_template_file_name_parameter(self)
        dev_params_file_name = create_stage_params_file_name_parameter(self, "DEV_PARAMS_NAME", "development")
        staging_params_file_name = create_stage_params_file_name_parameter(self, "STAGING_PARAMS_NAME", "staging")
        prod_params_file_name = create_stage_params_file_name_parameter(self, "PROD_PARAMS_NAME", "production")
        # create development parameters
        account_type = "development"
        dev_account_id = create_account_id_parameter(self, "DEV_ACCOUNT_ID", account_type)
        dev_org_id = create_org_id_parameter(self, "DEV_ORG_ID", account_type)
        # create staging parameters
        account_type = "staging"
        staging_account_id = create_account_id_parameter(self, "STAGING_ACCOUNT_ID", account_type)
        staging_org_id = create_org_id_parameter(self, "STAGING_ORG_ID", account_type)
        # create production parameters
        account_type = "production"
        prod_account_id = create_account_id_parameter(self, "PROD_ACCOUNT_ID", account_type)
        prod_org_id = create_org_id_parameter(self, "PROD_ORG_ID", account_type)
        # assets parameters
        blueprint_bucket_name = create_blueprint_bucket_name_parameter(self)
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        stack_name = create_stack_name_parameter(self)

        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)

        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # create sns topic and subscription
        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
        )

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_template(template_zip_name, assets_bucket)

        # DeployDev stage
        dev_deploy_lambda_arn, dev_stackset_action = create_stackset_action(
            self,
            "DeployDevStackSet",
            blueprint_bucket,
            source_output,
            "Artifact_Source_S3Source",
            template_file_name.value_as_string,
            dev_params_file_name.value_as_string,
            [dev_account_id.value_as_string],
            [dev_org_id.value_as_string],
            [core.Aws.REGION],
            assets_bucket,
            f"{stack_name.value_as_string}-dev",
        )

        # DeployStaging manual approval
        deploy_staging_approval = approval_action(
            "DeployStaging",
            pipeline_notification_topic,
            [notification_email.value_as_string],
            "Please approve to deploy to staging account",
        )

        # DeployStaging stage
        staging_deploy_lambda_arn, staging_stackset_action = create_stackset_action(
            self,
            "DeployStagingStackSet",
            blueprint_bucket,
            source_output,
            "Artifact_Source_S3Source",
            template_file_name.value_as_string,
            staging_params_file_name.value_as_string,
            [staging_account_id.value_as_string],
            [staging_org_id.value_as_string],
            [core.Aws.REGION],
            assets_bucket,
            f"{stack_name.value_as_string}-staging",
        )

        # DeployProd manual approval
        deploy_prod_approval = approval_action(
            "DeployProd",
            pipeline_notification_topic,
            [notification_email.value_as_string],
            "Please approve to deploy to production account",
        )

        # DeployProd stage
        prod_deploy_lambda_arn, prod_stackset_action = create_stackset_action(
            self,
            "DeployProdStackSet",
            blueprint_bucket,
            source_output,
            "Artifact_Source_S3Source",
            template_file_name.value_as_string,
            prod_params_file_name.value_as_string,
            [prod_account_id.value_as_string],
            [prod_org_id.value_as_string],
            [core.Aws.REGION],
            assets_bucket,
            f"{stack_name.value_as_string}-prod",
        )

        # create invoking lambda policy
        invoke_lambdas_policy = iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[dev_deploy_lambda_arn, staging_deploy_lambda_arn, prod_deploy_lambda_arn],
        )

        # creating pipeline stages
        source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])

        deploy_dev_stage = codepipeline.StageProps(
            stage_name="DeployDev",
            actions=[dev_stackset_action, deploy_staging_approval],
        )

        deploy_staging_stage = codepipeline.StageProps(
            stage_name="DeployStaging",
            actions=[staging_stackset_action, deploy_prod_approval],
        )

        deploy_prod_stage = codepipeline.StageProps(
            stage_name="DeployProd",
            actions=[prod_stackset_action],
        )

        # constructing multi-account pipeline
        multi_account_pipeline = codepipeline.Pipeline(
            self,
            "MultiAccountPipeline",
            stages=[source_stage, deploy_dev_stage, deploy_staging_stage, deploy_prod_stage],
            cross_account_keys=False,
        )
        # add notification to the development stackset action
        dev_stackset_action.on_state_change(
            "NotifyUserDevDeployment",
            description="Notify user of the outcome of the DeployDev action",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"DeployDev action {events.EventField.from_path('$.detail.action')} in the Pipeline "
                        f"{events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Action execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        # add notification to the staging stackset action
        staging_stackset_action.on_state_change(
            "NotifyUserStagingDeployment",
            description="Notify user of the outcome of the DeployStaging action",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"DeployStaging action {events.EventField.from_path('$.detail.action')} in the Pipeline "
                        f"{events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Action execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        # add notification to the production stackset action
        prod_stackset_action.on_state_change(
            "NotifyUserProdDeployment",
            description="Notify user of the outcome of the DeployProd action",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"DeployProd action {events.EventField.from_path('$.detail.action')} in the Pipeline "
                        f"{events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Action execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        # add notification to the multi-account pipeline
        multi_account_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        multi_account_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            )
        )

        # add Lambda permissions
        multi_account_pipeline.add_to_role_policy(invoke_lambdas_policy)

        # add cfn-nag suppressions

        pipeline_child_nodes = multi_account_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[32].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[45].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # attaching iam permissions to the pipelines
        pipeline_permissions(multi_account_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{multi_account_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = core.CfnParameter(
            self,
            "NOTIFICATION_EMAIL",
            type="String",
            description="email for pipeline outcome notifications",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            constraint_description="Please enter an email address with correct format (e.g., user@example.com)",
            min_length=5,
            max_length=320,
        )
        blueprint_bucket_name = core.CfnParameter(
            self,
            "BLUEPRINT_BUCKET",
            type="String",
            description="Bucket name for blueprints of different types of ML Pipelines.",
            min_length=3,
        )
        assets_bucket_name = core.CfnParameter(
            self, "ASSETS_BUCKET", type="String", description="Bucket name for access logs.", min_length=3
        )
        custom_container = core.CfnParameter(
            self,
            "CUSTOM_CONTAINER",
            default="",
            type="String",
            description=(
                "Should point to a zip file containing dockerfile and assets for building a custom model. "
                "If empty, it will be using containers from SageMaker Registry"
            ),
        )
        model_framework = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK",
            default="",
            type="String",
            description="The ML framework which is used for training the model. E.g., xgboost, kmeans, etc.",
        )
        model_framework_version = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK_VERSION",
            default="",
            type="String",
            description="The version of the ML framework which is used for training the model. E.g., 1.1-2",
        )
        model_name = core.CfnParameter(
            self, "MODEL_NAME", type="String", description="An arbitrary name for the model.", min_length=1
        )
        model_artifact_location = core.CfnParameter(
            self,
            "MODEL_ARTIFACT_LOCATION",
            type="String",
            description="Path to model artifact inside assets bucket.",
        )
        inference_instance = core.CfnParameter(
            self,
            "INFERENCE_INSTANCE",
            type="String",
            description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
            allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=7,
        )
        # Resources #

        # access_bucket = s3.Bucket.from_bucket_name(self, "AccessBucket", access_bucket_name.value_as_string)
        assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_custom(
            model_artifact_location, assets_bucket, custom_container
        )

        # build stage
        build_action_definition, container_uri = build_action(self, source_output)

        # deploy stage
        sm_layer = sagemaker_layer(self, blueprint_bucket)
        # creating a sagemaker model
        model_lambda_arn, create_model_definition = create_model(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            model_artifact_location,
            custom_container,
            model_framework,
            model_framework_version,
            container_uri,
            sm_layer,
        )
        # creating a sagemaker endpoint
        endpoint_lambda_arn, create_endpoint_definition = create_endpoint(
            self, blueprint_bucket, assets_bucket, model_name, inference_instance
        )
        # Share stage
        configure_lambda_arn, configure_inference_definition = configure_inference(self, blueprint_bucket)

        # create invoking lambda policy
        invoke_lambdas_policy = iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[model_lambda_arn, endpoint_lambda_arn, configure_lambda_arn],
        )

        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
        )

        # creating pipeline stages
        source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])
        build_stage = codepipeline.StageProps(stage_name="Build", actions=[build_action_definition])
        deploy_stage_realtime = codepipeline.StageProps(
            stage_name="Deploy",
            actions=[
                create_model_definition,
                create_endpoint_definition,
            ],
        )
        share_stage = codepipeline.StageProps(stage_name="Share", actions=[configure_inference_definition])

        realtime_build_pipeline = codepipeline.Pipeline(
            self,
            "BYOMPipelineReatimeBuild",
            stages=[source_stage, build_stage, deploy_stage_realtime, share_stage],
            cross_account_keys=False,
        )
        realtime_build_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        realtime_build_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            )
        )
        # add Lambda permissions
        realtime_build_pipeline.add_to_role_policy(invoke_lambdas_policy)
        # Enhancement: This is to find CDK object nodes so that unnecessary cfn-nag warnings
        # can be suppressed. There is room to improve this in future versions by locating
        # CDK nodes without having to use hardcoded index numbers.
        pipeline_child_nodes = realtime_build_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[25].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[30].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[36].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # attaching iam permissions to the pipelines
        pipeline_permissions(realtime_build_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{realtime_build_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )
        core.CfnOutput(
            self,
            id="SageMakerModelName",
            value=model_name.value_as_string,
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointConfigName",
            value=f"{model_name.value_as_string}-endpoint-config",
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointName",
            value=f"{model_name.value_as_string}-endpoint",
        )
        core.CfnOutput(
            self,
            id="EndpointDataCaptureLocation",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_bucket.bucket_name}/datacapture",
            description="Endpoint data capture location (to be used by Model Monitor)",
        )
Exemplo n.º 13
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = create_notification_email_parameter(self)
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        custom_container = create_custom_container_parameter(self)
        ecr_repo_name = create_ecr_repo_name_parameter(self)
        image_tag = create_image_tag_parameter(self)

        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(
            self, "AssetsBucket", assets_bucket_name.value_as_string)

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_custom(
            assets_bucket, custom_container)

        # build stage
        build_action_definition, container_uri = build_action(
            self, ecr_repo_name.value_as_string, image_tag.value_as_string,
            source_output)

        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(
                email_address=notification_email.value_as_string))
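        # Note: SNS email subscriptions remain pending until the recipient
        # confirms them, so no notifications are delivered before that.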

        # creating pipeline stages
        source_stage = codepipeline.StageProps(
            stage_name="Source", actions=[source_action_definition])
        build_stage = codepipeline.StageProps(
            stage_name="Build", actions=[build_action_definition])

        image_builder_pipeline = codepipeline.Pipeline(
            self,
            "BYOMPipelineReatimeBuild",
            stages=[source_stage, build_stage],
            cross_account_keys=False,
        )
        image_builder_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text((
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                    f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )),
            ),
            event_pattern=events.EventPattern(
                detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        image_builder_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ))
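        # The statement above lets the pipeline role publish events to any
        # EventBridge event bus in this account/region; EventBridge carries
        # the state-change notifications configured above.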

        # add cfn-nag suppressions
        pipeline_child_nodes = image_builder_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        # attaching iam permissions to the pipelines
        pipeline_permissions(image_builder_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{image_builder_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )
        core.CfnOutput(
            self,
            id="CustomAlgorithmImageURI",
            value=container_uri,
        )
Exemplo n.º 14
0
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket for artifacts
        artifacts_bucket = aws_s3.Bucket(
            self,
            "artifacts-bucket",
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            encryption=aws_s3.BucketEncryption.S3_MANAGED)

        # create codebuild project that executes a long-running script
        build_project = aws_codebuild.Project(
            self,
            "long-running-script-build-project",
            environment_variables={
                "S3_ARTIFACTS_BUCKET": {
                    "value": artifacts_bucket.bucket_name
                },
                "S3_ARTIFACTS_OBJECT": {
                    "value": "script.py"
                }
            },
            environment=aws_codebuild.BuildEnvironment(
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_3_0),
            timeout=cdk.Duration.hours(1),
            build_spec=aws_codebuild.BuildSpec.from_object({
                "version": "0.2",
                "phases": {
                    "install": {
                        "runtime-versions": {
                            "python": 3.8
                        }
                    },
                    "build": {
                        "commands": [
                            "aws s3 cp s3://$S3_ARTIFACTS_BUCKET/$S3_ARTIFACTS_OBJECT $S3_ARTIFACTS_OBJECT",
                            "python $S3_ARTIFACTS_OBJECT"
                        ]
                    }
                }
            }))
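        # The build simply copies script.py from the artifacts bucket and runs
        # it; the cdk.Duration.hours(1) timeout above caps the script runtime.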
        # grant read access of the artifacts bucket to the codebuild role
        artifacts_bucket.grant_read(build_project.role)

        # create eventbridge rule to trigger codebuild project
        long_running_script_rule = aws_events.Rule(
            self,
            "long-running-script-build-trigger",
            schedule=aws_events.Schedule.rate(cdk.Duration.hours(1)))
        long_running_script_rule.add_target(
            targets.CodeBuildProject(build_project))

        # create sns topic as part of downstream services after codebuild project completes
        sns_topic = aws_sns.Topic(self, "script-completes-topic")

        # create eventbridge rule to publish to sns topic once codebuild project finishes (either succeeded, failed or stopped)
        codebuild_completes_rule = aws_events.Rule(
            self,
            "codebuild-scripts-complete-rule",
            event_pattern=aws_events.EventPattern(
                source=["aws.codebuild"],
                detail_type=["CodeBuild Build State Change"],
                detail={
                    "build-status": ["SUCCEEDED", "FAILED", "STOPPED"],
                    "project-name": [build_project.project_name]
                }))
        codebuild_completes_rule.add_target(targets.SnsTopic(sns_topic))

        cdk.CfnOutput(self,
                      "artifacts-bucket-output",
                      value=artifacts_bucket.bucket_name)
        cdk.CfnOutput(self,
                      "script-complete-topic-output",
                      value=sns_topic.topic_arn)
Exemplo n.º 15
0
    def __init__(self, scope: core.Construct, id: str, params: dict, **kwargs):  # pylint: disable=W0622
        super().__init__(scope, id, **kwargs)
        # pylint: disable=no-value-for-parameter
        stack = core.Stack.of(self)
        _pipeline = _codepipeline.Pipeline.from_pipeline_arn(
            self, 'pipeline', params["pipeline"])
        _source_account = params.get('source', {}).get('account_id')
        _provider = params.get('source', {}).get('provider')
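        # Only wire up an EventBridge trigger when the source is a CodeCommit
        # repository that is change-triggered rather than polled for changes.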
        _add_trigger_on_changes = (
            _provider == 'codecommit' and _source_account
            and params.get('source', {}).get('trigger_on_changes')
            and not params.get('source', {}).get('poll_for_changes'))

        name = params.get('name')
        account_id = params['source']['account_id']
        repo_name = params['source']['repo_name']

        if _add_trigger_on_changes:
            _event = _events.Rule(
                self,
                f'trigger_{name}',
                description=f'Triggers {name} on changes in source CodeCommit repository',
                event_pattern=_events.EventPattern(
                    resources=[
                        f'arn:{stack.partition}:codecommit:{ADF_DEPLOYMENT_REGION}:{account_id}:{repo_name}'
                    ],
                    source=["aws.codecommit"],
                    detail_type=['CodeCommit Repository State Change'],
                    detail={
                        "event": ["referenceCreated", "referenceUpdated"],
                        "referenceType": ["branch"],
                        "referenceName": [params['source']['branch']]
                    }))
            _event.add_target(_targets.CodePipeline(pipeline=_pipeline))
        if params.get('topic_arn'):
            # pylint: disable=no-value-for-parameter
            _topic = _sns.Topic.from_topic_arn(self, 'topic_arn', params["topic_arn"])
            _event = _events.Rule(
                self,
                f'pipeline_state_{name}',
                description=f"{name} | Trigger notifications based on pipeline state changes",
                enabled=True,
                event_pattern=_events.EventPattern(
                    detail={
                        "state": ["FAILED", "STARTED", "SUCCEEDED"],
                        "pipeline": [
                            f"{ADF_PIPELINE_PREFIX}{name}",
                        ]
                    },
                    detail_type=["CodePipeline Pipeline Execution State Change"],
                    source=["aws.codepipeline"]))
            _event.add_target(
                _targets.SnsTopic(
                    topic=_topic,
                    message=_events.RuleTargetInput.from_text(
                        # Need to parse and get the pipeline: "$.detail.pipeline" state: "$.detail.state"
                        f"The pipeline {_events.EventField.from_path('$.detail.pipeline')} "
                        f"from account {_events.EventField.account} "
                        f"has {_events.EventField.from_path('$.detail.state')} "
                        f"at {_events.EventField.time}.")))
        if params.get('completion_trigger'):
            # There might be other types of completion triggers later, eg lambda..
            for index, pipeline in enumerate(params['completion_trigger'].get('pipelines', [])):
                _event = _events.Rule(
                    self,
                    f'completion_{pipeline}',
                    description=f"Triggers {pipeline} on completion of {params['pipeline']}",
                    enabled=True,
                    event_pattern=_events.EventPattern(
                        detail={
                            "state": ["SUCCEEDED"],
                            "pipeline": [
                                f"{ADF_PIPELINE_PREFIX}{name}",
                            ]
                        },
                        detail_type=["CodePipeline Pipeline Execution State Change"],
                        source=["aws.codepipeline"]))
                # pylint: disable=no-value-for-parameter
                _completion_pipeline = _codepipeline.Pipeline.from_pipeline_arn(
                    self, f'pipeline-{index}',
                    f'arn:{stack.partition}:codepipeline:'
                    f'{ADF_DEPLOYMENT_REGION}:{ADF_DEPLOYMENT_ACCOUNT_ID}:'
                    f'{ADF_PIPELINE_PREFIX}{pipeline}')
                _event.add_target(
                    _targets.CodePipeline(pipeline=_completion_pipeline))
        if params.get('schedule'):
            _event = _events.Rule(
                self,
                f'schedule_{params["name"]}',
                description=f"Triggers {params['name']} on a schedule of {params['schedule']}",
                enabled=True,
                # pylint: disable=no-value-for-parameter
                schedule=_events.Schedule.expression(params['schedule']))
            _target_pipeline = _targets.CodePipeline(pipeline=_pipeline)
            _event.add_target(_target_pipeline)
Exemplo n.º 16
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        CLUSTER_NAME = self.node.try_get_context("cluster_name")
        NOTIFY_EMAIL = self.node.try_get_context("notify_email")
        SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
        WHITE_LIST_GROUP = self.node.try_get_context("white_list_group")

        if (not CLUSTER_NAME or not NOTIFY_EMAIL or not SLACK_WEBHOOK_URL
                or not WHITE_LIST_GROUP):
            logger.error(
                f"Required context variables for {id} were not provided!")
        else:
            # Create explicit deny policy
            policy = iam.ManagedPolicy(
                self,
                "InAur02RdsDenyPolicy",
                managed_policy_name="InAur02RdsDenyPolicy",
                statements=[
                    iam.PolicyStatement(
                        actions=["rds:*", "iam:*"],
                        effect=iam.Effect.DENY,
                        resources=["*"],
                    )
                ],
            )
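            # The response Lambda below attaches this deny policy to offending
            # IAM users: its role gets iam:AttachUserPolicy and the policy ARN
            # is passed in through the environment.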

            # Create lambda function
            lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                           "in_aur_02")
            lambda_func = _lambda.Function(
                self,
                "InAur02ResponseFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="response_lambda.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                environment={
                    "webhook_url": SLACK_WEBHOOK_URL,
                    "policy_arn": policy.managed_policy_arn,
                    "cluster_name": CLUSTER_NAME,
                    "white_list_group": WHITE_LIST_GROUP,
                },
            )
            # Assign permissions to response lambda
            lambda_func.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["iam:AttachUserPolicy", "iam:GetGroup"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))

            # Create new SNS topic
            topic = sns.Topic(self, "InAur02DetectionTopic")

            # Add email subscription
            topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))

            # Create new event rule to trigger lambda
            # when there are AWS RDS API calls
            events.Rule(
                self,
                "InAur02DetectionEventRule",
                event_pattern=events.EventPattern(
                    source=["aws.rds"],
                    detail_type=["AWS API Call via CloudTrail"],
                    detail={"eventSource": ["rds.amazonaws.com"]},
                ),
                targets=[
                    targets.LambdaFunction(handler=lambda_func),
                    targets.SnsTopic(topic),
                ],
            )
Exemplo n.º 17
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        security_distribution_list_email = '*****@*****.**'

        # securityhub_instance = securityhub.CfnHub(self, 'SecurityHub')

        # Ensure AWS Config is enabled / Ensure CloudTrail is enabled in all Regions 2.1 - 2.8
        cloudtrail_bucket_accesslogs = s3.Bucket(
            self,
            "CloudTrailS3Accesslogs",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.RETAIN)

        cloudtrail_bucket = s3.Bucket(
            self,
            "CloudTrailS3",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.RETAIN,
            server_access_logs_bucket=cloudtrail_bucket_accesslogs,
        )

        cloudtrail_kms = kms.Key(self,
                                 "CloudTrailKey",
                                 enable_key_rotation=True)

        # CloudTrail - single account, not Organization
        trail = cloudtrail.Trail(
            self,
            "CloudTrail",
            enable_file_validation=True,
            is_multi_region_trail=True,
            include_global_service_events=True,
            send_to_cloud_watch_logs=True,
            cloud_watch_logs_retention=logs.RetentionDays.FOUR_MONTHS,
            bucket=cloudtrail_bucket,
            kms_key=cloudtrail_kms)

        cloudtrail_kms.grant(iam.ServicePrincipal('cloudtrail.amazonaws.com'),
                             'kms:DescribeKey')

        cloudtrail_kms.grant(
            iam.ServicePrincipal(
                'cloudtrail.amazonaws.com',
                conditions={
                    'StringLike': {
                        'kms:EncryptionContext:aws:cloudtrail:arn':
                        'arn:aws:cloudtrail:*:' + core.Stack.of(self).account +
                        ':trail/*'
                    }
                }), 'kms:GenerateDataKey*')
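        # The encryption-context condition scopes CloudTrail's use of this key
        # to trails belonging to this account; the resource-policy statements
        # below apply the same pattern.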

        cloudtrail_kms.add_to_resource_policy(
            iam.PolicyStatement(
                actions=["kms:Decrypt", "kms:ReEncryptFrom"],
                conditions={
                    'StringEquals': {
                        'kms:CallerAccount': core.Stack.of(self).account
                    },
                    'StringLike': {
                        'kms:EncryptionContext:aws:cloudtrail:arn':
                        'arn:aws:cloudtrail:*:' + core.Stack.of(self).account +
                        ':trail/*'
                    }
                },
                effect=iam.Effect.ALLOW,
                principals=[iam.AnyPrincipal()],
                resources=['*']))

        cloudtrail_kms.add_to_resource_policy(
            iam.PolicyStatement(actions=["kms:CreateAlias"],
                                conditions={
                                    'StringEquals': {
                                        'kms:CallerAccount':
                                        core.Stack.of(self).account,
                                        'kms:ViaService':
                                        'ec2.' + core.Stack.of(self).region +
                                        '.amazonaws.com'
                                    }
                                },
                                effect=iam.Effect.ALLOW,
                                principals=[iam.AnyPrincipal()],
                                resources=['*']))

        config_role = iam.CfnServiceLinkedRole(
            self,
            id='ServiceLinkedRoleConfig',
            aws_service_name='config.amazonaws.com')

        global_config = config.CfnConfigurationRecorder(
            self, 'ConfigRecorder',
            name='default',
            # role_arn=config_role.role_arn,
            role_arn=("arn:aws:iam::" + core.Stack.of(self).account +
                      ":role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig"),
            # role_arn=config_role.get_att(
            #     attribute_name='resource.arn').to_string(),
            recording_group=config.CfnConfigurationRecorder.RecordingGroupProperty(
                all_supported=True,
                include_global_resource_types=True
            )
        )

        config_bucket = s3.Bucket(
            self,
            "ConfigS3",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.RETAIN,
        )

        config_bucket.add_to_resource_policy(
            iam.PolicyStatement(
                actions=['s3:GetBucketAcl'],
                effect=iam.Effect.ALLOW,
                principals=[iam.ServicePrincipal('config.amazonaws.com')],
                resources=[config_bucket.bucket_arn]))

        config_bucket.add_to_resource_policy(
            iam.PolicyStatement(
                actions=['s3:PutObject'],
                effect=iam.Effect.ALLOW,
                principals=[iam.ServicePrincipal('config.amazonaws.com')],
                resources=[
                    config_bucket.arn_for_objects('AWSLogs/' +
                                                  core.Stack.of(self).account +
                                                  '/Config/*')
                ],
                conditions={
                    "StringEquals": {
                        's3:x-amz-acl': 'bucket-owner-full-control',
                    }
                }))

        config_delivery_stream = config.CfnDeliveryChannel(
            self,
            "ConfigDeliveryChannel",
            s3_bucket_name=config_bucket.bucket_name)

        # Config Aggregator in Organizations account
        # config_aggregator = config.CfnConfigurationAggregator(self, 'ConfigAggregator',
        #                                                       configuration_aggregator_name='ConfigAggregator',
        #                                                       organization_aggregation_source=config.CfnConfigurationAggregator.OrganizationAggregationSourceProperty(
        #                                                           role_arn=iam.Role(self, "AWSConfigRoleForOrganizations",
        #                                                                             assumed_by=iam.ServicePrincipal(
        #                                                                                 'config.amazonaws.com'),
        #                                                                             managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name(
        #                                                                                 'service-role/AWSConfigRoleForOrganizations')]
        #                                                                             ).role_arn,
        #                                                           all_aws_regions=True
        #                                                       )
        #                                                       )

        # 2.9 – Ensure VPC flow logging is enabled in all VPCs
        # vpc = ec2.Vpc.from_lookup(self, "VPC",
        #                           is_default=True,
        #                           )

        # S3 for VPC flow logs
        # vpc_flow_logs_bucket = s3.Bucket(self, "VPCFlowLogsBucket",
        #                                  block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        #                                  encryption=s3.BucketEncryption.S3_MANAGED,
        #                                  removal_policy=core.RemovalPolicy.RETAIN
        #                                  )

        # Ensure a log metric filter and alarm exist for 3.1 – 3.14
        security_notifications_topic = sns.Topic(self,
                                                 'CIS_Topic',
                                                 display_name='CIS_Topic',
                                                 topic_name='CIS_Topic')

        sns.Subscription(self,
                         'CIS_Subscription',
                         topic=security_notifications_topic,
                         protocol=sns.SubscriptionProtocol.EMAIL,
                         endpoint=security_distribution_list_email)

        cloudwatch_actions_cis = cloudwatch_actions.SnsAction(
            security_notifications_topic)

        cis_metricfilter_alarms = {
            'CIS-3.1-UnauthorizedAPICalls':
            '($.errorCode="*UnauthorizedOperation") || ($.errorCode="AccessDenied*")',
            'CIS-3.2-ConsoleSigninWithoutMFA':
            '($.eventName="ConsoleLogin") && ($.additionalEventData.MFAUsed !="Yes")',
            'RootAccountUsageAlarm':
            '$.userIdentity.type="Root" && $.userIdentity.invokedBy NOT EXISTS && $.eventType !="AwsServiceEvent"',
            'CIS-3.4-IAMPolicyChanges':
            '($.eventName=DeleteGroupPolicy) || ($.eventName=DeleteRolePolicy) || ($.eventName=DeleteUserPolicy) || ($.eventName=PutGroupPolicy) || ($.eventName=PutRolePolicy) || ($.eventName=PutUserPolicy) || ($.eventName=CreatePolicy) || ($.eventName=DeletePolicy) || ($.eventName=CreatePolicyVersion) || ($.eventName=DeletePolicyVersion) || ($.eventName=AttachRolePolicy) || ($.eventName=DetachRolePolicy) || ($.eventName=AttachUserPolicy) || ($.eventName=DetachUserPolicy) || ($.eventName=AttachGroupPolicy) || ($.eventName=DetachGroupPolicy)',
            'CIS-3.5-CloudTrailChanges':
            '($.eventName=CreateTrail) || ($.eventName=UpdateTrail) || ($.eventName=DeleteTrail) || ($.eventName=StartLogging) || ($.eventName=StopLogging)',
            'CIS-3.6-ConsoleAuthenticationFailure':
            '($.eventName=ConsoleLogin) && ($.errorMessage="Failed authentication")',
            'CIS-3.7-DisableOrDeleteCMK':
            '($.eventSource=kms.amazonaws.com) && (($.eventName=DisableKey) || ($.eventName=ScheduleKeyDeletion))',
            'CIS-3.8-S3BucketPolicyChanges':
            '($.eventSource=s3.amazonaws.com) && (($.eventName=PutBucketAcl) || ($.eventName=PutBucketPolicy) || ($.eventName=PutBucketCors) || ($.eventName=PutBucketLifecycle) || ($.eventName=PutBucketReplication) || ($.eventName=DeleteBucketPolicy) || ($.eventName=DeleteBucketCors) || ($.eventName=DeleteBucketLifecycle) || ($.eventName=DeleteBucketReplication))',
            'CIS-3.9-AWSConfigChanges':
            '($.eventSource=config.amazonaws.com) && (($.eventName=StopConfigurationRecorder) || ($.eventName=DeleteDeliveryChannel) || ($.eventName=PutDeliveryChannel) || ($.eventName=PutConfigurationRecorder))',
            'CIS-3.10-SecurityGroupChanges':
            '($.eventName=AuthorizeSecurityGroupIngress) || ($.eventName=AuthorizeSecurityGroupEgress) || ($.eventName=RevokeSecurityGroupIngress) || ($.eventName=RevokeSecurityGroupEgress) || ($.eventName=CreateSecurityGroup) || ($.eventName=DeleteSecurityGroup)',
            'CIS-3.11-NetworkACLChanges':
            '($.eventName=CreateNetworkAcl) || ($.eventName=CreateNetworkAclEntry) || ($.eventName=DeleteNetworkAcl) || ($.eventName=DeleteNetworkAclEntry) || ($.eventName=ReplaceNetworkAclEntry) || ($.eventName=ReplaceNetworkAclAssociation)',
            'CIS-3.12-NetworkGatewayChanges':
            '($.eventName=CreateCustomerGateway) || ($.eventName=DeleteCustomerGateway) || ($.eventName=AttachInternetGateway) || ($.eventName=CreateInternetGateway) || ($.eventName=DeleteInternetGateway) || ($.eventName=DetachInternetGateway)',
            'CIS-3.13-RouteTableChanges':
            '($.eventName=CreateRoute) || ($.eventName=CreateRouteTable) || ($.eventName=ReplaceRoute) || ($.eventName=ReplaceRouteTableAssociation) || ($.eventName=DeleteRouteTable) || ($.eventName=DeleteRoute) || ($.eventName=DisassociateRouteTable)',
            'CIS-3.14-VPCChanges':
            '($.eventName=CreateVpc) || ($.eventName=DeleteVpc) || ($.eventName=ModifyVpcAttribute) || ($.eventName=AcceptVpcPeeringConnection) || ($.eventName=CreateVpcPeeringConnection) || ($.eventName=DeleteVpcPeeringConnection) || ($.eventName=RejectVpcPeeringConnection) || ($.eventName=AttachClassicLinkVpc) || ($.eventName=DetachClassicLinkVpc) || ($.eventName=DisableVpcClassicLink) || ($.eventName=EnableVpcClassicLink)',
        }
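        # Create one metric filter and one alarm per CIS control: each filter
        # publishes a LogMetrics/<control> metric from the CloudTrail log
        # group, and the alarm notifies the CIS topic when the metric is >= 1
        # within a 5-minute window.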
        for x, y in cis_metricfilter_alarms.items():
            str_x = str(x)
            str_y = str(y)
            logs.MetricFilter(
                self,
                "MetricFilter_" + str_x,
                log_group=trail.log_group,
                filter_pattern=logs.FilterPattern.literal(str_y),
                metric_name=str_x,
                metric_namespace="LogMetrics",
                metric_value='1')
            cloudwatch.Alarm(
                self,
                "Alarm_" + str_x,
                alarm_name=str_x,
                alarm_description=str_x,
                statistic='Sum',
                period=core.Duration.minutes(5),
                comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
                evaluation_periods=1,
                threshold=1,
                metric=cloudwatch.Metric(metric_name=str_x,
                                         namespace="LogMetrics"),
            ).add_alarm_action(cloudwatch_actions_cis)

        # IAM Password Policy custom resource CIS 1.5 - 1.11
        cfn_template = cfn_inc.CfnInclude(
            self,
            "includeTemplate",
            template_file="account-password-policy.yaml",
            parameters={
                "MaxPasswordAge": 90,
                "MinimumPasswordLength": 14,
                "PasswordReusePrevention": 24,
                "RequireLowercaseCharacters": True,
                "RequireNumbers": True,
                "RequireSymbols": True,
                "RequireUppercaseCharacters": True,
            })

        # CIS 1.20
        support_role = iam.Role(
            self,
            "SupportRole",
            assumed_by=iam.AccountPrincipal(
                account_id=core.Stack.of(self).account),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AWSSupportAccess')
            ],
            role_name='AWSSupportAccess')

        guardduty_detector = guardduty.CfnDetector(self,
                                                   'GuardDutyDetector',
                                                   enable=True)

        guardduty_event = events.Rule(
            self,
            'GuardDutyEvent',
            rule_name='guardduty-notification',
            description='GuardDuty Notification',
            event_pattern=events.EventPattern(
                source=['aws.guardduty'], detail_type=['GuardDuty Finding']),
            targets=[events_targets.SnsTopic(security_notifications_topic)])
Exemplo n.º 18
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = core.CfnParameter(
            self,
            "NOTIFICATION_EMAIL",
            type="String",
            description="email for pipeline outcome notifications",
            allowed_pattern=r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
            constraint_description="Please enter an email address with correct format ([email protected])",
            min_length=5,
            max_length=320)
        blueprint_bucket_name = core.CfnParameter(
            self,
            "BLUEPRINT_BUCKET",
            type="String",
            description="Bucket name for blueprints of different types of ML Pipelines.",
            min_length=3)
        access_bucket_name = core.CfnParameter(
            self,
            "ACCESS_BUCKET",
            type="String",
            description="Bucket name for access logs.",
            min_length=3)
        custom_container = core.CfnParameter(
            self,
            "CUSTOM_CONTAINER",
            type="String",
            description="Should point to a zip file containing dockerfile and assets for building a custom model. If empty, containers from the SageMaker Registry will be used.",
        )
        model_framework = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK",
            type="String",
            description="The ML framework which is used for training the model. E.g., xgboost, kmeans, etc.",
        )
        model_framework_version = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK_VERSION",
            type="String",
            description="The version of the ML framework which is used for training the model. E.g., 1.1-2",
        )
        model_name = core.CfnParameter(
            self,
            "MODEL_NAME",
            type="String",
            description="An arbitrary name for the model.",
            min_length=1)
        model_artifact_location = core.CfnParameter(
            self,
            "MODEL_ARTIFACT_LOCATION",
            type="String",
            description="Path to model artifact inside assets bucket.",
        )
        training_data = core.CfnParameter(
            self,
            "TRAINING_DATA",
            type="String",
            description="Location of the training data in PipelineAssets S3 Bucket.",
        )
        inference_instance = core.CfnParameter(
            self,
            "INFERENCE_INSTANCE",
            type="String",
            description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
            allowed_pattern=r'^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
            min_length=7)
        inference_type = core.CfnParameter(
            self,
            "INFERENCE_TYPE",
            type="String",
            allowed_values=["batch", "realtime"],
            default="realtime",
            description="Type of inference. Possible values: batch | realtime",
        )
        batch_inference_data = core.CfnParameter(
            self,
            "BATCH_INFERENCE_DATA",
            type="String",
            default="",
            description="Location of batch inference data if inference type is set to batch. Otherwise, can be left empty.",
        )

        # Resources #

        access_bucket = s3.Bucket.from_bucket_name(
            self, "AccessBucket", access_bucket_name.value_as_string)
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(
            self, "BlueprintBucket", blueprint_bucket_name.value_as_string)
        # Creating assets bucket so that users can upload ML Models to it.
        assets_bucket = s3.Bucket(
            self,
            "pipeline-assets-" + str(uuid.uuid4()),
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_bucket,
            server_access_logs_prefix="BYOMBatchBuiltinStack",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
        assets_bucket.node.default_child.cfn_options.metadata = suppress_assets_bucket()

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action(
            model_artifact_location, assets_bucket)

        # deploy stage
        sm_layer = sagemaker_layer(self, blueprint_bucket)
        # creating a sagemaker model
        create_model_definition = create_model(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            model_artifact_location,
            custom_container,
            model_framework,
            model_framework_version,
            "",
            sm_layer,
        )
        # creating a batch transform job
        batch_transform_definition = batch_transform(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            inference_instance,
            batch_inference_data,
            sm_layer,
        )

        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(
                email_address=notification_email.value_as_string))

        # creating pipeline stages
        source_stage = codepipeline.StageProps(
            stage_name="Source", actions=[source_action_definition])
        deploy_stage_batch = codepipeline.StageProps(
            stage_name="Deploy",
            actions=[create_model_definition, batch_transform_definition],
        )

        batch_nobuild_pipeline = codepipeline.Pipeline(
            self,
            "BYOMPipelineBatchBuiltIn",
            stages=[source_stage, deploy_stage_batch],
            cross_account_keys=False,
        )
        batch_nobuild_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        batch_nobuild_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ]))
        # Enhancement: This is to find CDK object nodes so that unnecessary cfn-nag warnings can be suppressed
        # There is room for improving the method in future versions to find CDK nodes without having to use
        # hardcoded index numbers
        pipeline_child_nodes = batch_nobuild_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[24].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # pipeline_child_nodes[30].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # attaching iam permissions to the pipeline
        pipeline_permissions(batch_nobuild_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="AssetsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_bucket.bucket_name}",
            description="S3 Bucket to upload model artifact")
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{batch_nobuild_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )
Exemplo n.º 19
0
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        high_cpu_topic = sns.Topic(self, 'high-cpu-topic',
                                   display_name='myHighCpuAlarm')
        # phone number format must be 12225558888 for US
        phone_param = ssm.StringParameter.from_string_parameter_name(self, 'phone-param',
                                                                     'notification-phone')
        high_cpu_topic_sub = sns.Subscription(self, 'high-cpu-topic-sub',
                                              topic=high_cpu_topic,
                                              protocol=sns.SubscriptionProtocol.SMS,
                                              endpoint=phone_param.string_value)

        default_vpc = ec2.Vpc.from_lookup(self, 'default-vpc', is_default=True)
        monitored_instance = ec2.Instance(self, 'monitored-instance',
                                          instance_name='devassoc-monitored',
                                          instance_type=ec2.InstanceType('r3.xlarge'),
                                          machine_image=ec2.MachineImage.generic_linux(
                                              ami_map=ami_map
                                          ),
                                          vpc=default_vpc)

        high_cpu_metric = cw.Metric(namespace='AWS/EC2',
                                    metric_name='CPUUtilization',
                                    dimensions={
                                        'InstanceId': monitored_instance.instance_id
                                    },
                                    statistic='Average',
                                    unit=cw.Unit.PERCENT,
                                    period=core.Duration.seconds(300))
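        # With a 300-second period and 2 evaluation periods, the alarm only
        # fires after CPU stays above the 70% threshold for ~10 minutes.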
        high_cpu_alarm = high_cpu_metric.create_alarm(self, 'high-cpu-alarm',
                                                      alarm_name='cpu-mon',
                                                      alarm_description='Alarm when CPU exceeds 70%',
                                                      comparison_operator=cw.ComparisonOperator.GREATER_THAN_THRESHOLD,
                                                      evaluation_periods=2,
                                                      period=core.Duration.seconds(300),
                                                      threshold=70,
                                                      actions_enabled=True)
        high_cpu_action = cwa.SnsAction(high_cpu_topic)
        high_cpu_alarm.add_alarm_action(high_cpu_action)

        ec2.CfnEIP(self, 'devassoc-elastic-ip')

        # not really a service role, but there are problems with that, per
        # https://github.com/aws/aws-cdk/issues/3492
        config_service_role = iam.Role(self, 'devassoc-config-service-role',
                                       assumed_by=iam.ServicePrincipal('config.amazonaws.com'),
                                       managed_policies=[
                                           iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSConfigRole')
                                       ])
        config_recorder = config.CfnConfigurationRecorder(self, 'devassoc-recorder',
                                                          name='ConfigRecorder',
                                                          role_arn=config_service_role.role_arn,
                                                          recording_group=config.CfnConfigurationRecorder.RecordingGroupProperty(
                                                              all_supported=True)
                                                          )
        config_bucket = s3.Bucket(self, 'config-bucket',
                                  bucket_name='devassoc-config',
                                  removal_policy=core.RemovalPolicy.DESTROY,
                                  auto_delete_objects=True)
        config_bucket.add_to_resource_policy(iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                                                 principals=[iam.ServicePrincipal('config.amazonaws.com')],
                                                                 resources=[config_bucket.bucket_arn],
                                                                 actions=['s3:GetBucketAcl']))
        config_bucket.add_to_resource_policy(iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                                                 principals=[iam.ServicePrincipal('config.amazonaws.com')],
                                                                 resources=[config_bucket.arn_for_objects(
                                                                     f"AWSLogs/{core.Stack.of(self).account}/Config/*")],
                                                                 actions=['s3:PutObject'],
                                                                 conditions={'StringEquals': {
                                                                     's3:x-amz-acl': 'bucket-owner-full-control'}}))
        eip_rule = config.ManagedRule(self, 'devassoc-managed-rule',
                                      identifier=config.ManagedRuleIdentifiers.EIP_ATTACHED,
                                      config_rule_name='devassoc-eip-rule')
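        # A Config rule cannot be deployed before the configuration recorder
        # exists, so make that ordering explicit.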
        eip_rule.node.add_dependency(config_recorder)
        eip_compliance_topic = sns.Topic(self, 'eip-compliance-topic',
                                         display_name='EIP Compliance Topic')
        eip_compliance_topic_sub = sns.Subscription(self, 'eip-compliance-topic-sub',
                                                    topic=eip_compliance_topic,
                                                    protocol=sns.SubscriptionProtocol.SMS,
                                                    endpoint=phone_param.string_value)
        eip_rule.on_compliance_change('eip-compliance-change',
                                      target=targets.SnsTopic(eip_compliance_topic))
        config.CfnDeliveryChannel(self, 'devassoc-config-delivery',
                                  s3_bucket_name=config_bucket.bucket_name,
                                  sns_topic_arn=eip_compliance_topic.topic_arn)
Exemplo n.º 20
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = core.CfnParameter(
            self,
            "NOTIFICATION_EMAIL",
            type="String",
            description="email for pipeline outcome notifications",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            constraint_description="Please enter an email address with correct format ([email protected])",
            min_length=5,
            max_length=320,
        )
        blueprint_bucket_name = core.CfnParameter(
            self,
            "BLUEPRINT_BUCKET",
            type="String",
            description="Bucket name for blueprints of different types of ML Pipelines.",
            min_length=3,
        )
        assets_bucket_name = core.CfnParameter(
            self, "ASSETS_BUCKET", type="String", description="Bucket name for access logs.", min_length=3
        )
        endpoint_name = core.CfnParameter(
            self, "ENDPOINT_NAME", type="String", description="The name of the ednpoint to monitor", min_length=1
        )
        baseline_job_output_location = core.CfnParameter(
            self,
            "BASELINE_JOB_OUTPUT_LOCATION",
            type="String",
            description="S3 prefix to store the Data Baseline Job's output.",
        )
        monitoring_output_location = core.CfnParameter(
            self,
            "MONITORING_OUTPUT_LOCATION",
            type="String",
            description="S3 prefix to store the Monitoring Schedule output.",
        )
        schedule_expression = core.CfnParameter(
            self,
            "SCHEDULE_EXPRESSION",
            type="String",
            description="cron expression to run the monitoring schedule. E.g., cron(0 * ? * * *), cron(0 0 ? * * *), etc.",
            allowed_pattern="^cron(\\S+\\s){5}\\S+$",
        )
        training_data = core.CfnParameter(
            self,
            "TRAINING_DATA",
            type="String",
            description="Location of the training data in PipelineAssets S3 Bucket.",
        )
        instance_type = core.CfnParameter(
            self,
            "INSTANCE_TYPE",
            type="String",
            description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
            allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=7,
        )
        instance_volume_size = core.CfnParameter(
            self,
            "INSTANCE_VOLUME_SIZE",
            type="Number",
            description="Instance volume size used in model moniroing jobs. E.g., 20",
        )
        monitoring_type = core.CfnParameter(
            self,
            "MONITORING_TYPE",
            type="String",
            allowed_values=["dataquality", "modelquality", "modelbias", "modelexplainability"],
            default="dataquality",
            description="Type of model monitoring. Possible values: DataQuality | ModelQuality | ModelBias | ModelExplainability ",
        )
        max_runtime_seconds = core.CfnParameter(
            self,
            "MAX_RUNTIME_SIZE",
            type="Number",
            description="Max runtime in secodns the job is allowed to run. E.g., 3600",
        )
        baseline_job_name = core.CfnParameter(
            self,
            "BASELINE_JOB_NAME",
            type="String",
            description="Unique name of the data baseline job",
            min_length=3,
            max_length=63,
        )
        monitoring_schedule_name = core.CfnParameter(
            self,
            "MONITORING_SCHEDULE_NAME",
            type="String",
            description="Unique name of the monitoring schedule job",
            min_length=3,
            max_length=63,
        )
        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_model_monitor(training_data, assets_bucket)

        # deploy stage
        # creating data baseline job
        baseline_lambda_arn, create_baseline_job_definition = create_data_baseline_job(
            self,
            blueprint_bucket,
            assets_bucket,
            baseline_job_name,
            training_data,
            baseline_job_output_location,
            endpoint_name,
            instance_type,
            instance_volume_size,
            max_runtime_seconds,
            core.Aws.STACK_NAME,
        )
        # creating monitoring schedule
        monitor_lambda_arn, create_monitoring_schedule_definition = create_monitoring_schedule(
            self,
            blueprint_bucket,
            assets_bucket,
            baseline_job_output_location,
            baseline_job_name,
            monitoring_schedule_name,
            monitoring_output_location,
            schedule_expression,
            endpoint_name,
            instance_type,
            instance_volume_size,
            max_runtime_seconds,
            monitoring_type,
            core.Aws.STACK_NAME,
        )
        # create invoking lambda policy
        invoke_lambdas_policy = iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[baseline_lambda_arn, monitor_lambda_arn],
        )
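        # The Deploy-stage actions invoke these two helper Lambdas, so the
        # pipeline role needs lambda:InvokeFunction on both of them.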
        # creating pipeline stages
        source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])
        deploy_stage_model_monitor = codepipeline.StageProps(
            stage_name="Deploy",
            actions=[
                create_baseline_job_definition,
                create_monitoring_schedule_definition,
            ],
        )

        pipeline_notification_topic = sns.Topic(
            self,
            "ModelMonitorPipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
        )

        # constructing Model Monitor pipelines
        model_monitor_pipeline = codepipeline.Pipeline(
            self,
            "ModelMonitorPipeline",
            stages=[source_stage, deploy_stage_model_monitor],
            cross_account_keys=False,
        )
        model_monitor_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        model_monitor_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            )
        )
        # add lambda permissions
        model_monitor_pipeline.add_to_role_policy(invoke_lambdas_policy)

        pipeline_child_nodes = model_monitor_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[24].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # attaching iam permissions to the pipelines
        pipeline_permissions(model_monitor_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="MonitorPipeline",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{model_monitor_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )

        core.CfnOutput(
            self,
            id="DataBaselineJobName",
            value=baseline_job_name.value_as_string,
        )
        core.CfnOutput(
            self,
            id="MonitoringScheduleJobName",
            value=monitoring_schedule_name.value_as_string,
        )
        core.CfnOutput(
            self,
            id="MonitoringScheduleType",
            value=monitoring_type.value_as_string,
        )
Exemplo n.º 21
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = create_notification_email_parameter(self)
        template_zip_name = create_template_zip_name_parameter(self)
        template_file_name = create_template_file_name_parameter(self)
        template_params_file_name = create_stage_params_file_name_parameter(
            self, "TEMPLATE_PARAMS_NAME", "main")
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        stack_name = create_stack_name_parameter(self)

        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(
            self, "AssetsBucket", assets_bucket_name.value_as_string)

        # create sns topic and subscription
        pipeline_notification_topic = sns.Topic(
            self,
            "SinglePipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(
                email_address=notification_email.value_as_string))

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_template(
            template_zip_name, assets_bucket)
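        # source_action_template is a project helper; a minimal sketch of what
        # such a helper could return (an assumption shown for orientation,
        # using the aws_codepipeline_actions module, which is not imported in
        # this excerpt):
        #
        #   output = codepipeline.Artifact()
        #   action = codepipeline_actions.S3SourceAction(
        #       action_name="S3Source",
        #       bucket=assets_bucket,
        #       bucket_key=template_zip_name.value_as_string,
        #       output=output,
        #   )
        #   return output, action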

        # create cloudformation action
        cloudformation_action = create_cloudformation_action(
            self,
            "deploy_stack",
            stack_name.value_as_string,
            source_output,
            template_file_name.value_as_string,
            template_params_file_name.value_as_string,
        )
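        # create_cloudformation_action is likewise assumed to wrap a
        # CloudFormation deploy action along these lines (a sketch only; the
        # flag values are assumptions):
        #
        #   codepipeline_actions.CloudFormationCreateUpdateStackAction(
        #       action_name="deploy_stack",
        #       stack_name=stack_name.value_as_string,
        #       template_path=source_output.at_path(
        #           template_file_name.value_as_string),
        #       template_configuration=source_output.at_path(
        #           template_params_file_name.value_as_string),
        #       admin_permissions=False,
        #   )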

        source_stage = codepipeline.StageProps(
            stage_name="Source", actions=[source_action_definition])
        deploy = codepipeline.StageProps(
            stage_name="DeployCloudFormation",
            actions=[cloudformation_action],
        )

        single_account_pipeline = codepipeline.Pipeline(
            self,
            "SingleAccountPipeline",
            stages=[source_stage, deploy],
            cross_account_keys=False,
        )

        # Add CFN suppressions to the CloudFormation action's deployment role policy
        deployment_policy = cloudformation_action.deployment_role.node.find_all()[2]
        deployment_policy.node.default_child.cfn_options.metadata = (
            suppress_cloudformation_action())

        # add notification to the single-account pipeline
        single_account_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text((
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                    f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )),
            ),
            event_pattern=events.EventPattern(
                detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        single_account_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ))

        # add cfn suppressions (child constructs are targeted by position in
        # the construct tree, so these indices depend on resource ordering)
        pipeline_child_nodes = single_account_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        # attach IAM permissions to the pipeline
        pipeline_permissions(single_account_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{single_account_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )
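        # Hedged deployment sketch: the stack exposes CfnParameters created by
        # the create_*_parameter helpers, so a deploy would pass values for
        # them (the stack and parameter logical IDs below are assumptions):
        #
        #   cdk deploy SingleAccountPipelineStack \
        #       --parameters NotificationEmail=ops@example.com \
        #       --parameters AssetsBucket=my-assets-bucket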