Example #1
File: sns.py Project: vincentclaes/datajob
    def add_email_subscription(self) -> None:
        """Add an email or a list of emails as subscribers to a topic.

        :param sns_topic: an SNS Topic instance of aws cdk
        :param notification: email address as string or list of email addresses to be subscribed.
        :return: None
        """
        if isinstance(self.notification, list):
            for email in self.notification:
                self.sns_topic.add_subscription(
                    aws_sns_subscriptions.EmailSubscription(email))
        else:
            self.sns_topic.add_subscription(
                aws_sns_subscriptions.EmailSubscription(self.notification))
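For context, a minimal sketch of a construct this method could hang off. The attribute names are taken from the method body; the class itself is an assumption, not the actual datajob source:

from aws_cdk import core, aws_sns, aws_sns_subscriptions

class NotificationTopic(core.Construct):
    """Hypothetical owner class for add_email_subscription above."""

    def __init__(self, scope: core.Construct, construct_id: str, notification) -> None:
        super().__init__(scope, construct_id)
        # Attributes that add_email_subscription relies on.
        self.sns_topic = aws_sns.Topic(self, "sns-topic")
        self.notification = notification  # str, or list of str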
Example #2
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        sns_topic = sns.Topic(self, 'Topic')

        snsEmail = core.CfnParameter(
            self,
            'SNSEmail',
            default='PAnong@automation_rocks.com',
            description='Email Endpoint for SNS Notifications',
            type='String')

        email = sns_topic.add_subscription(
            subscriptions.EmailSubscription(snsEmail.value_as_string))

        cwAlarm = cw.CfnAlarm(
            self,
            'VPCAlarm',
            actions_enabled=True,
            alarm_actions=[sns_topic.topic_arn],
            alarm_description=
            "A CloudWatch Alarm that triggers when changes are made to the VPC.",
            comparison_operator="GreaterThanOrEqualToThreshold",
            evaluation_periods=1,
            treat_missing_data="notBreaching",
            threshold=1,
            metric_name="VpcEventCount",
            namespace="CloudTrailMetrics",
            period=300,
            statistic="Sum",
        )
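The alarm above watches a custom metric, VpcEventCount in the CloudTrailMetrics namespace, which only has data once something publishes it. A sketch of the CloudWatch Logs metric filter that could feed it, assuming `from aws_cdk import aws_logs as logs` and an existing CloudTrail log group object named cloudtrail_log_group:

        # Hypothetical filter feeding the VpcEventCount metric used by VPCAlarm.
        # The log group and the exact event names are assumptions.
        logs.MetricFilter(
            self,
            'VpcEventFilter',
            log_group=cloudtrail_log_group,
            metric_namespace='CloudTrailMetrics',
            metric_name='VpcEventCount',
            metric_value='1',
            filter_pattern=logs.FilterPattern.literal(
                '{ ($.eventName = CreateVpc) || ($.eventName = DeleteVpc) }'))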
Example #3
    def __init__(self, scope: core.Construct, id: str,  bank_account_service: lambda_.Function,
                 stage: Optional[str] = 'prod', **kwargs) -> None:
        super().__init__(scope, id+'-'+stage, **kwargs)

        # create SNS topic
        topic = sns.Topic(self, "BankTopic", display_name="SMSOutbound", topic_name="SMSOutbound")
        topic.add_subscription(subs.EmailSubscription(email_address="*****@*****.**"))

        # create the EventBridge stuff
        bus_name = 'banking-demo-events-'+stage
        bus = events.EventBus(self, id, event_bus_name=bus_name)
        events.Rule(self, "HUMAN_REVIEWED_APPLICATION", event_bus=bus, event_pattern=events.EventPattern(
            detail_type=["HUMAN_REVIEWED_APPLICATION"]), rule_name="HUMAN_REVIEWED_APPLICATION", enabled=True,
                    targets=[
                        targets.SnsTopic(topic)
                    ])
        events.Rule(self, "APPLICATION_SUBMITTED", event_bus=bus, event_pattern=events.EventPattern(
            detail_type=["APPLICATION_SUBMITTED"]), rule_name="APPLICATION_SUBMITTED", enabled=True)
        events.Rule(self, "APPLICATION_APPROVED", event_bus=bus, event_pattern=events.EventPattern(
            detail_type=["APPLICATION_APPROVED"]), rule_name="APPLICATION_APPROVED", enabled=True,
                         targets=[
                             targets.LambdaFunction(lambda_.Function.from_function_arn(
                                 self, "func", bank_account_service.function_arn))
                         ])

        self._event_bus_arn = bus.event_bus_arn
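The bus ARN is stashed on self._event_bus_arn, which suggests the stack exposes it read-only; a plausible companion property (not shown in the original):

    @property
    def event_bus_arn(self) -> str:
        # Read-only handle for downstream stacks that publish to this bus.
        return self._event_bus_arn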
Example #4
    def __init__(self, scope: core.Construct, id: str, ec2_params: dict,
                 lambda_params: dict, sns_params: dict, **kwargs) -> None:
        """
        deploys all AWS resources for plus environment
            Resources:
                AWS::EC2::Instance with your details
                AWS::Lambda::Function with your policies
                AWS::Cloudwatch::Alarm for EC2 and Lambda
                AWS::Cloudwatch::Dashboard for EC2 and Lamnbda
                AWS::SNS::Topic for EC2 and Lambda alarms
        """
        super().__init__(scope, id, ec2_params, lambda_params, **kwargs)

        # sns
        topic = sns.Topic(self,
                          "Topic",
                          topic_name=sns_params['topic_name'],
                          display_name=sns_params['display_name'])
        topic.add_subscription(
            subscriptions.EmailSubscription(sns_params['endpoint']))

        # ec2
        self.ec2_alarm.add_alarm_action(cw_actions.SnsAction(topic))
        self.ec2_alarm.add_ok_action(cw_actions.SnsAction(topic))

        # lambda
        self.lambda_alarm.add_alarm_action(cw_actions.SnsAction(topic))
        self.lambda_alarm.add_ok_action(cw_actions.SnsAction(topic))
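For reference, a plausible shape for the sns_params dict this constructor expects. The keys come from the usage above; the values are illustrative only:

sns_params = {
    'topic_name': 'plus-env-alarms',            # hypothetical
    'display_name': 'Plus Environment Alarms',  # hypothetical
    'endpoint': 'ops@example.com',              # subscription email
}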
Example #5
    def __init__(self, app: core.App, id: str, **kwargs):
        super().__init__(app, id, **kwargs)

        # [ SNS ] Topic:
        #
        # - The error topic for all issues.

        topic = sns.Topic(self, 'Topic', display_name='Pipeline Alert')

        # [ Events ] Target:
        #
        # - Wraps the topic so it can be used as an EventBridge rule target.

        sns_target = targets.SnsTopic(topic)

        # [ SNS ] Subscription:
        #
        # - Takes all emails in the list and creates email subscriptions for each.

        # notification_emails is assumed to be defined at module scope,
        # e.g. loaded from configuration.
        for email in notification_emails:
            topic.add_subscription(
                sns_subscriptions.EmailSubscription(email_address=email))

        # [ SNS ] Target handle:
        #
        # - Exposed for rules defined elsewhere in the stack.

        self.sns_target = sns_target
Example #6
    @classmethod
    def _build_sub(cls, mechanism, phone, email):
        if mechanism == 'email':
            return sns_sub.EmailSubscription(email)
        elif mechanism == 'sms':
            return sns_sub.SmsSubscription(phone)
        else:
            raise ValueError(
                f"Notification mechanism '{mechanism}' must be one of "
                f"{', '.join(cls.VALID_NOTIFICATIONS)}")
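cls.VALID_NOTIFICATIONS is not defined in the snippet; a minimal class context, with the constant's values inferred from the two branches above:

import aws_cdk.aws_sns_subscriptions as sns_sub

class SubscriptionBuilder:
    # Inferred from the mechanisms _build_sub handles.
    VALID_NOTIFICATIONS = ('email', 'sms')
    # ... _build_sub (above) would live here as a classmethod.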
Example #7
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        
        #template = cfn_inc.CfnInclude(self, id='Template', template_file='template.yaml')
        # The code that defines your stack goes here
        bucket_names = 'config-1' + str(core.Aws.ACCOUNT_ID)
        sns_topic = _sns.Topic(self, id='topic-config', topic_name='config-topic')
        sns_topic.add_subscription(subscriptions.EmailSubscription("*****@*****.**"))
        bucket = s3.Bucket(self, id='s3cdkbuckets', bucket_name=bucket_names, versioned=True)
        bucket_arn2 = str(bucket.bucket_arn) + "/AWSLogs/" + str(core.Aws.ACCOUNT_ID) + "/Config/*"
        bucket_policy = bucket.add_to_resource_policy(iam.PolicyStatement(effect=iam.Effect.ALLOW, 
                                                                             resources=[bucket.bucket_arn],
                                                                             actions=["s3:GetBucketAcl"],
                                                                             sid = "AWSConfigBucketPermissionsCheck",
                                                                             principals=[iam.ServicePrincipal("config.amazonaws.com")]
                                                                             ))
        bucket_policy2 = bucket.add_to_resource_policy(iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                                                           resources=[bucket_arn2],
                                                                           actions=["s3:PutObject"],
                                                                           sid = "AWSConfigBucketDelivery",
                                                                           principals=[iam.ServicePrincipal("config.amazonaws.com")],
                                                                           conditions={"StringEquals": {
                                                                               "s3:x-amz-acl": "bucket-owner-full-control"}
                                                                                        }))
        recorder = config.CfnConfigurationRecorder(self,
                id='recorder',
                role_arn='arn:aws:iam::306646308112:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig',
                recording_group=None)
        channel = config.CfnDeliveryChannel(self,
                id='channel',
                s3_bucket_name=bucket.bucket_name,
                sns_topic_arn=sns_topic.topic_arn)
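        # Note: time.sleep() below runs while the app is synthesized, not at
        # deployment time; resource ordering is actually enforced by the
        # add_depends_on() calls further down.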
        time.sleep(20)
        srule = config.CfnConfigRule(self,
                id='rule1',
                source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="REQUIRED_TAGS"),  
                input_parameters={"tag1Key":"tagVal"})
        srule2 = config.CfnConfigRule(self, id='rule2',
                 source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="S3_BUCKET_LEVEL_PUBLIC_ACCESS_PROHIBITED"))
        srule3 = config.CfnConfigRule(self, id='rule3',
                 source=config.CfnConfigRule.SourceProperty(owner="AWS",
                    source_identifier="VPC_SG_OPEN_ONLY_TO_AUTHORIZED_PORTS"))
        srule.add_depends_on(recorder)
        srule2.add_depends_on(recorder)
        srule3.add_depends_on(recorder)
        event_rule = _events.Rule(self, id='event_rule', event_pattern={
            "source": ["aws.config"],
            "detail": {
                "messageType": ["ConfigurationItemChangeNotification"],
                "newEvaluationResult": {
                    "complianceType": ["NON_COMPLIANT"]
                }
            }
        })
        event_rule.add_target(targets.SnsTopic(sns_topic))
Example #8
 def create_all_topics(self) -> None:
     """
     Create all stack topics
     """
     # Internal topics
     # General alarm topic to signal problems in stack execution
     # and e-mail subscription
     self.topics_["alarm_topic"] = sns.Topic(self, "alarm_topic")
     self.topics_["alarm_topic"].add_subscription(
         sns_subscriptions.EmailSubscription(settings.operator_email))
     # Public STAC item topic for new STAC items
     self.topics_["stac_item_topic"] = sns.Topic(self, "stac_item_topic")
     core.CfnOutput(
         self,
         "stac_item_topic_output",
         value=self.topics_["stac_item_topic"].topic_arn,
         description="STAC item topic",
     )
     sit_policy = iam.PolicyDocument(
         assign_sids=True,
         statements=[
             iam.PolicyStatement(
                 actions=["SNS:Subscribe", "SNS:Receive"],
                 principals=[iam.AnyPrincipal()],
                 resources=[self.topics_["stac_item_topic"].topic_arn],
             )
         ],
     )
     sit_policy.add_statements(
         iam.PolicyStatement(
             actions=[
                 "SNS:GetTopicAttributes",
                 "SNS:SetTopicAttributes",
                 "SNS:AddPermission",
                 "SNS:RemovePermission",
                 "SNS:DeleteTopic",
                 "SNS:Subscribe",
                 "SNS:ListSubscriptionsByTopic",
                 "SNS:Publish",
                 "SNS:Receive",
             ],
             principals=[iam.AccountPrincipal(self.account)],
             resources=[self.topics_["stac_item_topic"].topic_arn],
         ))
     # We could add the document directly to stac_item_policy
     sns.TopicPolicy(
         self,
         "sns_public_topic_policy",
         topics=[self.topics_["stac_item_topic"]],
         policy_document=sit_policy,
     )
     # Reconcile topic, used internally for reconciliation operations
     self.topics_["reconcile_stac_item_topic"] = sns.Topic(
         self, "reconcile_stac_item_topic")
Example #9
 def __init__(self, scope: core.Construct, id: str,UserName="******",EmailAddress="default",**kwargs):
     super().__init__(scope, id, **kwargs)
     self.SNSTopicList = {}
     
     self.Topic_Batch_Job_Notification = _sns.Topic(self, "Batch_Job_Notification",
         display_name="BatchJobNotification_" + UserName,
         topic_name="BatchJobNotification_" + UserName
     )
     self.Topic_Batch_Job_Notification.add_subscription(_subs.EmailSubscription(EmailAddress))
     
     self.SNSTopicList["Topic_Batch_Job_Notification"] = self.Topic_Batch_Job_Notification
Example #10
    def __init__(self, scope: core.Construct, id: str, notification_email):
        super().__init__(scope, id)
        stack_name = core.Stack.of(self).stack_name

        self.topic = sns.Topic(
            self,
            "topic",
            topic_name=f"{stack_name}-{names.NOTIFICATIONS_TOPIC}")
        self.topic.add_subscription(
            sns_subs.EmailSubscription(notification_email))

        self.custom_metric_namespace = f"{stack_name}-{names.METRIC_NAMESPACE}"
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        user = iam.User(self, 'myuser',
                        managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name('AdministratorAccess')])

        trail = cloudtrail.Trail(self, 's3-account-activity',
                                 enable_file_validation=True,
                                 include_global_service_events=True,
                                 is_multi_region_trail=True,
                                 management_events=cloudtrail.ReadWriteType.ALL)

        fn = _lambda.Function(self, 'cloudtrail_reactivator',
                              description='Reactivates stopped CloudTrail logs',
                              code=_lambda.Code.from_asset('./lambda'),
                              handler='cloudtrail_reactivator.handler',
                              runtime=_lambda.Runtime.PYTHON_3_8,
                              initial_policy=[
                                  # Allow Lambda to re-activate CloudTrail logging.
                                  iam.PolicyStatement(resources=[trail.trail_arn],
                                                      actions=['cloudtrail:DescribeTrails',
                                                               'cloudtrail:GetTrailStatus',
                                                               'cloudtrail:StartLogging'],
                                                      effect=iam.Effect.ALLOW),
                                  # Allow Lambda to attach policies to user.
                                  iam.PolicyStatement(resources=[user.user_arn],
                                                      actions=['iam:AttachUserPolicy'],
                                                      effect=iam.Effect.ALLOW,
                                                      conditions={'ArnEquals': {"iam:PolicyARN": "arn:aws:iam::aws:policy/AWSDenyAll"}})
                              ])

        topic = sns.Topic(self, 'CloudTrailLoggingStateTransition')
        topic.add_subscription(subs.EmailSubscription('*****@*****.**'))
        topic.grant_publish(fn)

        fn.add_environment('SNS_ARN', topic.topic_arn)

        # Event Pattern that defines the CloudTrail events that should trigger
        # the Lambda.
        event_pattern = events.EventPattern(source=['aws.cloudtrail'],
                                            detail={'eventName':   ['StopLogging',
                                                                    'DeleteTrail',
                                                                    'UpdateTrail',
                                                                    'RemoveTags',
                                                                    'AddTags',
                                                                    'CreateTrail',
                                                                    'StartLogging',
                                                                    'PutEventSelectors'],
                                                    'eventSource': ['cloudtrail.amazonaws.com']})
        trail.on_cloud_trail_event('CloudTrailStateChange',
                                   description='Detects CloudTrail log state changes',
                                   target=events_targets.LambdaFunction(fn),
                                   event_pattern=event_pattern)
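The cloudtrail_reactivator.handler code isn't included; a minimal sketch of what it might do given the IAM policy above (the boto3 calls are real APIs, but the handler body itself is an assumption):

import os
import boto3

def handler(event, context):
    detail = event['detail']
    if detail['eventName'] == 'StopLogging':
        # CloudTrail puts the trail name/ARN under requestParameters.
        trail = detail['requestParameters']['name']
        boto3.client('cloudtrail').start_logging(Name=trail)
        # Notify via the topic wired in through the SNS_ARN env var.
        boto3.client('sns').publish(
            TopicArn=os.environ['SNS_ARN'],
            Subject='CloudTrail logging re-enabled',
            Message=f'StartLogging issued for {trail}')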
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create SNS Topic

        konstone_topic = _sns.Topic(self,
                                    "konstoneHotTopics",
                                    display_name="Latest topics on KonStone",
                                    topic_name="konstoneHotTopic")

        # Add Subscription to SNS Topic
        konstone_topic.add_subscription(
            _subs.EmailSubscription("*****@*****.**"))
Example #13
    def __init__(self, app: core.Construct, id: str, **kwargs) -> None:
        super().__init__(app, id)
        self.topic_name = kwargs.get('topic_name')
        self.email_addresses = kwargs.get('email_addresses')

        self.topic = sns.Topic(self,
                               'Topic',
                               topic_name=self.topic_name,
                               display_name=self.topic_name)

        for email_address in self.email_addresses:
            self.topic.add_subscription(
                sns_subscriptions.EmailSubscription(
                    email_address=email_address, ))
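A plausible call site matching the kwargs the constructor reads; the stack class name and values are illustrative:

NotificationTopicStack(app, 'notifications',
                       topic_name='ops-alerts',
                       email_addresses=['first@example.com',
                                        'second@example.com'])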
Example #14
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # SNS topic
        sns_topic = aws_sns.Topic(
            self,
            "snstopic01",
            display_name="Test Topic One",
            topic_name="TestTopic"
        )

        # Add an email subscription to the topic
        sns_topic.add_subscription(
            aws_sns_subc.EmailSubscription("*****@*****.**")
        )
Example #15
    def createOps(self):
        alarmTopic = sns.Topic(self, 'TipBotAlarmTopic',
            display_name='TipBotAlarmTopic',
            fifo=False,
        )
        alarmTopic.add_subscription(snss.EmailSubscription(self.getEmail(), json=True))

        cw.CompositeAlarm(self, 'TipBotCompositeAlarm',
            alarm_rule=cw.AlarmRule.any_of(
                cw.Alarm(self, "LNDAlarm",
                    metric=cw.Metric(
                        metric_name='LndUp',
                        namespace='LNTipBot',
                        period=cdk.Duration.minutes(1),
                        statistic='sum',
                        unit=cw.Unit.NONE,
                    ),
                    threshold=1,
                    actions_enabled=False,
                    alarm_description='Alarm for when the LND service has gone down',
                    alarm_name='LND Alarm',
                    comparison_operator=cw.ComparisonOperator.LESS_THAN_THRESHOLD,
                    datapoints_to_alarm=5,
                    evaluation_periods=5,
                    treat_missing_data=cw.TreatMissingData.BREACHING
                ),
                cw.Alarm(self, "BTCAlarm",
                    metric=cw.Metric(
                        metric_name='BtcUp',
                        namespace='LNTipBot',
                        period=cdk.Duration.minutes(1),
                        statistic='sum',
                        unit=cw.Unit.NONE,
                    ),
                    threshold=1,
                    actions_enabled=False,
                    alarm_description='Alarm for when the BTC service has gone down',
                    alarm_name='BTC Alarm',
                    comparison_operator=cw.ComparisonOperator.LESS_THAN_THRESHOLD,
                    datapoints_to_alarm=5,
                    evaluation_periods=5,
                    treat_missing_data=cw.TreatMissingData.BREACHING
                )
            ),
            actions_enabled=True,
            alarm_description='TipBot Composite Alarm',
            composite_alarm_name='TipBot Composite Alarm',
        ).add_alarm_action(cwa.SnsAction(alarmTopic))
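Both alarms watch custom heartbeat metrics (LndUp, BtcUp) that something must publish roughly every minute; a boto3 sketch of such a heartbeat publisher (not part of the original stack):

import boto3

cloudwatch = boto3.client('cloudwatch')
# Hypothetical heartbeat: push 1 whenever the LND service checks in.
cloudwatch.put_metric_data(
    Namespace='LNTipBot',
    MetricData=[{'MetricName': 'LndUp', 'Value': 1, 'Unit': 'None'}])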
Example #16
    def __init__(self, scope: Construct, id: str, **kwarg) -> None:
        super().__init__(scope, id, **kwarg)

        # define the table that maps short codes to URLs.
        table = aws_dynamodb.Table(self, "Table",
                                   partition_key=aws_dynamodb.Attribute(
                                       name="id",
                                       type=aws_dynamodb.AttributeType.STRING),
                                   read_capacity=10,
                                   write_capacity=5)

        # define the API gateway request handler. all API requests will go to the same function.
        handler = aws_lambda.Function(self, "UrlShortenerFunction",
                                      code=aws_lambda.Code.asset("./lambda"),
                                      handler="handler.main",
                                      timeout=Duration.minutes(5),
                                      runtime=aws_lambda.Runtime.PYTHON_3_7)

        # generate the topic to publish to
        topic = aws_sns.Topic(self, "Topic", display_name="Url created topic")
        topic.add_subscription(aws_sns_subscriptions.EmailSubscription("*****@*****.**"))

        # pass the table name to the handler through an environment variable and grant
        # the handler read/write permissions on the table.
        handler.add_environment('TABLE_NAME', table.table_name)
        handler.add_environment('TOPIC_ARN', topic.topic_arn)
        table.grant_read_write_data(handler)
        topic.grant_publish(handler)

        # define the API endpoint and associate the handler
        api = aws_apigateway.LambdaRestApi(self, "UrlShortenerApi", handler=handler)

        # define the static website hosting
        frontendBucket = aws_s3.Bucket(self, "UrlShortenerWebsiteBucket",
                                       public_read_access=True,
                                       removal_policy=core.RemovalPolicy.DESTROY,
                                       website_index_document="index.html")

        deployment = aws_s3_deployment.BucketDeployment(self, "deployStaticWebsite",
                                                        sources=[aws_s3_deployment.Source.asset("./frontend")],
                                                        destination_bucket=frontendBucket)

        # define a Watchful monitoring system and watch the entire scope
        # this will automatically find all watchable resources and add
        # them to our dashboard
        wf = Watchful(self, 'watchful', alarm_email='*****@*****.**')
        wf.watch_scope(self)
Example #17
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Validate required props.
        required_props_keys = [
            'MonitoringUrl', 'CanaryFrequency', 'NotificationEmail'
        ]
        for k in required_props_keys:
            if k not in props or not props[k]:
                raise ValueError("Required prop %s is not present" % k)

        canary_fn = lambda_.Function(
            self,
            'GhostCanaryFn',
            runtime=lambda_.Runtime.PYTHON_3_6,
            code=lambda_.Code.asset('lambda'),
            handler='ghost_canary.handler',
            environment={'WebsiteUrl': props['MonitoringUrl']})

        rule = events.Rule(
            self,
            'CanarySchedule',
            schedule=events.Schedule.rate(
                core.Duration.seconds(props['CanaryFrequency'])),
        )

        rule.add_target(events_targets.LambdaFunction(canary_fn))

        alarm = cw.Alarm(
            self,
            'CanaryAlarm',
            metric=canary_fn.metric('Errors'),
            threshold=1,
            evaluation_periods=1,
            datapoints_to_alarm=1,
        )

        alarm_topic = sns.Topic(
            self,
            'AlarmTopic',
        )

        alarm_topic.add_subscription(
            sns_subscriptions.EmailSubscription(
                email_address=props['NotificationEmail'], ))

        alarm.add_alarm_action(cw_actions.SnsAction(alarm_topic))
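The ghost_canary.handler body isn't shown; a minimal sketch that would make the Errors-based alarm above meaningful, since any unhandled exception counts against Lambda's Errors metric (the handler body is an assumption):

import os
import urllib.request

def handler(event, context):
    # Raise (and therefore register a Lambda error) when the site is down.
    url = os.environ['WebsiteUrl']
    with urllib.request.urlopen(url, timeout=10) as resp:
        if resp.status != 200:
            raise RuntimeError(f'{url} returned HTTP {resp.status}')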
Example #18
    def SnsExporter(self, ZachRepositoryName):
        ZachSNStopic = sns.Topic(self,
                                 id=ZachRepositoryName + "SNS",
                                 display_name=ZachRepositoryName + "SNS",
                                 topic_name=ZachRepositoryName + "SNS")
        ZachSNStopic.add_subscription(
            sns_sub.EmailSubscription(
                email_address="*****@*****.**",
                filter_policy={
                    'status':
                    sns.SubscriptionFilter.string_filter(
                        whitelist=['delete', 'not exist'])
                }))

        return ZachSNStopic
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create sns topic and subscription
        mytopic = _sns.Topic(self,
                             "mytopic",
                             display_name="latest topics",
                             topic_name="mytopic")
        mytopic.add_subscription(_subs.EmailSubscription("*****@*****.**"))

        # create sqs
        myqueue = _sqs.Queue(
            self,
            "myqueue",
            queue_name="myqueue.fifo",
            fifo=True,
            encryption=_sqs.QueueEncryption.KMS_MANAGED,
            retention_period=core.Duration.days(4),
            visibility_timeout=core.Duration.seconds(45),
        )
Example #20
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        my_s3_bucket = _s3.Bucket(self, id='myS3Bucket')

        my_sns_topic = _sns.Topic(
            self,
            id='demoTopic'
        )

        my_sns_sub = _sns_subscriptions.EmailSubscription(
            "*****@*****.**")
        # Subscriptions are attached via the topic, not by calling bind()
        # directly (bind is invoked internally by add_subscription).
        my_sns_topic.add_subscription(my_sns_sub)

        my_function = _lambda.Function(self,
                                       id='demoFunction',
                                       code=_lambda.Code.asset(r'../src'),
                                       handler='fun01.handler')
        trigger_event = _event.S3EventSource(
            bucket=my_s3_bucket, events=[_s3.EventType.OBJECT_CREATED])
        # Attach the S3 trigger to the function; without this the event
        # source is constructed but never used.
        my_function.add_event_source(trigger_event)
Example #21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        
        mytopic = sns.Topic(
            self, "BillingAlert"
        )

        email_parameter = core.CfnParameter(self, "email-param")
        dailyBudget_parameter = core.CfnParameter(self, "DailyBudget")
        monthlyGrowthRate_parameter = core.CfnParameter(self, "MonthlyGrowthRate")
        S3CodePath_parameter = core.CfnParameter(self, "S3CodePath")

        emailAddress = email_parameter.value_as_string
        dailyBudget_value = dailyBudget_parameter.value_as_string
        monthlyGrowthRate_value = monthlyGrowthRate_parameter.value_as_string

        mytopic.add_subscription(subscriptions.EmailSubscription(emailAddress))
        myrole = iam.Role(self, "BillianAlertRole", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
        myrole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSNSFullAccess"))
        myrole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AWSLambdaBasicExecutionRole"))
        myrole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchFullAccess "))

        function = awslambda.Function(self, "MyLambda",
            code=awslambda.Code.from_cfn_parameters(object_key_param=S3CodePath_parameter),
            handler="lambda_function.py",
            runtime=awslambda.Runtime.PYTHON_3_7,
            role = myrole,
            function_name= "BillingAlert",
            memory_size= 3000
            )
        function.add_environment("DailyBudget", dailyBudget_value)
        function.add_environment("MonthlyGrowthRate", monthlyGrowthRate_value)
        function.add_environment("SNSARN", getattr(mytopic,"topic_arn"))
        targetFunction = LambdaFunction(function)
        Rule(self, "ScheduleRuleForBillingAlert",
            schedule=Schedule.cron(minute="0", hour="4"),
            targets=[targetFunction]
        )
Example #22
def add_sns_email_subscriptions(sns_topic: sns.Topic,
                                subscriptions: list) -> None:
    """
    Add email subscriptions from the config file to the specified SNS topic.
    :param sns_topic: topic to add email subscriptions to
    :param subscriptions: list of email subscription dicts for this topic
    :return: None
    """
    for subscription in subscriptions:
        email = subscription.get('email')
        if email:
            format_json = subscription.get('json', False)

            sns_topic.add_subscription(
                sns_subscriptions.EmailSubscription(email_address=email,
                                                    json=format_json))
            print('added sns email subscription {} to topic {}'.format(
                email, sns_topic.node.id),
                  file=sys.stderr)
        else:
            print('email attribute not found in subscription {}'.format(
                subscription),
                  file=sys.stderr)
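A plausible shape for the subscriptions config the function iterates over; the keys are inferred from the .get() calls above:

subscriptions = [
    {'email': 'ops@example.com'},                  # plain-text email
    {'email': 'audit@example.com', 'json': True},  # email-json delivery
]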
Example #23
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        # Setup SSM parameter of credentials, bucket_para, ignore_list
        ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=1)

        ssm_bucket_para = ssm.StringParameter(self,
                                              "s3bucket_serverless",
                                              string_value=json.dumps(
                                                  bucket_para, indent=4))

        ssm_parameter_ignore_list = ssm.StringParameter(
            self, "s3_migrate_ignore_list", string_value=ignore_list)

        # Setup DynamoDB
        ddb_file_list = ddb.Table(self,
                                  "s3migrate_serverless",
                                  partition_key=ddb.Attribute(
                                      name="Key",
                                      type=ddb.AttributeType.STRING),
                                  billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
        ddb_file_list.add_global_secondary_index(
            partition_key=ddb.Attribute(name="desBucket",
                                        type=ddb.AttributeType.STRING),
            index_name="desBucket-index",
            projection_type=ddb.ProjectionType.INCLUDE,
            non_key_attributes=["desKey", "versionId"])

        # Setup SQS
        sqs_queue_DLQ = sqs.Queue(self,
                                  "s3migrate_serverless_Q_DLQ",
                                  visibility_timeout=core.Duration.minutes(15),
                                  retention_period=core.Duration.days(14))
        sqs_queue = sqs.Queue(self,
                              "s3migrate_serverless_Q",
                              visibility_timeout=core.Duration.minutes(15),
                              retention_period=core.Duration.days(14),
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  max_receive_count=60, queue=sqs_queue_DLQ))

        # Setup API for Lambda to get IP address (for debug networking routing purpose)
        checkip = api.RestApi(
            self,
            "lambda-checkip-api",
            cloud_watch_role=True,
            deploy=True,
            description="For Lambda get IP address",
            default_integration=api.MockIntegration(
                integration_responses=[
                    api.IntegrationResponse(status_code="200",
                                            response_templates={
                                                "application/json":
                                                "$context.identity.sourceIp"
                                            })
                ],
                request_templates={"application/json": '{"statusCode": 200}'}),
            endpoint_types=[api.EndpointType.REGIONAL])
        checkip.root.add_method("GET",
                                method_responses=[
                                    api.MethodResponse(
                                        status_code="200",
                                        response_models={
                                            "application/json":
                                            api.Model.EMPTY_MODEL
                                        })
                                ])

        # Setup Lambda functions
        handler = lam.Function(self,
                               "s3-migrate-worker",
                               code=lam.Code.asset("./lambda"),
                               handler="lambda_function_worker.lambda_handler",
                               runtime=lam.Runtime.PYTHON_3_8,
                               memory_size=1024,
                               timeout=core.Duration.minutes(15),
                               tracing=lam.Tracing.ACTIVE,
                               environment={
                                   'table_queue_name':
                                   ddb_file_list.table_name,
                                   'Des_bucket_default': Des_bucket_default,
                                   'Des_prefix_default': Des_prefix_default,
                                   'StorageClass': StorageClass,
                                   'checkip_url': checkip.url,
                                   'ssm_parameter_credentials':
                                   ssm_parameter_credentials,
                                   'JobType': JobType,
                                   'MaxRetry': MaxRetry,
                                   'MaxThread': MaxThread,
                                   'MaxParallelFile': MaxParallelFile,
                                   'JobTimeout': JobTimeout,
                                   'UpdateVersionId': UpdateVersionId,
                                   'GetObjectWithVersionId':
                                   GetObjectWithVersionId
                               })

        handler_jobsender = lam.Function(
            self,
            "s3-migrate-jobsender",
            code=lam.Code.asset("./lambda"),
            handler="lambda_function_jobsender.lambda_handler",
            runtime=lam.Runtime.PYTHON_3_8,
            memory_size=1024,
            timeout=core.Duration.minutes(15),
            tracing=lam.Tracing.ACTIVE,
            environment={
                'table_queue_name': ddb_file_list.table_name,
                'StorageClass': StorageClass,
                'checkip_url': checkip.url,
                'sqs_queue': sqs_queue.queue_name,
                'ssm_parameter_credentials': ssm_parameter_credentials,
                'ssm_parameter_ignore_list':
                ssm_parameter_ignore_list.parameter_name,
                'ssm_parameter_bucket': ssm_bucket_para.parameter_name,
                'JobType': JobType,
                'MaxRetry': MaxRetry,
                'JobsenderCompareVersionId': JobsenderCompareVersionId
            })

        # Allow lambda read/write DDB, SQS
        ddb_file_list.grant_read_write_data(handler)
        ddb_file_list.grant_read_write_data(handler_jobsender)
        sqs_queue.grant_send_messages(handler_jobsender)
        # SQS trigger Lambda worker
        handler.add_event_source(SqsEventSource(sqs_queue, batch_size=1))

        # Option1: Create S3 Bucket, all new objects in this bucket will be transmitted by Lambda Worker
        s3bucket = s3.Bucket(self, "s3_new_migrate")
        s3bucket.grant_read(handler)
        s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                        s3n.SqsDestination(sqs_queue))

        # Option2: Allow existing S3 buckets to be read by the Lambda functions.
        # Lambda Jobsender will scan and compare these buckets and trigger Lambda Workers to transmit
        bucket_name = ''
        for b in bucket_para:
            if bucket_name != b['src_bucket']:  # skip if the same bucket is listed more than once
                bucket_name = b['src_bucket']
                s3exist_bucket = s3.Bucket.from_bucket_name(
                    self,
                    bucket_name,  # use the bucket name as the construct id
                    bucket_name=bucket_name)
                if JobType == 'PUT':
                    s3exist_bucket.grant_read(handler_jobsender)
                    s3exist_bucket.grant_read(handler)
                else:  # 'GET' mode
                    s3exist_bucket.grant_read_write(handler_jobsender)
                    s3exist_bucket.grant_read_write(handler)

        # Allow Lambda read ssm parameters
        ssm_bucket_para.grant_read(handler_jobsender)
        ssm_credential_para.grant_read(handler)
        ssm_credential_para.grant_read(handler_jobsender)
        ssm_parameter_ignore_list.grant_read(handler_jobsender)

        # Schedule cron event to trigger Lambda Jobsender per hour:
        event.Rule(self,
                   'cron_trigger_jobsender',
                   schedule=event.Schedule.rate(core.Duration.hours(1)),
                   targets=[target.LambdaFunction(handler_jobsender)])

        # TODO: Trigger the event immediately; add a custom resource lambda to invoke handler_jobsender

        # Create Lambda logs filter to create network traffic metric
        handler.log_group.add_metric_filter(
            "Completed-bytes",
            metric_name="Completed-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Complete", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Uploading-bytes",
            metric_name="Uploading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Uploading", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Downloading-bytes",
            metric_name="Downloading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Downloading", bytes, key]'))
        handler.log_group.add_metric_filter(
            "MaxMemoryUsed",
            metric_name="MaxMemoryUsed",
            metric_namespace="s3_migrate",
            metric_value="$memory",
            filter_pattern=logs.FilterPattern.literal(
                '[head="REPORT", a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, '
                'a13, a14, a15, a16, memory, MB="MB", rest]'))
        lambda_metric_Complete = cw.Metric(namespace="s3_migrate",
                                           metric_name="Completed-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        lambda_metric_Upload = cw.Metric(namespace="s3_migrate",
                                         metric_name="Uploading-bytes",
                                         statistic="Sum",
                                         period=core.Duration.minutes(1))
        lambda_metric_Download = cw.Metric(namespace="s3_migrate",
                                           metric_name="Downloading-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        lambda_metric_MaxMemoryUsed = cw.Metric(
            namespace="s3_migrate",
            metric_name="MaxMemoryUsed",
            statistic="Maximum",
            period=core.Duration.minutes(1))
        handler.log_group.add_metric_filter(
            "ERROR",
            metric_name="ERROR-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"ERROR"'))
        handler.log_group.add_metric_filter(
            "WARNING",
            metric_name="WARNING-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"WARNING"'))
        # Task timed out
        handler.log_group.add_metric_filter(
            "TIMEOUT",
            metric_name="TIMEOUT-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"Task timed out"'))
        log_metric_ERROR = cw.Metric(namespace="s3_migrate",
                                     metric_name="ERROR-Logs",
                                     statistic="Sum",
                                     period=core.Duration.minutes(1))
        log_metric_WARNING = cw.Metric(namespace="s3_migrate",
                                       metric_name="WARNING-Logs",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))
        log_metric_TIMEOUT = cw.Metric(namespace="s3_migrate",
                                       metric_name="TIMEOUT-Logs",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))

        # Dashboard to monitor SQS and Lambda
        board = cw.Dashboard(self, "s3_migrate_serverless")

        board.add_widgets(
            cw.GraphWidget(title="Lambda-NETWORK",
                           left=[
                               lambda_metric_Download, lambda_metric_Upload,
                               lambda_metric_Complete
                           ]),
            cw.GraphWidget(title="Lambda-concurrent",
                           left=[
                               handler.metric(
                                   metric_name="ConcurrentExecutions",
                                   period=core.Duration.minutes(1))
                           ]),
            cw.GraphWidget(
                title="Lambda-invocations/errors/throttles",
                left=[
                    handler.metric_invocations(
                        period=core.Duration.minutes(1)),
                    handler.metric_errors(period=core.Duration.minutes(1)),
                    handler.metric_throttles(period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(
                title="Lambda-duration",
                left=[
                    handler.metric_duration(period=core.Duration.minutes(1))
                ]),
        )

        board.add_widgets(
            cw.GraphWidget(title="Lambda_MaxMemoryUsed(MB)",
                           left=[lambda_metric_MaxMemoryUsed]),
            cw.GraphWidget(title="ERROR/WARNING Logs",
                           left=[log_metric_ERROR],
                           right=[log_metric_WARNING, log_metric_TIMEOUT]),
            cw.GraphWidget(
                title="SQS-Jobs",
                left=[
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1))
                ]),
            cw.SingleValueWidget(
                title="Running/Waiting and Dead Jobs",
                metrics=[
                    sqs_queue.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1))
                ],
                height=6))
        # Alarm for queue - DLQ
        alarm_DLQ = cw.Alarm(
            self,
            "SQS_DLQ",
            metric=sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
            ),
            threshold=0,
            comparison_operator=cw.ComparisonOperator.GREATER_THAN_THRESHOLD,
            evaluation_periods=1,
            datapoints_to_alarm=1)
        alarm_topic = sns.Topic(self, "SQS queue-DLQ has dead letter")
        alarm_topic.add_subscription(
            subscription=sub.EmailSubscription(alarm_email))
        alarm_DLQ.add_alarm_action(action.SnsAction(alarm_topic))

        core.CfnOutput(self,
                       "Dashboard",
                       value="CloudWatch Dashboard name s3_migrate_serverless")
Example #24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = core.CfnParameter(
            self,
            "NOTIFICATION_EMAIL",
            type="String",
            description="email for pipeline outcome notifications",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            constraint_description="Please enter an email address with correct format ([email protected])",
            min_length=5,
            max_length=320,
        )
        blueprint_bucket_name = core.CfnParameter(
            self,
            "BLUEPRINT_BUCKET",
            type="String",
            description="Bucket name for blueprints of different types of ML Pipelines.",
            min_length=3,
        )
        assets_bucket_name = core.CfnParameter(
            self, "ASSETS_BUCKET", type="String", description="Bucket name for access logs.", min_length=3
        )
        endpoint_name = core.CfnParameter(
            self, "ENDPOINT_NAME", type="String", description="The name of the ednpoint to monitor", min_length=1
        )
        baseline_job_output_location = core.CfnParameter(
            self,
            "BASELINE_JOB_OUTPUT_LOCATION",
            type="String",
            description="S3 prefix to store the Data Baseline Job's output.",
        )
        monitoring_output_location = core.CfnParameter(
            self,
            "MONITORING_OUTPUT_LOCATION",
            type="String",
            description="S3 prefix to store the Monitoring Schedule output.",
        )
        schedule_expression = core.CfnParameter(
            self,
            "SCHEDULE_EXPRESSION",
            type="String",
            description="cron expression to run the monitoring schedule. E.g., cron(0 * ? * * *), cron(0 0 ? * * *), etc.",
            allowed_pattern="^cron(\\S+\\s){5}\\S+$",
        )
        training_data = core.CfnParameter(
            self,
            "TRAINING_DATA",
            type="String",
            description="Location of the training data in PipelineAssets S3 Bucket.",
        )
        instance_type = core.CfnParameter(
            self,
            "INSTANCE_TYPE",
            type="String",
            description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
            allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=7,
        )
        instance_volume_size = core.CfnParameter(
            self,
            "INSTANCE_VOLUME_SIZE",
            type="Number",
            description="Instance volume size used in model moniroing jobs. E.g., 20",
        )
        monitoring_type = core.CfnParameter(
            self,
            "MONITORING_TYPE",
            type="String",
            allowed_values=["dataquality", "modelquality", "modelbias", "modelexplainability"],
            default="dataquality",
            description="Type of model monitoring. Possible values: DataQuality | ModelQuality | ModelBias | ModelExplainability ",
        )
        max_runtime_seconds = core.CfnParameter(
            self,
            "MAX_RUNTIME_SIZE",
            type="Number",
            description="Max runtime in secodns the job is allowed to run. E.g., 3600",
        )
        baseline_job_name = core.CfnParameter(
            self,
            "BASELINE_JOB_NAME",
            type="String",
            description="Unique name of the data baseline job",
            min_length=3,
            max_length=63,
        )
        monitoring_schedule_name = core.CfnParameter(
            self,
            "MONITORING_SCHEDULE_NAME",
            type="String",
            description="Unique name of the monitoring schedule job",
            min_length=3,
            max_length=63,
        )
        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_model_monitor(training_data, assets_bucket)

        # deploy stage
        # creating data baseline job
        baseline_lambda_arn, create_baseline_job_definition = create_data_baseline_job(
            self,
            blueprint_bucket,
            assets_bucket,
            baseline_job_name,
            training_data,
            baseline_job_output_location,
            endpoint_name,
            instance_type,
            instance_volume_size,
            max_runtime_seconds,
            core.Aws.STACK_NAME,
        )
        # creating monitoring schedule
        monitor_lambda_arn, create_monitoring_schedule_definition = create_monitoring_schedule(
            self,
            blueprint_bucket,
            assets_bucket,
            baseline_job_output_location,
            baseline_job_name,
            monitoring_schedule_name,
            monitoring_output_location,
            schedule_expression,
            endpoint_name,
            instance_type,
            instance_volume_size,
            max_runtime_seconds,
            monitoring_type,
            core.Aws.STACK_NAME,
        )
        # create invoking lambda policy
        invoke_lambdas_policy = iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[baseline_lambda_arn, monitor_lambda_arn],
        )
        # creating pipeline stages
        source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])
        deploy_stage_model_monitor = codepipeline.StageProps(
            stage_name="Deploy",
            actions=[
                create_baseline_job_definition,
                create_monitoring_schedule_definition,
            ],
        )

        pipeline_notification_topic = sns.Topic(
            self,
            "ModelMonitorPipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
        )

        # constructing Model Monitor pipelines
        model_monitor_pipeline = codepipeline.Pipeline(
            self,
            "ModelMonitorPipeline",
            stages=[source_stage, deploy_stage_model_monitor],
            cross_account_keys=False,
        )
        model_monitor_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        model_monitor_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            )
        )
        # add lambda permissions
        model_monitor_pipeline.add_to_role_policy(invoke_lambdas_policy)

        pipeline_child_nodes = model_monitor_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[24].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # attaching iam permissions to the pipelines
        pipeline_permissions(model_monitor_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="MonitorPipeline",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{model_monitor_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )

        core.CfnOutput(
            self,
            id="DataBaselineJobName",
            value=baseline_job_name.value_as_string,
        )
        core.CfnOutput(
            self,
            id="MonitoringScheduleJobName",
            value=monitoring_schedule_name.value_as_string,
        )
        core.CfnOutput(
            self,
            id="MonitoringScheduleType",
            value=monitoring_type.value_as_string,
        )
Example #25
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        NOTIFY_EMAIL = self.node.try_get_context("notify_email")
        SLACK_WEBHOOK_URL = self.node.try_get_context("webhook_url")
        WHITE_LIST_GROUP = self.node.try_get_context("white_list_group")

        if (not NOTIFY_EMAIL or not SLACK_WEBHOOK_URL or not WHITE_LIST_GROUP):
            logger.error(
                f"Required context variables for {id} were not provided!")
        else:

            # 1. Create Response Lambda
            lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks",
                                           "in_clt_01")
            response_lambda = _lambda.Function(
                self,
                "InClt01ResponseFunction",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler="clUnauthAccessResponse.lambda_handler",
                code=_lambda.Code.from_asset(lambda_dir_path),
                function_name="InClt01ResponseFunction",
                environment={
                    "webhook_url": SLACK_WEBHOOK_URL,
                    "white_list_group": WHITE_LIST_GROUP,
                })

            ep = {"source": ["aws.cloudtrail"]}

            # 2. Make that rule Track Cloudtrail events
            rule = events.Rule(
                self,
                "cdkRule",
                description=
                'Rule created by CDK for monitoring CloudTrail access',
                enabled=True,
                rule_name="CltAccessRule",
                event_pattern=ep)

            # 3. Add Permissions and role to Lambda
            action = [
                "iam:*", "organizations:DescribeAccount",
                "organizations:DescribeOrganization",
                "organizations:DescribeOrganizationalUnit",
                "organizations:DescribePolicy", "organizations:ListChildren",
                "organizations:ListParents",
                "organizations:ListPoliciesForTarget",
                "organizations:ListRoots", "organizations:ListPolicies",
                "organizations:ListTargetsForPolicy"
            ]

            response_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    actions=action,
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))

            # 4. Permission to send SNS notification
            response_lambda.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["sns:*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                ))

            # 5. Add Lambda as target of Rule
            rule.add_target(event_target.LambdaFunction(response_lambda))

            # 6. Create SNS topic and subscription
            topic = sns.Topic(self, "CLTAccessCDK", topic_name="CLTAccessCDK")
            # topic.grant_publish(iam.ServicePrincipal("*"))
            topic.add_subscription(subs.EmailSubscription(NOTIFY_EMAIL))

            # 7. Create IAM deny policy for CloudTrail access
            cltDenyAccessPolicy = iam.ManagedPolicy(
                self,
                "InCLT01DenyPolicy",
                managed_policy_name="CltDenyAccess",
                statements=[
                    iam.PolicyStatement(effect=iam.Effect.DENY,
                                        actions=["cloudtrail:*"],
                                        resources=["*"])
                ])

            # 8. Create IAM group
            cltAccessGroup = iam.Group(self,
                                       "cltAccessGroup",
                                       group_name="cltAccessGroup")
Example #26
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        kinesis_firehose: kf.CfnDeliveryStream,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Cloudwatch alarm for stale data
        snstopic = sns.Topic(
            self,
            "SNSTopic",
        )

        snstopicpolicy = sns.CfnTopicPolicy(
            self,
            "SNSTopicPolicy",
            policy_document={
                "Version": "2008-10-17",
                "Id": "__default_policy_ID",
                "Statement": [{
                    "Sid": "__default_statement_ID",
                    "Effect": "Allow",
                    "Principal": {"AWS": "*"},
                    "Action": [
                        "SNS:GetTopicAttributes",
                        "SNS:SetTopicAttributes",
                        "SNS:AddPermission",
                        "SNS:RemovePermission",
                        "SNS:DeleteTopic",
                        "SNS:Subscribe",
                        "SNS:ListSubscriptionsByTopic",
                        "SNS:Publish",
                        "SNS:Receive",
                    ],
                    "Resource": snstopic.topic_arn,
                    "Condition": {
                        "StringEquals": {"AWS:SourceOwner": self.account}
                    },
                }],
            },
            topics=[snstopic.topic_arn],
        )

        snstopic.add_subscription(
            subscriptions.EmailSubscription(
                email_address="*****@*****.**", ))

        cloudwatch_alarm = cloudwatch.CfnAlarm(
            self,
            "CloudWatchAlarm",
            alarm_name="Stale Data Alarm",
            alarm_description=
            "Notification that data has gone stale in the S3 bucket",
            actions_enabled=True,
            alarm_actions=[snstopic.topic_arn],
            metric_name="DeliveryToS3.DataFreshness",
            namespace="AWS/Firehose",
            statistic="Average",
            dimensions=[{
                "name": "DeliveryStreamName",
                "value": kinesis_firehose.ref,
            }],
            period=86400,
            evaluation_periods=1,
            datapoints_to_alarm=1,
            threshold=86400,
            comparison_operator="GreaterThanThreshold",
            treat_missing_data="breaching",
        )
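
For comparison, the same alarm can be written with the higher-level cloudwatch.Alarm construct, which wires the SNS action without handling raw ARNs. A sketch under the assumption that aws_cloudwatch_actions is imported as cloudwatch_actions (it is not among this example's imports):

# Same metric and thresholds as the CfnAlarm above, expressed as L2 constructs.
freshness_metric = cloudwatch.Metric(
    namespace="AWS/Firehose",
    metric_name="DeliveryToS3.DataFreshness",
    statistic="Average",
    period=core.Duration.days(1),
    dimensions={"DeliveryStreamName": kinesis_firehose.ref},
)
stale_data_alarm = cloudwatch.Alarm(
    self,
    "StaleDataAlarm",
    metric=freshness_metric,
    threshold=86400,
    evaluation_periods=1,
    comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD,
    treat_missing_data=cloudwatch.TreatMissingData.BREACHING,
)
# Route the alarm to the SNS topic created earlier in this stack.
stale_data_alarm.add_alarm_action(cloudwatch_actions.SnsAction(snstopic))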
Example #27
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        lambda_repository = aws_codecommit.Repository(
            self,
            "QuestionsLambdaRepository",
            repository_name="MythicalMysfits-QuestionsLambdaRepository",
        )

        core.CfnOutput(
            self,
            "questionsRepositoryCloneUrlHTTP",
            value=lambda_repository.repository_clone_url_http,
            description="Questions Lambda Repository Clone URL HTTP",
        )
        core.CfnOutput(
            self,
            "questionsRepositoryCloneUrlSSH",
            value=lambda_repository.repository_clone_url_ssh,
            description="Questions Lambda Repository Clone URL SSH",
        )

        table = aws_dynamodb.Table(
            self,
            "Table",
            table_name="MysfitsQuestionsTable",
            partition_key=aws_dynamodb.Attribute(
                name="QuestionId", type=aws_dynamodb.AttributeType.STRING),
            stream=aws_dynamodb.StreamViewType.NEW_IMAGE,
        )

        lambda_function_policy_statement_ddb = aws_iam.PolicyStatement()
        lambda_function_policy_statement_ddb.add_actions("dynamodb:PutItem")
        lambda_function_policy_statement_ddb.add_resources(table.table_arn)

        lambda_function_policy_statement_xray = aws_iam.PolicyStatement()
        lambda_function_policy_statement_xray.add_actions(
            "xray:PutTraceSegments",
            "xray:PutTelemetryRecords",
            "xray:GetSamplingRules",
            "xray:GetSamplingTargets",
            "xray:GetSamplingStatisticSummaries",
        )
        lambda_function_policy_statement_xray.add_all_resources()

        mysfits_post_question = aws_lambda.Function(
            self,
            "PostQuestionFunction",
            handler="mysfitsPostQuestion.postQuestion",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            description=
            "A microservice Lambda function that receives a new question submitted to the MythicalMysfits website from a user and inserts it into a DynamoDB database table.",
            memory_size=128,
            code=aws_lambda.Code.asset(
                os.path.join("..", "..", "lambda-questions",
                             "PostQuestionsService")),
            timeout=core.Duration.seconds(30),
            initial_policy=[
                lambda_function_policy_statement_ddb,
                lambda_function_policy_statement_xray,
            ],
            tracing=aws_lambda.Tracing.ACTIVE,
        )

        topic = aws_sns.Topic(
            self,
            "Topic",
            display_name="MythicalMysfitsQuestionsTopic",
            topic_name="MythicalMysfitsQuestionsTopic",
        )
        topic.add_subscription(subs.EmailSubscription(os.environ["SNS_EMAIL"]))

        post_question_lambda_function_policy_statement_sns = aws_iam.PolicyStatement()
        post_question_lambda_function_policy_statement_sns.add_actions("sns:Publish")
        post_question_lambda_function_policy_statement_sns.add_resources(topic.topic_arn)

        mysfits_process_question_stream = aws_lambda.Function(
            self,
            "ProcessQuestionStreamFunction",
            handler="mysfitsProcessStream.processStream",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            description=
            "An AWS Lambda function that will process all new questions posted to mythical mysfits and notify the site administrator of the question that was asked.",
            memory_size=128,
            code=aws_lambda.Code.asset(
                os.path.join("..", "..", "lambda-questions",
                             "ProcessQuestionsStream")),
            timeout=core.Duration.seconds(30),
            initial_policy=[
                post_question_lambda_function_policy_statement_sns,
                lambda_function_policy_statement_xray,
            ],
            tracing=aws_lambda.Tracing.ACTIVE,
            environment={"SNS_TOPIC_ARN": topic.topic_arn},
            events=[
                event.DynamoEventSource(
                    table,
                    starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
                    batch_size=1,
                )
            ],
        )

        questions_api_role = aws_iam.Role(
            self,
            "QuestionsApiRole",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
        )

        api_policy = aws_iam.PolicyStatement()
        api_policy.add_actions("lambda:InvokeFunction")
        api_policy.add_resources(mysfits_post_question.function_arn)
        aws_iam.Policy(
            self,
            "QuestionsApiPolicy",
            policy_name="questions_api_policy",
            statements=[api_policy],
            roles=[questions_api_role],
        )

        questions_integration = aws_apigateway.LambdaIntegration(
            mysfits_post_question,
            credentials_role=questions_api_role,
            integration_responses=[
                aws_apigateway.IntegrationResponse(
                    status_code="200",
                    response_templates={
                        "application/json": '{"status": "OK"}'
                    },
                )
            ],
        )

        api = aws_apigateway.LambdaRestApi(
            self,
            "APIEndpoint",
            handler=mysfits_post_question,
            options=aws_apigateway.RestApiProps(
                rest_api_name="Questions API Server"),
            proxy=False,
        )

        questions_method = api.root.add_resource("questions")
        questions_method.add_method(
            "POST",
            questions_integration,
            method_responses=[
                aws_apigateway.MethodResponse(status_code="200")
            ],
            authorization_type=aws_apigateway.AuthorizationType.NONE,
        )

        questions_method.add_method(
            "OPTIONS",
            aws_apigateway.MockIntegration(
                integration_responses=[
                    aws_apigateway.IntegrationResponse(
                        status_code="200",
                        response_parameters={
                            "method.response.header.Access-Control-Allow-Headers":
                            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'",
                            "method.response.header.Access-Control-Allow-Origin":
                            "'*'",
                            "method.response.header.Access-Control-Allow-Credentials":
                            "'false'",
                            "method.response.header.Access-Control-Allow-Methods":
                            "'OPTIONS,GET,PUT,POST,DELETE'",
                        },
                    )
                ],
                passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER,
                request_templates={"application/json": '{"statusCode": 200}'},
            ),
            method_responses=[
                aws_apigateway.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Credentials":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True,
                    },
                )
            ],
        )
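
ProcessQuestionStreamFunction consumes the table's NEW_IMAGE stream and notifies the site administrator through the SNS topic. A minimal sketch of what such a handler might look like (hypothetical; the actual code lives in lambda-questions/ProcessQuestionsStream):

import os

import boto3

sns_client = boto3.client("sns")

def processStream(event, context):
    for record in event["Records"]:
        # Only newly inserted questions carry a NewImage worth reporting.
        if record["eventName"] != "INSERT":
            continue
        new_image = record["dynamodb"]["NewImage"]
        sns_client.publish(
            TopicArn=os.environ["SNS_TOPIC_ARN"],
            Subject="New question submitted",
            Message=str(new_image),
        )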
Example #28
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = core.CfnParameter(
            self,
            "NOTIFICATION_EMAIL",
            type="String",
            description="email for pipeline outcome notifications",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            constraint_description="Please enter an email address with correct format ([email protected])",
            min_length=5,
            max_length=320,
        )
        blueprint_bucket_name = core.CfnParameter(
            self,
            "BLUEPRINT_BUCKET",
            type="String",
            description="Bucket name for blueprints of different types of ML Pipelines.",
            min_length=3,
        )
        assets_bucket_name = core.CfnParameter(
            self, "ASSETS_BUCKET", type="String", description="Bucket name for pipeline assets.", min_length=3
        )
        custom_container = core.CfnParameter(
            self,
            "CUSTOM_CONTAINER",
            default="",
            type="String",
            description=(
                "Should point to a zip file containing dockerfile and assets for building a custom model. "
                "If empty, built-in containers from the SageMaker Registry will be used."
            ),
        )
        model_framework = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK",
            default="",
            type="String",
            description="The ML framework which is used for training the model. E.g., xgboost, kmeans, etc.",
        )
        model_framework_version = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK_VERSION",
            default="",
            type="String",
            description="The version of the ML framework which is used for training the model. E.g., 1.1-2",
        )
        model_name = core.CfnParameter(
            self, "MODEL_NAME", type="String", description="An arbitrary name for the model.", min_length=1
        )
        model_artifact_location = core.CfnParameter(
            self,
            "MODEL_ARTIFACT_LOCATION",
            type="String",
            description="Path to model artifact inside assets bucket.",
        )
        inference_instance = core.CfnParameter(
            self,
            "INFERENCE_INSTANCE",
            type="String",
            description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
            allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=7,
        )
        # Resources #

        # access_bucket = s3.Bucket.from_bucket_name(self, "AccessBucket", access_bucket_name.value_as_string)
        assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_custom(
            model_artifact_location, assets_bucket, custom_container
        )

        # build stage
        build_action_definition, container_uri = build_action(self, source_output)

        # deploy stage
        sm_layer = sagemaker_layer(self, blueprint_bucket)
        # creating a sagemaker model
        model_lambda_arn, create_model_definition = create_model(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            model_artifact_location,
            custom_container,
            model_framework,
            model_framework_version,
            container_uri,
            sm_layer,
        )
        # creating a sagemaker endpoint
        endpoint_lambda_arn, create_endpoint_definition = create_endpoint(
            self, blueprint_bucket, assets_bucket, model_name, inference_instance
        )
        # Share stage
        configure_lambda_arn, configure_inference_definition = configure_inference(self, blueprint_bucket)

        # create invoking lambda policy
        invoke_lambdas_policy = iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[model_lambda_arn, endpoint_lambda_arn, configure_lambda_arn],
        )

        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
        )

        # creating pipeline stages
        source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])
        build_stage = codepipeline.StageProps(stage_name="Build", actions=[build_action_definition])
        deploy_stage_realtime = codepipeline.StageProps(
            stage_name="Deploy",
            actions=[
                create_model_definition,
                create_endpoint_definition,
            ],
        )
        share_stage = codepipeline.StageProps(stage_name="Share", actions=[configure_inference_definition])

        realtime_build_pipeline = codepipeline.Pipeline(
            self,
            "BYOMPipelineReatimeBuild",
            stages=[source_stage, build_stage, deploy_stage_realtime, share_stage],
            cross_account_keys=False,
        )
        realtime_build_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        realtime_build_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            )
        )
        # add lambda permissions
        realtime_build_pipeline.add_to_role_policy(invoke_lambdas_policy)
        # Enhancement: This is to find CDK object nodes so that unnecessary cfn-nag warnings can be suppressed.
        # There is room for improving the method in future versions to find CDK nodes without having to use
        # hardcoded index numbers (a commented sketch of one alternative follows the suppressions below).
        pipeline_child_nodes = realtime_build_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[25].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[30].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[36].node.default_child.cfn_options.metadata = suppress_list_function_policy()
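        # A possible index-free alternative (a sketch only, not the original
        # method; it assumes every matching node should get the same metadata):
        # for child in realtime_build_pipeline.node.find_all():
        #     cfn = child.node.default_child
        #     if isinstance(cfn, core.CfnResource) and cfn.cfn_resource_type == "AWS::S3::Bucket":
        #         cfn.cfn_options.metadata = suppress_pipeline_bucket()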
        # attaching iam permissions to the pipelines
        pipeline_permissions(realtime_build_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{realtime_build_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )
        core.CfnOutput(
            self,
            id="SageMakerModelName",
            value=model_name.value_as_string,
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointConfigName",
            value=f"{model_name.value_as_string}-endpoint-config",
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointName",
            value=f"{model_name.value_as_string}-endpoint",
        )
        core.CfnOutput(
            self,
            id="EndpointDataCaptureLocation",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_bucket.bucket_name}/datacapture",
            description="Endpoint data capture location (to be used by Model Monitor)",
        )
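
After the Deploy stage completes, the endpoint named in the SageMakerEndpointName output can be invoked directly. A hedged sketch (the endpoint name and payload are placeholders that depend on MODEL_NAME and the model's expected input format):

import boto3

runtime = boto3.client("sagemaker-runtime")
response = runtime.invoke_endpoint(
    EndpointName="my-model-endpoint",  # follows the f"{MODEL_NAME}-endpoint" convention above
    ContentType="text/csv",
    Body=b"1.0,2.0,3.0",
)
print(response["Body"].read().decode())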
Example #29
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # load configs from "./configurations/config.json"
        configs = {}
        with open("./configurations/config.json") as json_file:
            configs = json.load(json_file)

        # Default lambdas for testing
        mem_list = configs['MemorySizeList']
        cold_start_lambdas = {}
        for mem in mem_list:
            python38_lambda = lambda_.Function(
                self,
                id="coldstart_python38_" + str(mem) + "_",
                runtime=lambda_.Runtime.PYTHON_3_8,
                handler="lambda_function.lambda_handler",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/python38"))
            cold_start_lambdas['PYTHON38_' + str(mem)] = python38_lambda

        for mem in mem_list:
            nodejs12x_lambda = lambda_.Function(
                self,
                id="coldstart_nodejs12x" + str(mem) + "_",
                runtime=lambda_.Runtime.NODEJS_12_X,
                handler="index.handler",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/nodejs12x"))
            cold_start_lambdas['NODEJS12X_' + str(mem)] = nodejs12x_lambda

        for mem in mem_list:
            go1x_lambda = lambda_.Function(
                self,
                id="coldstart_go1x" + str(mem) + "_",
                runtime=lambda_.Runtime.GO_1_X,
                handler="hello",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/go1x"))
            cold_start_lambdas['GO1X_' + str(mem)] = go1x_lambda

        for mem in mem_list:
            netcore31_lambda = lambda_.Function(
                self,
                id="coldstart_netcore31" + str(mem) + "_",
                runtime=lambda_.Runtime.DOTNET_CORE_3_1,
                handler="LambdaTest::LambdaTest.LambdaHandler::handleRequest",
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/netcore31"),
                memory_size=mem,
            )
            cold_start_lambdas['NETCORE31_' + str(mem)] = netcore31_lambda

        for mem in mem_list:
            java11corretto_lambda = lambda_.Function(
                self,
                id="coldstart_java11corretto" + str(mem) + "_",
                runtime=lambda_.Runtime.JAVA_11,
                handler="example.Hello::handleRequest",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/java11corretto"))
            cold_start_lambdas['JAVA11_' + str(mem)] = java11corretto_lambda

        for mem in mem_list:
            ruby27_lambda = lambda_.Function(
                self,
                id="coldstart_ruby27" + str(mem) + "_",
                runtime=lambda_.Runtime.RUBY_2_7,
                handler="lambda_function.lambda_handler",
                memory_size=mem,
                tracing=lambda_.Tracing.ACTIVE,
                code=lambda_.Code.asset("./cold_start_lambdas/ruby27"))
            cold_start_lambdas['RUBY27_' + str(mem)] = ruby27_lambda

        # Caller
        cold_start_caller = lambda_.Function(
            self,
            id="cold_start_caller",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="ColdStartCaller.lambda_handler",
            code=lambda_.Code.asset("./cold_start_lambdas/cold_start_caller"),
            timeout=core.Duration.seconds(180))
        cold_start_caller.role.add_managed_policy(
            iam_.ManagedPolicy.from_aws_managed_policy_name(
                "AWSXrayReadOnlyAccess"))
        cold_start_caller.role.add_to_policy(
            iam_.PolicyStatement(effect=iam_.Effect.ALLOW,
                                 actions=['lambda:GetFunctionConfiguration'],
                                 resources=["*"]))
        for lambda_name in cold_start_lambdas:
            cold_start_caller.add_environment(
                lambda_name, cold_start_lambdas[lambda_name].function_arn)
            cold_start_lambdas[lambda_name].grant_invoke(cold_start_caller)

        # DynamoDB
        cold_start_table = dynamodb_.Table(
            self,
            id="cold_start_benchmark_table",
            partition_key=dynamodb_.Attribute(
                name="PK", type=dynamodb_.AttributeType.STRING),
            sort_key=dynamodb_.Attribute(name="SK",
                                         type=dynamodb_.AttributeType.NUMBER),
            time_to_live_attribute="TTL")
        cold_start_table.grant_write_data(cold_start_caller)
        cold_start_caller.add_environment('TABLE_NAME',
                                          cold_start_table.table_name)

        # S3
        life_cycle_rule = s3_.LifecycleRule(transitions=[
            s3_.Transition(storage_class=s3_.StorageClass.INFREQUENT_ACCESS,
                           transition_after=core.Duration.days(30))
        ])
        cold_start_backup_s3 = s3_.Bucket(self,
                                          "cold_start_benchmark_backup",
                                          lifecycle_rules=[life_cycle_rule])
        cold_start_backup_s3.grant_write(cold_start_caller)
        cold_start_caller.add_environment('BACKUP_BUCKET_NAME',
                                          cold_start_backup_s3.bucket_name)

        # CW event
        cron_job = events_.Rule(
            self,
            "cold_start_caller_cron_job",
            description="Run cold start caller twice every 1 hour",
            schedule=events_.Schedule.cron(minute="0,1"),
            targets=[targets_.LambdaFunction(cold_start_caller)])

        # alarm when caller failed, send email for notification
        errorAlarm = cloudwatch_.Alarm(
            self,
            "cold_start_caller_error_alarm",
            metric=cloudwatch_.Metric(
                metric_name="Errors",
                namespace="AWS/Lambda",
                period=core.Duration.minutes(5),
                statistic="Maximum",
                dimensions={"FunctionName": cold_start_caller.function_name}),
            evaluation_periods=1,
            datapoints_to_alarm=1,
            threshold=1,
            actions_enabled=True,
            alarm_description="Alarm when cold start caller failed",
            alarm_name="cold_start_caller_errer_alarm",
            comparison_operator=cloudwatch_.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            treat_missing_data=cloudwatch_.TreatMissingData.MISSING)
        cold_start_caller_error_alarm_topic = sns_.Topic(
            self,
            "cold_start_caller_error_alarm_topic",
            display_name="ColdStartCallerErrorAlarmTopic",
            topic_name="ColdStartCallerErrorAlarmTopic")
        cold_start_caller_error_alarm_topic.add_subscription(
            sns_subs_.EmailSubscription(
                configs['AlarmNotificationEmailAddress']))
        errorAlarm.add_alarm_action(
            cloudwatch_actions_.SnsAction(cold_start_caller_error_alarm_topic))

        # Summarizer
        cold_start_summarizer = lambda_.Function(
            self,
            id="cold_start_summarizer",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="ColdStartSummarizer.lambda_handler",
            code=lambda_.Code.asset(
                "./cold_start_lambdas/cold_start_summarizer"),
            timeout=core.Duration.seconds(10))
        cold_start_table.grant_read_write_data(cold_start_summarizer)
        cold_start_summarizer.add_environment('TABLE_NAME',
                                              cold_start_table.table_name)

        # setup CW event for summarizer
        cron_job_summarizer = events_.Rule(
            self,
            "cold_start_summarizer_cron_job",
            description="Run cold start summarizer once every day",
            schedule=events_.Schedule.cron(minute='30', hour='0'),
            targets=[targets_.LambdaFunction(cold_start_summarizer)])

        # error alarm for summarizer
        errorAlarm_summarizer = cloudwatch_.Alarm(
            self,
            "cold_start_summarizer_error_alarm",
            metric=cloudwatch_.Metric(metric_name='Errors',
                                      namespace='AWS/Lambda',
                                      period=core.Duration.minutes(5),
                                      statistic='Maximum',
                                      dimensions={
                                          'FunctionName':
                                          cold_start_summarizer.function_name
                                      }),
            evaluation_periods=1,
            datapoints_to_alarm=1,
            threshold=1,
            actions_enabled=True,
            alarm_description="Alarm when cold start summarizer failed",
            alarm_name="cold_start_summarizer_errer_alarm",
            comparison_operator=cloudwatch_.ComparisonOperator.
            GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            treat_missing_data=cloudwatch_.TreatMissingData.MISSING)
        cold_start_summarizer_error_alarm_topic = sns_.Topic(
            self,
            "cold_start_summarizer_error_alarm_topic",
            display_name="ColdStartSummarizerErrorAlarmTopic",
            topic_name="ColdStartSummarizerErrorAlarmTopic")
        cold_start_summarizer_error_alarm_topic.add_subscription(
            sns_subs_.EmailSubscription(
                configs['AlarmNotificationEmailAddress']))
        errorAlarm_summarizer.add_alarm_action(
            cloudwatch_actions_.SnsAction(
                cold_start_summarizer_error_alarm_topic))

        # GraphQL API
        graphql_api = appsync_.GraphqlApi(
            self,
            "cold_start_benchmark_graphql_api",
            name="cold_start_benchmark_graphql_api",
            authorization_config=appsync_.AuthorizationConfig(
                default_authorization=appsync_.AuthorizationMode(
                    authorization_type=appsync_.AuthorizationType.API_KEY,
                    api_key_config=appsync_.ApiKeyConfig(
                        description="cold_start_benchmark_graphql_api_key",
                        expires=core.Expiration.after(core.Duration.days(365)),
                        name="cold_start_benchmark_graphql_api_key"))),
            schema=appsync_.Schema.from_asset(
                './cold_start_benchmark/graphql_schema/schema.graphql'),
            xray_enabled=True)
        dynamodb_data_source = graphql_api.add_dynamo_db_data_source(
            id="cold_start_dynamodb_data_source", table=cold_start_table)
        dynamodb_data_source.create_resolver(
            field_name="listColdStartSummariesAfterTimestamp",
            type_name="Query",
            request_mapping_template=appsync_.MappingTemplate.from_file(
                './cold_start_benchmark/graphql_schema/request_mapping_template'
            ),
            response_mapping_template=appsync_.MappingTemplate.from_file(
                './cold_start_benchmark/graphql_schema/response_mapping_template'
            ))

        front_end_amplify_app = amplify_.App(
            self,
            "cold-start-front-end",
            app_name="cold_start_front_end",
            source_code_provider=amplify_.GitHubSourceCodeProvider(
                owner="ZzzGin",
                repository="cold-start-frontend-website",
                oauth_token=core.SecretValue.secrets_manager(
                    "zzzgin/github/token", json_field="zzzgin-github-token")))
        master_Branch = front_end_amplify_app.add_branch("master")
        domain = front_end_amplify_app.add_domain('zzzgin.com')
        domain.map_sub_domain(master_Branch, 'coldstart')
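
GitHubSourceCodeProvider resolves the OAuth token from Secrets Manager at synth time, so the secret must exist before cdk deploy. A hedged sketch of seeding it with boto3 (the token value is a placeholder):

import json

import boto3

secretsmanager = boto3.client("secretsmanager")
secretsmanager.create_secret(
    Name="zzzgin/github/token",
    SecretString=json.dumps({"zzzgin-github-token": "<github-oauth-token>"}),
)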
Example #30
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = core.CfnParameter(
            self,
            "NOTIFICATION_EMAIL",
            type="String",
            description="email for pipeline outcome notifications",
            allowed_pattern='^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
            constraint_description=
            "Please enter an email address with correct format ([email protected])",
            min_length=5,
            max_length=320)
        blueprint_bucket_name = core.CfnParameter(
            self,
            "BLUEPRINT_BUCKET",
            type="String",
            description=
            "Bucket name for blueprints of different types of ML Pipelines.",
            min_length=3)
        access_bucket_name = core.CfnParameter(
            self,
            "ACCESS_BUCKET",
            type="String",
            description="Bucket name for access logs.",
            min_length=3)
        custom_container = core.CfnParameter(
            self,
            "CUSTOM_CONTAINER",
            type="String",
            description=
            "Should point to a zip file containing dockerfile and assets for building a custom model. If empty, built-in containers from the SageMaker Registry will be used.",
        )
        model_framework = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK",
            type="String",
            description=
            "The ML framework which is used for training the model. E.g., xgboost, kmeans, etc.",
        )
        model_framework_version = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK_VERSION",
            type="String",
            description=
            "The version of the ML framework which is used for training the model. E.g., 1.1-2",
        )
        model_name = core.CfnParameter(
            self,
            "MODEL_NAME",
            type="String",
            description="An arbitrary name for the model.",
            min_length=1)
        model_artifact_location = core.CfnParameter(
            self,
            "MODEL_ARTIFACT_LOCATION",
            type="String",
            description="Path to model artifact inside assets bucket.",
        )
        training_data = core.CfnParameter(
            self,
            "TRAINING_DATA",
            type="String",
            description=
            "Location of the training data in PipelineAssets S3 Bucket.",
        )
        inference_instance = core.CfnParameter(
            self,
            "INFERENCE_INSTANCE",
            type="String",
            description=
            "Inference instance that inference requests will be running on. E.g., ml.m5.large",
            allowed_pattern='^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
            min_length=7)
        inference_type = core.CfnParameter(
            self,
            "INFERENCE_TYPE",
            type="String",
            allowed_values=["batch", "realtime"],
            default="realtime",
            description="Type of inference. Possible values: batch | realtime",
        )
        batch_inference_data = core.CfnParameter(
            self,
            "BATCH_INFERENCE_DATA",
            type="String",
            default="",
            description=
            "Location of batch inference data if inference type is set to batch. Otherwise, can be left empty.",
        )

        # Resources #

        access_bucket = s3.Bucket.from_bucket_name(
            self, "AccessBucket", access_bucket_name.value_as_string)
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(
            self, "BlueprintBucket", blueprint_bucket_name.value_as_string)
        # Creating assets bucket so that users can upload ML Models to it.
        assets_bucket = s3.Bucket(
            self,
            "pipeline-assets-" + str(uuid.uuid4()),
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_bucket,
            server_access_logs_prefix="BYOMBatchBuiltinStack",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
        assets_bucket.node.default_child.cfn_options.metadata = suppress_assets_bucket()

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action(
            model_artifact_location, assets_bucket)

        # deploy stage
        sm_layer = sagemaker_layer(self, blueprint_bucket)
        # creating a sagemaker model
        create_model_definition = create_model(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            model_artifact_location,
            custom_container,
            model_framework,
            model_framework_version,
            "",
            sm_layer,
        )
        # creating a batch transform job
        batch_transform_definition = batch_transform(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            inference_instance,
            batch_inference_data,
            sm_layer,
        )

        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(
                email_address=notification_email.value_as_string))

        # creating pipeline stages
        source_stage = codepipeline.StageProps(
            stage_name="Source", actions=[source_action_definition])
        deploy_stage_batch = codepipeline.StageProps(
            stage_name="Deploy",
            actions=[create_model_definition, batch_transform_definition],
        )

        batch_nobuild_pipeline = codepipeline.Pipeline(
            self,
            "BYOMPipelineBatchBuiltIn",
            stages=[source_stage, deploy_stage_batch],
            cross_account_keys=False,
        )
        pipeline_rule = batch_nobuild_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )),
            event_pattern=events.EventPattern(
                detail={'state': ['SUCCEEDED', 'FAILED']}),
        )
        batch_nobuild_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ]))
        # Enhancement: This is to find CDK object nodes so that unnecessary cfn-nag warnings can be suppressed.
        # There is room for improving the method in future versions to find CDK nodes without having to use
        # hardcoded index numbers.
        pipeline_child_nodes = batch_nobuild_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[24].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # pipeline_child_nodes[30].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # attaching iam permissions to the pipeline
        pipeline_permissions(batch_nobuild_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="AssetsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_bucket.bucket_name}",
            description="S3 Bucket to upload model artifact",
        )
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{batch_nobuild_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )