Example #1
    def __init__(self, scope: core.Construct, construct_id: str,
                 queue_context: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        q = dict(self.node.try_get_context(queue_context))

        queue_dlq = _sqs.Queue(self,
                               q["queue_dlq_name"],
                               queue_name=q["queue_dlq_name"])

        queue = _sqs.Queue(
            self,
            q["queue_name"],
            queue_name=q["queue_name"],
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=q["queue_dlq_max_receive_count"],
                queue=queue_dlq),
            encryption=_sqs.QueueEncryption.KMS_MANAGED,
            visibility_timeout=Duration.seconds(30),
            delivery_delay=Duration.seconds(15),
            retention_period=Duration.hours(14),
        )

        self.queue = queue
        self.queue_dlq = queue_dlq

        # Outputs

        core.CfnOutput(self, "QueueUrl", value=queue.queue_url)
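
The construct resolves every queue setting from CDK context. A minimal sketch of a matching call site, assuming the method belongs to a Stack subclass named QueueStack (an assumed name) and using a hypothetical context key "demo_queue":

from aws_cdk import core

app = core.App(context={
    "demo_queue": {  # shape inferred from the context lookups above
        "queue_name": "orders-queue",
        "queue_dlq_name": "orders-queue-dlq",
        "queue_dlq_max_receive_count": 3,
    }
})
QueueStack(app, "QueueStack", queue_context="demo_queue")
app.synth()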
Example #2
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        stream = kds.Stream(self,
                            "InputStream",
                            shard_count=1,
                            retention_period=Duration.hours(24))

        self._stream = stream
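
The leading underscore on self._stream suggests read-only access through a property; a minimal sketch of such an accessor (assumed, not shown in the snippet):

    @property
    def stream(self) -> kds.Stream:
        # Expose the wrapped Kinesis stream without allowing reassignment.
        return self._stream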
Example #3
    def __init__(self, scope: Construct, id: str, functions: LambdaLib, **kwargs) -> None:
        super().__init__(scope, id)

        # Step Function
        submit_job = tasks.LambdaInvoke(self, "Submit Job",
            lambda_function=functions.send_email_approval,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path=sfn.JsonPath.DISCARD
        )

        wait_x = sfn.Wait(self, "Wait",
            time=sfn.WaitTime.duration(Duration.minutes(2))
        )

        get_status = tasks.LambdaInvoke(self, "Get Job Status",
            lambda_function=functions.check_status_dynamo,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
            result_path="$.status"
        )

        restrict_es = tasks.LambdaInvoke(self, "Restrict ES Policy",
            lambda_function=functions.restric_es_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_rds = tasks.LambdaInvoke(self, "Restrict RDS",
            lambda_function=functions.restric_rds_policy,
            payload=sfn.TaskInput.from_object({'ExecutionContext.$': '$$'}),
        )

        restrict_es_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_ES_PUBLIC)
        restrict_rds_condition = sfn.Condition.string_equals("$.detail.additionalEventData.configRuleName", constants.CONFIG_RULE_RDS_PUBLIC)

        definition = (
            submit_job
            .next(wait_x)
            .next(get_status)
            .next(
                sfn.Choice(self, "Job Complete?")
                .when(sfn.Condition.string_equals("$.status.Payload.status", "Rejected!"), wait_x)
                # .when(sfn.Condition.string_equals("$.status.Payload.status", "NON_COMPLIANT"), final_task)
                # .when(sfn.Condition.string_equals("$.status.Payload.status", "Accepted!"), final_task)
                .otherwise(
                    sfn.Choice(self, "Remediation Choice")
                    .when(restrict_es_condition, restrict_es)
                    .when(restrict_rds_condition, restrict_rds)
                )
            )
        )

        self.state_machine = sfn.StateMachine(self, "StateMachine",
            definition=definition,
            timeout=Duration.hours(2)
        )
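
The constants module referenced above is not shown. A plausible sketch, where the rule-name values are assumptions and must match the AWS Config rules actually deployed in the account:

# constants.py -- sketch only; the values below are placeholders.
CONFIG_RULE_ES_PUBLIC = "es-domain-not-publicly-accessible"
CONFIG_RULE_RDS_PUBLIC = "rds-instance-public-access-check"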
Example #4
def base_kinesis_stream(construct, **kwargs):
    """
    Function that generates a Kinesis Data Stream.
    :param construct: Custom construct that will use this function; from the calling construct this is usually 'self'.
    :param kwargs: Requires 'stream_name' and 'shard_count'; 'retention_period' (in hours) is optional.
    :return: Kinesis Stream Construct.
    """
    stream_name = f"{construct.prefix}_{kwargs['stream_name']}_stream_{construct.environment_}"
    stream_retention_period = (Duration.hours(kwargs["retention_period"])
                               if kwargs.get("retention_period") is not None
                               else None)
    kinesis_stream = stream.Stream(
        construct,
        id=stream_name,
        stream_name=stream_name,
        shard_count=kwargs["shard_count"],
        retention_period=stream_retention_period,
    )

    return kinesis_stream
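
A hypothetical call site, assuming the calling construct defines the prefix and environment_ attributes the name template uses:

# Inside a construct where self.prefix = "analytics" and self.environment_ = "dev":
input_stream = base_kinesis_stream(
    self,
    stream_name="clicks",    # required
    shard_count=2,           # required
    retention_period=48,     # optional, in hours; omit to keep the service default
)
# Resulting stream name: "analytics_clicks_stream_dev"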
Example #5
    def add_low_efs_burst_credit_alarms(self, filesystem: FileSystem,
                                        email_address: str) -> None:
        '''
        Set up CloudWatch Alarms that will warn when the given filesystem's burst credits are below
        four different thresholds. We send an email to the given address when an Alarm breaches.
        '''
        # Set up the SNS Topic that will send the emails.
        # ====================
        # 1) KMS key to use to encrypt events within the SNS Topic. The Key is optional
        key = Key(
            self,
            'SNSEncryptionKey',
            description='Used to encrypt the SNS Topic for sending EFS Burst Credit alerts',
            enable_key_rotation=True,
            removal_policy=RemovalPolicy.DESTROY,
            trust_account_identities=True)
        key.grant(ServicePrincipal('cloudwatch.amazonaws.com'), 'kms:Decrypt',
                  'kms:GenerateDataKey')

        # 2) SNS Topic that will be alerted by CloudWatch and will send the email in response.
        sns_topic = Topic(self, 'BurstAlertEmailTopic', master_key=key)
        sns_topic.grant_publish(ServicePrincipal('cloudwatch.amazonaws.com'))
        sns_topic.add_subscription(EmailSubscription(email_address))
        alarm_action = SnsAction(sns_topic)

        # Set up the CloudWatch Alarm(s) and have them trigger SNS events when breached.
        # ======================
        # 1) CDK helper to define the CloudWatch Metric that we're interested in.
        burst_credits_metric = Metric(
            metric_name='BurstCreditBalance',
            namespace='AWS/EFS',
            dimensions={"FileSystemId": filesystem.file_system_id},
            # One 99-th percentile data point sample every hour
            period=Duration.hours(1),
            statistic='p99')

        # 2) Create the alarms
        thresholds = [
            {
                "id": 'CAUTION-EfsBurstCredits',
                "name": f"CAUTION Burst Credits - {filesystem.file_system_id}",
                "threshold": int(2.00 * 2**40),
                "message":
                f"CAUTION. 2 TiB Threshold Breached: EFS {filesystem.file_system_id} is depleting burst credits. Add data to the EFS to increase baseline throughput.",
                # Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
                "datapoints": 6
            },
            {
                "id": 'WARNING-EfsBurstCredits',
                "name": f"WARNING Burst Credits - {filesystem.file_system_id}",
                "threshold": int(1.25 * 2**40),
                "message":
                f"WARNING. 1.25 TiB Threshold Breached: EFS {filesystem.file_system_id} is depleting burst credits. Add data to the EFS to increase baseline throughput.",
                # Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
                "datapoints": 6
            },
            {
                "id": 'ALERT-EfsBurstCredits',
                "name": f"ALERT Burst Credits - {filesystem.file_system_id}",
                "threshold": int(0.50 * 2**40),
                "message":
                f"ALERT! 500 GiB Threshold Breached: EFS {filesystem.file_system_id} is running out of burst credits. Add data to the EFS to increase baseline throughput or else the Render Farm may cease operation.",
                # Alarm after 6 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 6hrs
                "datapoints": 6
            },
            {
                "id": 'EMERGENCY-EfsBurstCredits',
                "name":
                f"EMERGENCY Burst Credits - {filesystem.file_system_id}",
                "threshold": int(0.10 * 2**40),
                "message":
                f"EMERGENCY! 100 GiB Threshold Breached: EFS {filesystem.file_system_id} is running out of burst credits. Add data to the EFS to increase baseline throughput or else the Render Farm will cease operation.",
                # Alarm after 2 datapoints below threshold. We have 1 datapoint every hour. So, we alarm if below threshold for 2hrs
                "datapoints": 2
            },
        ]
        for config in thresholds:
            alarm = burst_credits_metric.create_alarm(
                self,
                config['id'],
                alarm_name=config['name'],
                actions_enabled=True,
                alarm_description=config['message'],
                treat_missing_data=TreatMissingData.NOT_BREACHING,
                threshold=config['threshold'],
                comparison_operator=ComparisonOperator.LESS_THAN_THRESHOLD,
                evaluation_periods=config['datapoints'])
            alarm.add_alarm_action(alarm_action)
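
The method above relies on imports the snippet omits; under CDK v1 a plausible import block would be:

from aws_cdk.core import Duration, RemovalPolicy
from aws_cdk.aws_cloudwatch import ComparisonOperator, Metric, TreatMissingData
from aws_cdk.aws_cloudwatch_actions import SnsAction
from aws_cdk.aws_efs import FileSystem
from aws_cdk.aws_iam import ServicePrincipal
from aws_cdk.aws_kms import Key
from aws_cdk.aws_sns import Topic
from aws_cdk.aws_sns_subscriptions import EmailSubscription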
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # TODO: add resources for Part 2, 3 of blog post.

        # Kinesis Data Streams
        kds = kinesis.Stream(self,
                             "KinesisTweets",
                             stream_name="kinesis-tweets",
                             shard_count=5,
                             retention_period=Duration.hours(48))

        # Fargate Task Role
        task_role = iam.Role(
            self,
            'task_role',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        # Policy to allow the task to put records into Kinesis
        task_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    'kinesis:PutRecord', 'kinesis:PutRecords',
                                    'kinesis:DescribeStream'
                                ],
                                resources=[kds.stream_arn]))
        # Policy to get secret from SecretsManager
        task_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    'secretsmanager:GetResourcePolicy',
                                    'secretsmanager:GetSecretValue',
                                    'secretsmanager:DescribeSecret',
                                    'secretsmanager:ListSecretVersionIds'
                                ],
                                resources=['*']))

        # VPC
        vpc = ec2.Vpc(
            self,
            'FargateVPC',
            max_azs=2  # Default is all AZs in the region
        )

        # ECS Cluster
        cluster = ecs.Cluster(self, 'EcsCluster', vpc=vpc)

        # Fargate Task Definition
        task_definition = ecs.FargateTaskDefinition(self,
                                                    'ServiceTaskDefinition',
                                                    cpu=256,
                                                    memory_limit_mib=512,
                                                    task_role=task_role)

        # Fargate log driver
        fargate_logger = ecs.AwsLogDriver(stream_prefix='fargate_twitter_logs')

        # Container
        task_definition.add_container(
            'ServiceContainer',
            image=ecs.ContainerImage.from_asset('./ECSContainerFiles'),
            environment={
                'KINESIS_STREAM_NAME': kds.stream_name,
                'REGION_NAME': self.region,
                'KEYWORD': 'trump',
                'SECRETS_NAME': 'TwitterAPISecrets'
            },
            logging=fargate_logger)

        # Fargate Service
        service = ecs.FargateService(self,
                                     'ServiceFargateService',
                                     task_definition=task_definition,
                                     assign_public_ip=True,
                                     cluster=cluster)
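
The container reads Twitter credentials from Secrets Manager under the literal name 'TwitterAPISecrets'. A one-time setup sketch with boto3; the JSON field names are assumptions about what the container code expects:

import boto3

secretsmanager = boto3.client("secretsmanager")
secretsmanager.create_secret(
    Name="TwitterAPISecrets",  # must match SECRETS_NAME in the container environment
    SecretString='{"consumer_key": "...", "consumer_secret": "...", '
                 '"access_token": "...", "access_token_secret": "..."}',
)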
Example #7
from aws_cdk.core import Duration

MAX_SESSION_DURATION = Duration.hours(12)
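
Twelve hours is the maximum session duration IAM allows for an assumed role, which is presumably why the constant stops there. A minimal sketch of consuming it (the stack and role names are hypothetical):

from aws_cdk import core
import aws_cdk.aws_iam as iam

class RoleStack(core.Stack):  # hypothetical host for the role
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        iam.Role(
            self, "LongSessionRole",
            assumed_by=iam.AccountRootPrincipal(),
            max_session_duration=MAX_SESSION_DURATION,  # IAM rejects values above 12h
        )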
Example #8
    def __init__(
        self,
        scope: Construct,
        id: str,
        context: "Context",
        team_name: str,
        team_policies: List[str],
        image: Optional[str],
    ) -> None:
        self.scope = scope
        self.id = id
        self.context: "Context" = context
        self.team_name: str = team_name
        self.team_policies: List[str] = team_policies
        self.image: Optional[str] = image
        super().__init__(
            scope=scope,
            id=id,
            stack_name=id,
            env=Environment(account=self.context.account_id,
                            region=self.context.region),
        )
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Env", value=f"orbit-{self.context.name}")
        Tags.of(scope=cast(IConstruct, self)).add(key="TeamSpace",
                                                  value=self.team_name)

        if self.context.networking.vpc_id is None:
            raise ValueError("self.context.networking.vpc_id is None!")
        self.i_vpc = ec2.Vpc.from_vpc_attributes(
            scope=self,
            id="vpc",
            vpc_id=self.context.networking.vpc_id,
            availability_zones=cast(
                List[str], self.context.networking.availability_zones),
        )
        self.i_isolated_subnets = Ec2Builder.build_subnets(
            scope=self, subnet_manifests=context.networking.isolated_subnets)
        self.i_private_subnets = Ec2Builder.build_subnets(
            scope=self, subnet_manifests=context.networking.private_subnets)
        administrator_arns: List[str] = []  # A place to add other admins if needed for KMS
        admin_principals = iam.CompositePrincipal(
            *[iam.ArnPrincipal(arn) for arn in administrator_arns],
            iam.ArnPrincipal(f"arn:aws:iam::{self.context.account_id}:root"),
        )
        self.team_kms_key: kms.Key = kms.Key(
            self,
            id="kms-key",
            removal_policy=core.RemovalPolicy.RETAIN,
            enabled=True,
            enable_key_rotation=True,
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["kms:*"],
                    resources=["*"],
                    principals=[cast(iam.IPrincipal, admin_principals)],
                )
            ]),
        )
        self.team_security_group: ec2.SecurityGroup = Ec2Builder.build_team_security_group(
            scope=self,
            context=context,
            team_name=self.team_name,
            vpc=self.i_vpc)
        self.policies: List[str] = self.team_policies
        if self.context.scratch_bucket_arn:
            self.scratch_bucket: s3.Bucket = cast(
                s3.Bucket,
                s3.Bucket.from_bucket_attributes(
                    scope=self,
                    id="scratch_bucket",
                    bucket_arn=self.context.scratch_bucket_arn,
                    bucket_name=self.context.scratch_bucket_arn.split(":::")[1],
                ),
            )
        else:
            raise Exception(
                "Scratch bucket was not provided in Manifest ('ScratchBucketArn')"
            )

        self.role_eks_pod = IamBuilder.build_team_role(
            scope=self,
            context=self.context,
            team_name=self.team_name,
            policy_names=self.policies,
            scratch_bucket=cast(s3.IBucket, self.scratch_bucket),
            team_kms_key=self.team_kms_key,
            session_timeout=Duration.hours(12),
        )
        shared_fs_name: str = f"orbit-{context.name}-{self.team_name}-shared-fs"
        if context.shared_efs_fs_id is None:
            raise Exception(
                "Shared EFS File system ID was not provided in Manifest ('SharedEfsFsId')"
            )

        if context.shared_efs_sg_id is None:
            raise Exception(
                "Shared EFS File system security group ID was not provided in Manifest ('SharedEfsSgId')"
            )

        self.shared_fs: efs.FileSystem = cast(
            efs.FileSystem,
            efs.FileSystem.from_file_system_attributes(
                scope=self,
                id=shared_fs_name,
                file_system_id=context.shared_efs_fs_id,
                security_group=ec2.SecurityGroup.from_security_group_id(
                    scope=self,
                    id="team_sec_group",
                    security_group_id=context.shared_efs_sg_id),
            ),
        )

        self.efs_ap: efs.AccessPoint = EfsBuilder.build_file_system_access_point(
            scope=self,
            team_name=team_name,
            shared_fs=self.shared_fs,
            path="shared",
            ap_name=f"{team_name}-shared")
        self.efs_ap_team_private: efs.AccessPoint = EfsBuilder.build_file_system_access_point(
            scope=self,
            team_name=team_name,
            shared_fs=self.shared_fs,
            path="team",
            ap_name=f"{team_name}-private")

        team_ssm_parameter_name: str = f"/orbit/{context.name}/teams/{self.team_name}/team"
        self.context_parameter: ssm.StringParameter = ssm.StringParameter(
            scope=self,
            id=team_ssm_parameter_name,
            string_value=json.dumps({
                "EfsId": self.shared_fs.file_system_id,
                "EfsApId": self.efs_ap.access_point_id,
                "EfsApIdPrivate": self.efs_ap_team_private.access_point_id,
                "EksPodRoleArn": self.role_eks_pod.role_arn,
                "ScratchBucket": self.scratch_bucket.bucket_name,
                "TeamKmsKeyArn": self.team_kms_key.key_arn,
                "TeamSecurityGroupId": self.team_security_group.security_group_id,
            }),
            type=ssm.ParameterType.STRING,
            description="Orbit Workbench Team Context.",
            parameter_name=team_ssm_parameter_name,
            simple_name=False,
            tier=ssm.ParameterTier.INTELLIGENT_TIERING,
        )
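
Downstream code can read the team context back from SSM. A minimal boto3 sketch, with placeholder env and team names:

import json

import boto3

ssm_client = boto3.client("ssm")
# Name mirrors team_ssm_parameter_name: /orbit/<env-name>/teams/<team-name>/team
parameter = ssm_client.get_parameter(Name="/orbit/my-env/teams/my-team/team")
team_context = json.loads(parameter["Parameter"]["Value"])
print(team_context["EksPodRoleArn"], team_context["ScratchBucket"])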