Example #1
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # Lambda Function
        lambdaFn = _lambda.Function(self,
                                    "SNSEventHandler",
                                    runtime=_lambda.Runtime.PYTHON_3_9,
                                    code=_lambda.Code.from_asset("lambda"),
                                    handler="handler.main",
                                    timeout=Duration.seconds(10))

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(self,
                      'logs',
                      log_group_name=f"/aws/lambda/{lambdaFn.function_name}",
                      removal_policy=RemovalPolicy.DESTROY,
                      retention=logs.RetentionDays.ONE_DAY)

        # SNS topic
        topic = sns.Topic(self,
                          'sns-to-lambda-topic-test',
                          display_name='My SNS topic')

        # subscribe Lambda to SNS topic
        topic.add_subscription(subs.LambdaSubscription(lambdaFn))

        # Output information about the created resources
        CfnOutput(self,
                  'snsTopicArn',
                  value=topic.topic_arn,
                  description='The arn of the SNS topic')
        CfnOutput(self,
                  'functionName',
                  value=lambdaFn.function_name,
                  description='The name of the handler function')
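
Example #1 reads its function code from a local lambda/ asset directory with handler "handler.main". A minimal sketch of what that handler might look like (illustrative; the file itself is not part of this listing):

# lambda/handler.py -- hypothetical handler for the asset referenced above
def main(event, context):
    # SNS invokes the function with one or more records; the message body
    # is the string under Records[].Sns.Message
    for record in event.get('Records', []):
        print('Received SNS message:', record['Sns']['Message'])
    return {'statusCode': 200}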
Example #2
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # SNS topic
        MySnsTopic = sns.Topic(self, "MySnsTopic")

        # Custom EventBridge Bus
        custom_bus = events.EventBus(self,
                                     "bus",
                                     event_bus_name="test-bus-cdk")

        # EventBridge Rule
        rule = events.Rule(self, "rule", event_bus=custom_bus)

        # Event Pattern to filter events
        rule.add_event_pattern(source=["my-application"],
                               detail_type=["message"])

        # SNS topic as target for EventBridge rule
        rule.add_target(targets.SnsTopic(MySnsTopic))

        # CDK Outputs
        CfnOutput(self,
                  "SNS topic name",
                  description="SNS topic name",
                  value=MySnsTopic.topic_name)
        CfnOutput(self,
                  "SNS topic ARN",
                  description="SNS topic ARN",
                  value=MySnsTopic.topic_arn)
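Example #3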
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the queue
        MySqsQueue = sqs.Queue(self, "MySqsQueue")

        # Create the Topic
        MySnsTopic = sns.Topic(self, "MySnsTopic")

        # Create an SQS subscription object for the SNS topic
        sqsSubscription = snssubs.SqsSubscription(MySqsQueue)

        # Add the SQS subscription to the sns topic
        MySnsTopic.add_subscription(sqsSubscription)

        # Add a policy statement to the queue policy (created as part of the
        # new queue) so the SNS topic is allowed to send messages to it
        MySqsQueue.add_to_resource_policy(
            iam.PolicyStatement(actions=['sqs:SendMessage'],
                                effect=iam.Effect.ALLOW,
                                conditions={
                                    'ArnEquals': {
                                        'aws:SourceArn': MySnsTopic.topic_arn
                                    }
                                },
                                resources=[MySqsQueue.queue_arn],
                                principals=[
                                    iam.ServicePrincipal('sns.amazonaws.com')
                                ]))

        CfnOutput(self, "SQS queue name", description="SQS queue name", value=MySqsQueue.queue_name)
        CfnOutput(self, "SQS queue ARN", description="SQS queue arn", value=MySqsQueue.queue_arn)
        CfnOutput(self, "SQS queue URL", description="SQS queue URL", value=MySqsQueue.queue_url)
        CfnOutput(self, "SNS topic name", description="SNS topic name", value=MySnsTopic.topic_name)
        CfnOutput(self, "SNS topic ARN", description="SNS topic ARN", value=MySnsTopic.topic_arn)
 def _setup_sqlserver(self) -> None:
     port = 1433
     database = "test"
     schema = "dbo"
     sqlserver = rds.DatabaseInstance(
         self,
         "aws-data-wrangler-sqlserver-instance",
         instance_identifier="sqlserver-instance-wrangler",
         engine=rds.DatabaseInstanceEngine.sql_server_ex(version=rds.SqlServerEngineVersion.VER_15),
         instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL),
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         vpc=self.vpc,
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         publicly_accessible=True,
         s3_import_role=self.rds_role,
         s3_export_role=self.rds_role,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-sqlserver-glue-connection",
         description="Connect to SQL Server.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-sqlserver",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:sqlserver://{sqlserver.instance_endpoint.hostname}:{port};databaseName={database}",  # noqa: E501
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-sqlserver-secret",
         secret_name="aws-data-wrangler/sqlserver",
         description="SQL Server credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "sqlserver",
                     "host": sqlserver.instance_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": sqlserver.instance_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "SqlServerAddress", value=sqlserver.instance_endpoint.hostname)
     CfnOutput(self, "SqlServerPort", value=str(port))
     CfnOutput(self, "SqlServerDatabase", value=database)
     CfnOutput(self, "SqlServerSchema", value=schema)
 def _setup_oracle(self) -> None:
     port = 1521
     database = "ORCL"
     schema = "TEST"
     oracle = rds.DatabaseInstance(
         self,
         "aws-data-wrangler-oracle-instance",
         instance_identifier="oracle-instance-wrangler",
         engine=rds.DatabaseInstanceEngine.oracle_ee(version=rds.OracleEngineVersion.VER_19_0_0_0_2021_04_R1),
         instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL),
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         vpc=self.vpc,
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         publicly_accessible=True,
         s3_import_role=self.rds_role,
         s3_export_role=self.rds_role,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-oracle-glue-connection",
         description="Connect to Oracle.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-oracle",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:oracle:thin://@{oracle.instance_endpoint.hostname}:{port}/{database}",  # noqa: E501
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-oracle-secret",
         secret_name="aws-data-wrangler/oracle",
         description="Oracle credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "oracle",
                     "host": oracle.instance_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": oracle.instance_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "OracleAddress", value=oracle.instance_endpoint.hostname)
     CfnOutput(self, "OraclePort", value=str(port))
     CfnOutput(self, "OracleDatabase", value=database)
     CfnOutput(self, "OracleSchema", value=schema)
Example #6
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # This resource alone will create a private and a public subnet in each AZ, as well as the NAT and internet gateways
        vpc = ec2.Vpc(self, "VPC")

        # Number of Batch compute environments to create
        count = 3

        batch_ce = []

        # For loop to create Batch Compute Environments
        for i in range(count):
            name = "MyBatchARM64Env" + str(i)
            batch_environment = batch.ComputeEnvironment(
                self,
                name,
                compute_resources=batch.ComputeResources(
                    type=batch.ComputeResourceType.SPOT,
                    bid_percentage=75,
                    instance_types=[
                        ec2.InstanceType("a1.medium"),
                        ec2.InstanceType("a1.large")
                    ],
                    image=ecs.EcsOptimizedImage.amazon_linux2(
                        ecs.AmiHardwareType.ARM),
                    vpc_subnets=ec2.SubnetSelection(
                        subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT),
                    vpc=vpc))

            batch_ce.append(
                batch.JobQueueComputeEnvironment(
                    compute_environment=batch_environment, order=i))

        # Create AWS Batch Job Queue and associate all Batch CE.
        self.batch_queue = batch.JobQueue(self,
                                          "JobQueueArm64",
                                          compute_environments=batch_ce)

        # Create Job Definition to submit job in batch job queue.
        batch_jobDef = batch.JobDefinition(
            self,
            "MyJobDefArm64",
            job_definition_name="CDKJobDefArm64",
            container=batch.JobDefinitionContainer(
                image=ecs.ContainerImage.from_registry(
                    "public.ecr.aws/amazonlinux/amazonlinux:latest"),
                command=["sleep", "60"],
                memory_limit_mib=512,
                vcpus=1),
        )

        # Output resources
        CfnOutput(self, "BatchJobQueue", value=self.batch_queue.job_queue_name)
        CfnOutput(self,
                  "JobDefinition",
                  value=batch_jobDef.job_definition_name)
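Example #7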
 def _cfnoutput(self):
     CfnOutput(self, "s3Bucket", value=self.s3_bucket.bucket_name)
     CfnOutput(self,
               "StepFunctionsWorkflowExecutionRole",
               value=self.sfn_wf_exec_role.role_arn)
     CfnOutput(self,
               "AmazonSageMakerExecutionRole",
               value=self.sagemaker_exec_role.role_arn)
     CfnOutput(self, "SecretsManagerArn", value=self.rds.secret.secret_arn)
     CfnOutput(self, "StepFunctionsName", value=self.sfn_name)
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "MyQueue",
                           visibility_timeout=Duration.seconds(300))

        # Create the AWS Lambda function to subscribe to Amazon SQS queue
        # The source code is in './lambda' directory
        lambda_function = _lambda.Function(
            self,
            "MyLambdaFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="get_messages.handler",
            code=_lambda.Code.from_asset("lambda"),
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambda_function.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)
        # Grant the AWS Lambda function permission to consume messages from the Amazon SQS queue
        queue.grant_consume_messages(lambda_function)

        # Configure the Amazon SQS queue to trigger the AWS Lambda function
        lambda_function.add_event_source(_event.SqsEventSource(queue))

        CfnOutput(self,
                  "FunctionName",
                  value=lambda_function.function_name,
                  export_name='FunctionName',
                  description='Function name')

        CfnOutput(self,
                  "QueueName",
                  value=queue.queue_name,
                  export_name='QueueName',
                  description='SQS queue name')

        CfnOutput(self,
                  "QueueArn",
                  value=queue.queue_arn,
                  export_name='QueueArn',
                  description='SQS queue ARN')

        CfnOutput(self,
                  "QueueUrl",
                  value=queue.queue_url,
                  export_name='QueueUrl',
                  description='SQS queue URL')
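Example #9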
    def _setup_neptune(self, iam_enabled: bool = False, port: int = 8182) -> None:
        cluster = neptune.DatabaseCluster(
            self,
            "DataWrangler",
            vpc=self.vpc,
            instance_type=neptune.InstanceType.R5_LARGE,
            iam_authentication=iam_enabled,
            security_groups=[self.db_security_group],
        )

        CfnOutput(self, "NeptuneClusterEndpoint", value=cluster.cluster_endpoint.hostname)
        CfnOutput(self, "NeptuneReaderEndpoint", value=cluster.cluster_read_endpoint.hostname)
        CfnOutput(self, "NeptunePort", value=str(port))
        CfnOutput(self, "NeptuneIAMEnabled", value=str(iam_enabled))
    def __init__(self, scope, construct_id, props, **kwargs):
        super().__init__(scope, construct_id, **kwargs)

        site_domain_name = props["domain_name"]
        if props["sub_domain_name"]:
            site_domain_name = (
                f'{props["sub_domain_name"]}.{props["domain_name"]}')

        # If S3 website endpoint enabled, it creates the static site using a
        # public S3 as the origin. Otherwise, it creates a private S3 as the
        # origin.
        if props["enable_s3_website_endpoint"]:
            site = StaticSitePublicS3(
                self,
                f"{props['namespace']}-construct",
                site_domain_name=site_domain_name,
                domain_certificate_arn=props["domain_certificate_arn"],
                origin_referer_header_parameter_name=props[
                    "origin_custom_header_parameter_name"],
                hosted_zone_id=props["hosted_zone_id"],
                hosted_zone_name=props["hosted_zone_name"],
            )
        else:
            site = StaticSitePrivateS3(
                self,
                f"{props['namespace']}-construct",
                site_domain_name=site_domain_name,
                domain_certificate_arn=props["domain_certificate_arn"],
                hosted_zone_id=props["hosted_zone_id"],
                hosted_zone_name=props["hosted_zone_name"],
            )

        # Add stack outputs
        CfnOutput(
            self,
            "SiteBucketName",
            value=site.bucket.bucket_name,
        )
        CfnOutput(
            self,
            "DistributionId",
            value=site.distribution.distribution_id,
        )
        CfnOutput(
            self,
            "CertificateArn",
            value=site.certificate.certificate_arn,
        )
Example #11
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # SQS Queue
        queue = sqs.Queue(
            self, "queue",
        )

        # Custom EventBridge Bus
        custom_bus = events.EventBus(
            self, "bus",
            event_bus_name="test-bus-cdk"
        )

        # EventBridge Rule
        rule = events.Rule(
            self, "rule",
            event_bus=custom_bus
        )
        rule.add_event_pattern(
            source=["my-cdk-application"],
            detail_type=["message-for-queue"]
        )
        rule.add_target(targets.SqsQueue(queue))


        # Stack Outputs
        CfnOutput(
            self, "QueueURL",
            description="URL of SQS Queue",
            value=queue.queue_url
        )
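
To exercise this rule, a client can publish a matching event to the custom bus. A boto3 sketch (illustrative client-side code, not part of the stack):

# Hypothetical publisher for an event matching the rule's pattern
import json
import boto3

events_client = boto3.client('events')
events_client.put_events(Entries=[{
    'EventBusName': 'test-bus-cdk',
    'Source': 'my-cdk-application',
    'DetailType': 'message-for-queue',
    'Detail': json.dumps({'message': 'hello'}),
}])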
Example #12
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        self.vpc = ec2.Vpc(self, "VPC",
                           max_azs=2,
                           cidr="10.10.0.0/16",
                           # This configuration creates 3 subnet groups in 2 AZs = 6 subnets.
                           subnet_configuration=[ec2.SubnetConfiguration(
                               subnet_type=ec2.SubnetType.PUBLIC,
                               name="Public",
                               cidr_mask=24
                           ), ec2.SubnetConfiguration(
                               subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT,
                               name="Private",
                               cidr_mask=24
                           ), ec2.SubnetConfiguration(
                               subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
                               name="DB",
                               cidr_mask=24
                           )
                           ],
                           # nat_gateway_provider=ec2.NatProvider.gateway(),
                           nat_gateways=2,
                           )
        CfnOutput(self, "Output",
                       value=self.vpc.vpc_id)
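Example #13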
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        _vpc = ec2.Vpc(self, 'theVpc', max_azs=2)

        _fs = efs.FileSystem(self,
                             'theFileSystem',
                             vpc=_vpc,
                             removal_policy=RemovalPolicy.DESTROY)

        _access_point = _fs.add_access_point(
            'theAccessPoint',
            create_acl=efs.Acl(owner_gid='1001',
                               owner_uid='1001',
                               permissions='750'),
            path="/export/lambda",
            posix_user=efs.PosixUser(gid="1001", uid="1001"))

        _efs_lambda = _lambda.Function(
            self,
            'lambdaEfsHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_function'),
            handler='lambda_function.lambda_handler',
            vpc=_vpc,
            filesystem=_lambda.FileSystem.from_efs_access_point(
                _access_point, '/mnt/text'))

        _api = api_gateway.HttpApi(
            self,
            'EFS LAMBDA APIGATEWAY',
            default_integration=integrations.HttpLambdaIntegration(
                id="LambdaFunction", handler=_efs_lambda))

        CfnOutput(self, 'API Url', value=_api.url)
Example #14
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        lambda_edge = _lambda.Function(
            self,
            'LambdaEdge',
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler='index.handler',
            code=_lambda.Code.from_asset('lambda'),
        )

        # Create an S3 bucket to serve as the CloudFront origin; its content is
        # not used, but an origin is a required field
        hosting_bucket = s3.Bucket(self, "MyHostingBucket")

        my_distribution = cloudfront.Distribution(
            self,
            "MyDistribution",
            default_behavior=cloudfront.BehaviorOptions(
                origin=origins.S3Origin(hosting_bucket),
                edge_lambdas=[
                    cloudfront.EdgeLambda(
                        function_version=lambda_edge.current_version,
                        event_type=cloudfront.LambdaEdgeEventType.ORIGIN_REQUEST)
                ]),
            comment='Dynamic content generation using Lambda@Edge')

        CfnOutput(self,
                  "DomainName",
                  value=my_distribution.domain_name,
                  export_name='DomainName',
                  description='CloudFront Domain Name')
Example #15
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create VPC and Fargate Cluster
        # NOTE: Limit AZs to avoid reaching resource quotas
        vpc = ec2.Vpc(self, "MyVpc", max_azs=2)

        cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "FargateService",
            cluster=cluster,
            task_image_options={
                'image':
                ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            })

        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(80),
            description="Allow http inbound from VPC")

        CfnOutput(self,
                  "LoadBalancerDNS",
                  value=fargate_service.load_balancer.load_balancer_dns_name)
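Example #16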
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # CloudWatch Logs Group
        log_group = logs.LogGroup(
            self, "logs",
            retention=logs.RetentionDays.ONE_DAY,
            removal_policy = RemovalPolicy.DESTROY
        )

        # Custom EventBridge Bus
        custom_bus = events.EventBus(
            self, "bus",
            event_bus_name="test-bus-cdk"
        )

        # EventBridge Rule
        rule = events.Rule(
            self, "rule",
            event_bus=custom_bus
        )
        rule.add_event_pattern(
            source=["my-cdk-application"],
            detail_type=["message"]
        )
        rule.add_target(targets.CloudWatchLogGroup(log_group))

        CfnOutput(
            self, "LogGroupName",
            description="Name of CloudWatch Log Group",
            value=log_group.log_group_name
        )
Example #17
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create a cluster
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

        # Create Fargate Service
        fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
            self,
            "sample-app",
            cluster=cluster,
            task_image_options={
                'image':
                ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            })

        fargate_service.service.connections.security_groups[0].add_ingress_rule(
            peer=ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(80),
            description="Allow http inbound from VPC")

        # Setup AutoScaling policy
        scaling = fargate_service.service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=Duration.seconds(60),
            scale_out_cooldown=Duration.seconds(60),
        )

        CfnOutput(self,
                  "LoadBalancerDNS",
                  value=fargate_service.load_balancer.load_balancer_dns_name)
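Example #18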
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the Lambda function to receive the request
        # The source code is in './src' directory
        lambda_fn = lambda_.Function(
            self, "MyFunction",
            runtime=lambda_.Runtime.PYTHON_3_9,
            handler="index.handler",
            code=lambda_.Code.from_asset(os.path.join(DIRNAME, "src")),
            environment={
                "env_var1": "value 1",
                "env_var2": "value 2",
            }
        )

        # Create the HTTP API with CORS
        http_api = _apigw.HttpApi(
            self, "MyHttpApi",
            cors_preflight=_apigw.CorsPreflightOptions(
                allow_methods=[_apigw.CorsHttpMethod.GET],
                allow_origins=["*"],
                max_age=Duration.days(10),
            )
        )

        # Add a route to GET /
        http_api.add_routes(
            path="/",
            methods=[_apigw.HttpMethod.GET],
            integration=_integrations.HttpLambdaIntegration("LambdaProxyIntegration", handler=lambda_fn),
        )

        # Outputs
        CfnOutput(self, "API Endpoint", description="API Endpoint", value=http_api.api_endpoint)
    def __init__(self, scope: Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Subnet configurations for a public and private tier
        subnet1 = SubnetConfiguration(
                name="Public",
                subnet_type=SubnetType.PUBLIC,
                cidr_mask=24)
        subnet2 = SubnetConfiguration(
                name="Private",
                subnet_type=SubnetType.PRIVATE_WITH_NAT,
                cidr_mask=24)

        vpc = Vpc(self,
                  "TheVPC",
                  cidr="10.0.0.0/16",
                  enable_dns_hostnames=True,
                  enable_dns_support=True,
                  max_azs=2,
                  nat_gateway_provider=NatProvider.gateway(),
                  nat_gateways=1,
                  subnet_configuration=[subnet1, subnet2]
                  )

        # This will export the VPC's ID in CloudFormation under the key
        # 'vpcid'
        CfnOutput(self, "vpcid", value=vpc.vpc_id)

        # Prepares output attributes to be passed into other stacks
        # In this case, it is our VPC and subnets.
        self.output_props = props.copy()
        self.output_props['vpc'] = vpc
        self.output_props['subnets'] = vpc.public_subnets
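
Because the stack exposes output_props, a dependent stack can receive the VPC and subnets at synth time. A hypothetical app.py wiring (the stack class names here are assumptions, not from the listing):

# Hypothetical wiring that threads output_props into a consumer stack
app = App()
network_stack = NetworkStack(app, 'network', props={})
app_stack = ApplicationStack(app, 'application', props=network_stack.output_props)
app.synth()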
Example #20
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, "MyVpc", max_azs=2)

        cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

        asg = autoscaling.AutoScalingGroup(
            self,
            "DefaultAutoScalingGroup",
            instance_type=ec2.InstanceType("t2.micro"),
            machine_image=ecs.EcsOptimizedImage.amazon_linux2(),
            vpc=vpc,
        )
        capacity_provider = ecs.AsgCapacityProvider(self,
                                                    "AsgCapacityProvider",
                                                    auto_scaling_group=asg)
        cluster.add_asg_capacity_provider(capacity_provider)

        ecs_service = ecs_patterns.NetworkLoadBalancedEc2Service(
            self,
            "Ec2Service",
            cluster=cluster,
            memory_limit_mib=512,
            task_image_options={
                'image':
                ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")
            })

        CfnOutput(self,
                  "LoadBalancerDNS",
                  value=ecs_service.load_balancer.load_balancer_dns_name)
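Example #21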
    def _setup_elasticsearch_7_10_fgac(self) -> None:
        domain_name = "wrangler-es-7-10-fgac"
        validate_domain_name(domain_name)
        domain_arn = f"arn:aws:es:{self.region}:{self.account}:domain/{domain_name}"
        domain = opensearch.Domain(
            self,
            domain_name,
            domain_name=domain_name,
            version=opensearch.EngineVersion.ELASTICSEARCH_7_10,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type="t3.small.search", data_nodes=1),
            access_policies=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["es:*"],
                    principals=[iam.AnyPrincipal()],  # FGACs
                    resources=[f"{domain_arn}/*"],
                )
            ],
            fine_grained_access_control=opensearch.AdvancedSecurityOptions(
                master_user_name=self.username,
                master_user_password=self.password_secret,
            ),
            node_to_node_encryption=True,
            encryption_at_rest=opensearch.EncryptionAtRestOptions(
                enabled=True, kms_key=self.key),
            enforce_https=True,
            removal_policy=RemovalPolicy.DESTROY,
        )

        CfnOutput(self,
                  f"DomainEndpoint-{domain_name}",
                  value=domain.domain_endpoint)
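Example #22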
    def _setup_opensearch_1_0(self) -> None:
        domain_name = "wrangler-os-1-0"
        validate_domain_name(domain_name)
        domain_arn = f"arn:aws:es:{self.region}:{self.account}:domain/{domain_name}"
        domain = opensearch.Domain(
            self,
            domain_name,
            domain_name=domain_name,
            version=opensearch.EngineVersion.OPENSEARCH_1_0,
            capacity=opensearch.CapacityConfig(
                data_node_instance_type="t3.small.search", data_nodes=1),
            access_policies=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["es:*"],
                    principals=[iam.AccountRootPrincipal()],
                    resources=[f"{domain_arn}/*"],
                )
            ],
            removal_policy=RemovalPolicy.DESTROY,
        )

        CfnOutput(self,
                  f"DomainEndpoint-{domain_name}",
                  value=domain.domain_endpoint)
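Example #23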
 def __init__(self,
              scope: Construct,
              id: str,
              connection_name: str,
              *,
              host_arn: str = None,
              ssm_parameter_space: str = '/github/connections',
              **connection_args) -> None:
     super().__init__(scope, id)
     if host_arn:
         connection_args['host_arn'] = host_arn
     self.connection = aws_codestarconnections.CfnConnection(
         self,
         'github-connection',
         connection_name=connection_name,
         provider_type='GitHub',
         **connection_args)
      CfnOutput(
          self,
          "output",
          value=self.connection.attr_connection_arn,
          description=("Validate with the GitHub app connection at: "
                       "https://console.aws.amazon.com/codesuite/settings/connections"))
     if ssm_parameter_space:
         aws_ssm.StringParameter(
             self,
             "ssm",
             string_value=self.connection.attr_connection_arn,
             parameter_name=f"{ssm_parameter_space}/{connection_name}")
Example #24
 def Function(self, id: str, **attr) -> aws_lambda.Function:
     # Resolve any friendly layer names to the LayerVersion objects
     # registered in self._layers before creating the function
     if 'layers' in attr:
         attr['layers'] = [
             self._layers.get(layer, layer) for layer in attr['layers']
         ]
     fx = aws_lambda.Function(self, id, **attr)
     CfnOutput(self, f"{id}-cfn", value=fx.function_arn)
     return fx
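
Assuming the construct maintains a self._layers dict mapping friendly names to LayerVersion objects, a hypothetical call site for this helper could look like:

# Hypothetical call site for the Function helper above
fx = self.Function(
    'my-handler',
    runtime=aws_lambda.Runtime.PYTHON_3_9,
    handler='index.handler',
    code=aws_lambda.Code.from_asset('src'),
    layers=['shared-utils'],  # name resolved against self._layers by the helper
)

Example #25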
    def __init__(self, app: App, id: str) -> None:
        super().__init__(app, id)

        # SQS queue
        queue = sqs.Queue(self, 's3-to-sqs-test')

        bucket = s3.Bucket(self, "MyBucket")
        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      s3n.SqsDestination(queue))

        # Output information about the created resources
        CfnOutput(self,
                  'sqsQueueUrl',
                  value=queue.queue_url,
                  description='The URL of the SQS queue')
        CfnOutput(self,
                  'bucketName',
                  value=bucket.bucket_name,
                  description='The name of the bucket created')
Example #26
    def __init__(self,
                 scope: Construct,
                 id: str,
                 name: str,
                 saml_metadata_url: str = None,
                 saml_metadata_document: str = None,
                 ssm_parameter_name: str = None) -> None:
        """Create an IAM SAML Identity Provider

        Args:
            scope (Construct): [description]
            id (str): [description]
            name (str): IAM idp Name
            idp_url (str, optional): [description]. Defaults to None.
            saml_metadata_document (str, optional): [description]. Defaults to None.
        """

        # Load SSO SAML metadata
        if saml_metadata_url:
            resp = requests.get(url=saml_metadata_url)
            saml_metadata_document = resp.text
        if not saml_metadata_document:
            raise AttributeError(
                "Need saml_metadata_url or saml_metadata_document")

        super().__init__(
            scope,
            id,
            name=name,
            metadata_document=aws_iam.SamlMetadataDocument.from_xml(
                saml_metadata_document))
        self.ssm_parameter_name = ssm_parameter_name
        CfnOutput(self.stack,
                  f"{self.node.id}-arn",
                  value=self.saml_provider_arn)
        if self.ssm_parameter_name:
            aws_ssm.StringParameter(self.stack,
                                    f"{self.node.id}-ssm",
                                    string_value=self.saml_provider_arn,
                                    parameter_name=self.ssm_parameter_name)
            CfnOutput(self.stack,
                      f"{self.node.id}-ssm-name",
                      value=self.ssm_parameter_name)
Example #27
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        innerSfnPassState = sfn.Pass(self, 'PassState')

        innerSfn = sfn.StateMachine(self, 'InnerStepFunction',
            definition = innerSfnPassState,
            timeout=Duration.minutes(60)
        )

        task1 = tasks.StepFunctionsStartExecution(self, "StepFunction1",
          state_machine=innerSfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector = {
                "Output.$": "$.Output"
          }
        )

        task2 = tasks.StepFunctionsStartExecution(self, "StepFunction2",
          state_machine=innerSfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector = {
                "Output.$": "$.Output"
          }
        )

        task3 = tasks.StepFunctionsStartExecution(self, "StepFunction3",
          state_machine=innerSfn,
          integration_pattern=sfn.IntegrationPattern.RUN_JOB,
          input=sfn.TaskInput.from_object({
              "input.$": "$.Output.input"
          }),
          output_path="$",
          result_selector = {
                "Output.$": "$.Output"
          }
        )

        outer_sfn = sfn.StateMachine(self, "OuterStepFunction",
                definition=task1.next(task2).next(task3),
                timeout=Duration.minutes(60)
        )

        CfnOutput(self, "StepFunctionArn",
            value = outer_sfn.state_machine_arn,
            export_name = 'OuterStepFunctionArn',
            description = 'Outer Step Function arn')
Example #28
    def __init__(self, scope: Construct, stack_name: str, component_id: str,
                 **kwargs):
        super().__init__(scope=scope, id=component_id, **kwargs)

        self.stack_name = stack_name
        self.component_id = component_id
        # Name of the Greengrass component; it must match the name in the recipe
        self.greengrass_component_name = "com.example.ggmlcomponent"

        self.create_deploy_pipeline()

        CfnOutput(
            self,
            id="ComponentCodeRepositoryURI",
            export_name="ComponentCodeRepositoryURI",
            value=self._component_source_repository.repository_clone_url_grc)

        CfnOutput(self,
                  id="ComponentBaseImageRepositoryURI",
                  export_name="ComponentBaseImageRepositoryURI",
                  value=self._component_base_ecr.repository_uri)
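Example #29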
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        phoneNumber = CfnParameter(self,
                                   "phoneNumber",
                                   type="String",
                                   description="Recipient phone number")
        tenDLC = CfnParameter(self,
                              "tenDLC",
                              type="String",
                              description="10DLC origination number")

        # We create a log group so it will be gracefully cleaned up on a destroy event.  By default
        # logs never expire and won't be removed.
        lambdaLogGroup = logs.LogGroup(
            self,
            'SMSPublisherFunctionLogGroup',
            log_group_name='/aws/lambda/SMSPublisherFunction',
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.FIVE_DAYS,
        )

        SMSPublisherFunction = aws_lambda.Function(
            self,
            'SMSPublisherFunction',
            code=aws_lambda.Code.from_asset('src'),
            function_name='SMSPublisherFunction',
            handler='app.handler',
            runtime=aws_lambda.Runtime.NODEJS_12_X,
            timeout=Duration.seconds(3),
            memory_size=128,
            environment={
                'phoneNumber': phoneNumber.value_as_string,
                'tenDLC': tenDLC.value_as_string
            },
            initial_policy=[
                # Deny publishing to any SNS topic while the broad allow below
                # still permits direct SMS publishing to phone numbers
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.DENY,
                                    resources=['arn:aws:sns:*:*:*']),
                iam.PolicyStatement(actions=['sns:Publish'],
                                    effect=iam.Effect.ALLOW,
                                    resources=['*'])
            ],
        )
        # Make sure the log group is created prior to the function so CDK doesn't create a new one
        SMSPublisherFunction.node.add_dependency(lambdaLogGroup)

        CfnOutput(self,
                  'SMSPublisherFunctionName',
                  description='SMSPublisherFunction function name',
                  value=SMSPublisherFunction.function_name)
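Example #30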
    def __init__(self, app: App, id: str, props, **kwargs) -> None:
        super().__init__(app, id, **kwargs)
        # define the s3 artifact
        source_output = aws_codepipeline.Artifact(artifact_name='source')
        # define the pipeline
        pipeline = aws_codepipeline.Pipeline(
            self,
            "Pipeline",
            pipeline_name=f"{props['namespace']}",
            artifact_bucket=props['bucket'],
            stages=[
                aws_codepipeline.StageProps(
                    stage_name='Source',
                    actions=[
                        aws_codepipeline_actions.S3SourceAction(
                            bucket=props['bucket'],
                            bucket_key='source.zip',
                            action_name='S3Source',
                            run_order=1,
                            output=source_output,
                            trigger=aws_codepipeline_actions.S3Trigger.POLL),
                    ]),
                aws_codepipeline.StageProps(
                    stage_name='Build',
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            action_name='DockerBuildImages',
                            input=source_output,
                            project=props['cb_docker_build'],
                            run_order=1,
                        )
                    ])
            ])
        # Give the pipeline role read/write access to the bucket
        props['bucket'].grant_read_write(pipeline.role)

        # SSM parameter exposing the pipeline name
        pipeline_param = aws_ssm.StringParameter(
            self,
            "PipelineParam",
            parameter_name=f"{props['namespace']}-pipeline",
            string_value=pipeline.pipeline_name,
            description='cdk pipeline name')
        # cfn output
        CfnOutput(self,
                  "PipelineOut",
                  description="Pipeline",
                  value=pipeline.pipeline_name)