Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open(INPUT_WORKFLOW_FILE) as f:
            workflow_dict = json.load(f)
            glue_role = Role(
                self,
                "dummy_glue_job_role",
                assumed_by=ServicePrincipal("glue.amazonaws.com"),
                managed_policies=[
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSGlueServiceRole")
                ],
            )
            # mock job
            job_command = CfnJob.JobCommandProperty(
                name='pythonshell', script_location=SCRIPT_LOCATION)

            for workflow_item in workflow_dict:
                for node in workflow_item['nodes']:
                    triggered_job_name = node['id']
                    job = CfnJob(self,
                                 triggered_job_name,
                                 name=triggered_job_name,
                                 command=job_command,
                                 role=glue_role.role_name)
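
The loop above iterates over the parsed workflow file and creates one mock Glue job per node. A minimal shape for INPUT_WORKFLOW_FILE that satisfies this code is sketched below; the structure is inferred from the loop, and the job names are hypothetical.

    # Hypothetical contents of INPUT_WORKFLOW_FILE, inferred from the loop
    # above: a top-level list of workflows, each carrying a "nodes" list whose
    # "id" values become Glue job names.
    workflow_dict = [
        {
            "nodes": [
                {"id": "extract-job"},
                {"id": "transform-job"},
            ]
        },
    ]
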
Example #2
 def __create_s3_trigger_lambda_execution_role(
         self, bucket_name: str,
         cloudfront_distribution: aws_cloudfront.Distribution
 ) -> aws_iam.Role:
     return aws_iam.Role(
         self,
         "S3TriggerLambdaExecutionRole",
         assumed_by=ServicePrincipal('lambda.amazonaws.com'),
         description=
         'Execution role that allows the lambda function to get the uploaded zip from S3, upload the '
         'unpacked one and invalidate the CDN',
         path='/',
         managed_policies=[
             ManagedPolicy.from_aws_managed_policy_name(
                 'service-role/AWSLambdaBasicExecutionRole')
         ],
         inline_policies={
             's3_trigger_artifacts-upload-role':
             PolicyDocument(statements=[
                 PolicyStatement(
                     effect=Effect.ALLOW,
                     actions=[
                         's3:PutObject', 's3:GetObject', 's3:ListBucket',  # ListObject is not a valid S3 action
                         's3:DeleteObject', 's3:HeadBucket',
                         'cloudfront:CreateInvalidation'
                     ],
                     resources=[
                         f'arn:aws:cloudfront::{Fn.ref("AWS::AccountId")}:distribution/'
                         f'{cloudfront_distribution.distribution_id}',
                         f'arn:aws:s3:::{bucket_name}',
                         f'arn:aws:s3:::{bucket_name}/*'
                     ])
             ])
         })
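
A role built this way is typically handed to the Lambda function that unpacks the uploaded archive. A minimal sketch from within the same stack class follows; the aws_lambda import, function name, asset path, and handler are assumptions, not part of the original project.

     # Hypothetical usage: hand the execution role to the unpacking Lambda.
     # Asset path and handler are placeholders.
     unpack_function = aws_lambda.Function(
         self,
         'S3TriggerLambda',
         runtime=aws_lambda.Runtime.PYTHON_3_8,
         code=aws_lambda.Code.from_asset('lambda/unpack'),
         handler='index.handler',
         role=self.__create_s3_trigger_lambda_execution_role(
             bucket_name, cloudfront_distribution),
     )
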
Example #3
    def _create_lambdas(self):
        clean_pycache()

        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(
                    self,
                    f"{name}_role",
                    assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
                )
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

                lambda_args = {
                    "code": Code.from_asset(root),
                    "handler": "__init__.handle",
                    "runtime": Runtime.PYTHON_3_8,
                    "layers": layers,
                    "function_name": name,
                    "environment": lambda_config["variables"],
                    "role": lambda_role,
                    "timeout": Duration.seconds(lambda_config["timeout"]),
                    "memory_size": lambda_config["memory"],
                }
                if "concurrent_executions" in lambda_config:
                    lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

                self.lambdas[name] = Function(self, name, **lambda_args)

        self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

        Rule(
            self,
            "titles_updater",
            schedule=Schedule.cron(hour="2", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
        )
        Rule(
            self,
            "episodes_updater",
            schedule=Schedule.cron(hour="4", minute="10"),
            targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
        )
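
Everything in _create_lambdas is driven by self.lambdas_config. A plausible shape for one entry is sketched below, inferred from the keys the method reads; the concrete values are hypothetical.

    # Hypothetical lambdas_config entry, inferred from the lookups above.
    lambdas_config = {
        "crons-titles_updater": {
            "layers": ["common"],                # keys into self.layers
            "policies": [],                      # list of PolicyStatement
            "variables": {"LOG_LEVEL": "INFO"},  # Lambda environment variables
            "timeout": 60,                       # seconds
            "memory": 128,                       # MiB
            # "concurrent_executions": 1,        # optional reserved concurrency
        },
    }
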
Example #4
    def create_default_infrastructure_config(
            self, construct_id: str) -> CfnInfrastructureConfiguration:
        """
        Create the default infrastructure config, which defines the permissions needed by Image Builder during
        image creation.
        """
        image_builder_role_name = f"DeadlineMachineImageBuilderRole{construct_id}"
        image_builder_role = Role(
            self,
            image_builder_role_name,
            assumed_by=ServicePrincipal("ec2.amazonaws.com"),
            role_name=image_builder_role_name)
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'EC2InstanceProfileForImageBuilder'))
        image_builder_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        image_builder_role.add_to_policy(
            PolicyStatement(actions=[
                's3:Get*',
                's3:List*',
            ],
                            resources=['arn:aws:s3:::thinkbox-installers/*']))

        image_builder_profile_name = f"DeadlineMachineImageBuilderPolicy{construct_id}"
        image_builder_profile = CfnInstanceProfile(
            self,
            image_builder_profile_name,
            instance_profile_name=image_builder_profile_name,
            roles=[image_builder_role_name])
        image_builder_profile.add_depends_on(
            image_builder_role.node.default_child)

        infrastructure_configuration = CfnInfrastructureConfiguration(
            self,
            f"InfrastructureConfig{construct_id}",
            name=f"DeadlineInfrastructureConfig{construct_id}",
            instance_profile_name=image_builder_profile_name)
        infrastructure_configuration.add_depends_on(image_builder_profile)

        return infrastructure_configuration
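
The returned infrastructure configuration is normally referenced by an Image Builder pipeline. A minimal sketch, assuming aws_imagebuilder's CfnImagePipeline is imported; the recipe ARN is a placeholder.

        # Hypothetical usage: wire the infrastructure config into a pipeline.
        infra_config = self.create_default_infrastructure_config("Sample")
        CfnImagePipeline(
            self,
            "ImagePipelineSample",
            name="DeadlineImagePipelineSample",
            image_recipe_arn="arn:aws:imagebuilder:...",  # placeholder recipe ARN
            infrastructure_configuration_arn=infra_config.attr_arn,
        )
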
Example #5
    def create_managed_policy(self):
        statement = PolicyStatement(effect=Effect.ALLOW,
                                    actions=[
                                        "s3:PutObject",
                                    ],
                                    resources=[
                                        f'{self.bucket.bucket_arn}/*',
                                    ])

        return ManagedPolicy(
            self,
            'Managed Policy',
            managed_policy_name='sfn_lambda_policy',
            statements=[statement],
        )
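
The policy above only grants s3:PutObject on the bucket's objects; it still has to be attached to an identity to take effect. A minimal sketch, assuming a Lambda execution role created alongside it:

        # Hypothetical usage: attach the policy to a Lambda execution role.
        lambda_role = Role(
            self,
            'SfnLambdaRole',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
        )
        lambda_role.add_managed_policy(self.create_managed_policy())
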
Example #6
 def __create_cloud_front_lambda_execution_role(self) -> aws_iam.Role:
     role = aws_iam.Role(
         self,
         'CloudFrontLambdaExecutionRole',
         assumed_by=ServicePrincipal('lambda.amazonaws.com'),
         path='/',
         managed_policies=[
             ManagedPolicy.from_aws_managed_policy_name(
                 'service-role/AWSLambdaBasicExecutionRole')
         ])
     role.assume_role_policy.add_statements(
         PolicyStatement(
             principals=[ServicePrincipal('edgelambda.amazonaws.com')],
             actions=['sts:AssumeRole']))
     return role
Example #7
File: main.py Project: jqbx-bot/bot
 def __init__(self, scope: Construct, _id: str, **kwargs) -> None:
     super().__init__(scope, _id, **kwargs)
     task_definition = FargateTaskDefinition(
         self,
         'TaskDefinition',
         cpu=256,
         memory_limit_mib=512,
         execution_role=Role(
             self,
             'ExecutionRole',
             assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com'))
         ),
         task_role=Role(
             self,
             'TaskRole',
             assumed_by=cast(IPrincipal, ServicePrincipal('ecs-tasks.amazonaws.com')),
             managed_policies=[
                 ManagedPolicy.from_aws_managed_policy_name('AmazonSESFullAccess')
             ]
         )
     )
     task_definition.add_container(
         'Container',
         image=ContainerImage.from_asset(
             getcwd(),
             file='Dockerfile',
             repository_name='jqbx-bot',
             exclude=['cdk.out']
         ),
         command=['pipenv', 'run', 'python', '-u', '-m', 'src.main'],
         environment={
             'SPOTIFY_USER_ID': environ.get('SPOTIFY_USER_ID'),
             'JQBX_ROOM_ID': environ.get('JQBX_ROOM_ID'),
             'JQBX_BOT_DISPLAY_NAME': environ.get('JQBX_BOT_DISPLAY_NAME'),
             'JQBX_BOT_IMAGE_URL': environ.get('JQBX_BOT_IMAGE_URL'),
             'DATA_SERVICE_BASE_URL': environ.get('DATA_SERVICE_BASE_URL')
         },
         logging=AwsLogDriver(
             stream_prefix='jqbx-bot',
             log_group=LogGroup(self, 'LogGroup')
         )
     )
     cluster = Cluster(self, '%sCluster' % _id)
     FargateService(self, '%sService' % _id, cluster=cluster, task_definition=task_definition, desired_count=1)
Example #8
    def _create_lambdas(self):
        for root, dirs, files in os.walk(LAMBDAS_DIR):
            for f in files:
                if f != "__init__.py":
                    continue

                parent_folder = os.path.basename(os.path.dirname(root))
                lambda_folder = os.path.basename(root)
                name = f"{parent_folder}-{lambda_folder}"
                lambda_config = self.lambdas_config[name]

                layers = []
                for layer_name in lambda_config["layers"]:
                    layers.append(self.layers[layer_name])

                lambda_role = Role(self,
                                   f"{name}_role",
                                   assumed_by=ServicePrincipal(
                                       service="lambda.amazonaws.com"))
                for policy in lambda_config["policies"]:
                    lambda_role.add_to_policy(policy)
                lambda_role.add_managed_policy(
                    ManagedPolicy.from_aws_managed_policy_name(
                        "service-role/AWSLambdaBasicExecutionRole"))

                self.lambdas[name] = Function(
                    self,
                    name,
                    code=Code.from_asset(root),
                    handler="__init__.handle",
                    runtime=Runtime.PYTHON_3_8,
                    layers=layers,
                    function_name=name,
                    environment=lambda_config["variables"],
                    role=lambda_role,
                    timeout=Duration.seconds(lambda_config["timeout"]),
                    memory_size=lambda_config["memory"],
                )

        Rule(self,
             "update_eps",
             schedule=Schedule.cron(hour="2", minute="10"),
             targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
Example #9
    def attach_iam_policies_to_role(cls, role: Role):
        """
        Attach the necessary policies to read secrets from SSM and SecretsManager

        :param role:
        :return:
        """
        # TODO: Extract this in a managed policy
        secretsmanager_readonly_policy = PolicyStatement(
            resources=["*"],
            effect=Effect.ALLOW,
            actions=[
                "secretsmanager:GetResourcePolicy",
                "secretsmanager:GetSecretValue",
                "secretsmanager:DescribeSecret",
                "secretsmanager:ListSecretVersionIds",
            ]
        )
        role.add_to_policy(secretsmanager_readonly_policy)
        role.add_managed_policy(ManagedPolicy.from_aws_managed_policy_name('AmazonSSMReadOnlyAccess'))
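
Since attach_iam_policies_to_role is a classmethod, callers pass in whichever role needs secret access. The enclosing class is not shown in this snippet, so SecretsAccess below is a placeholder name.

    # Hypothetical usage; SecretsAccess stands in for the enclosing class.
    task_role = Role(
        self,
        "TaskRole",
        assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
    )
    SecretsAccess.attach_iam_policies_to_role(task_role)
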
Example #10
    def __init__(self, scope: Construct, id: str) -> None:
        super().__init__(scope, id)

        policy = ManagedPolicy(self, "Policy")
        # (de)registerTaskDefinitions doesn't support specific resources
        ecs_task = PolicyStatement(
            actions=[
                "ecs:DeregisterTaskDefinition",
                "ecs:RegisterTaskDefinition",
            ],
            resources=["*"],
        )
        # ListTagsForResource cannot be set for a service only, but has to be
        # on the cluster (despite it only looking at the tags for the service).
        # We make a separate statement for this, to avoid giving other policies
        # more rights than required, as they can be per service.
        self._cluster_statement = PolicyStatement(actions=[
            "ecs:ListTagsForResource",
        ], )
        # All other actions can be combined, as they don't collide. As policies
        # have a maximum size in bytes, combining these statements saves some
        # of that budget.
        self._statement = PolicyStatement(actions=[
            "iam:PassRole",
            "ssm:GetParameter",
            "ssm:GetParameters",
            "ssm:PutParameter",
            "ecs:UpdateService",
            "ecs:DescribeServices",
            "cloudformation:UpdateStack",
            "cloudformation:DescribeStacks",
        ], )

        policy.add_statements(ecs_task)
        policy.add_statements(self._cluster_statement)
        policy.add_statements(self._statement)
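
Both statements are created without resources so that, per the comments above, ARNs can be added per service later. A sketch of how that might look using PolicyStatement.add_resources; the add_service method is an assumption, not part of the original class.

    # Hypothetical follow-up: each service widens only its own statements.
    def add_service(self, cluster_arn: str, service_arn: str) -> None:
        self._cluster_statement.add_resources(cluster_arn)
        self._statement.add_resources(service_arn)
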
Example #11
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        global g_lambda_edge

        Tags.of(self).add("Stack", "Common-Lambda-Edge")

        self._role = Role(
            self,
            "EdgeLambdaRole",
            assumed_by=CompositePrincipal(
                ServicePrincipal("lambda.amazonaws.com"),
                ServicePrincipal("edgelambda.amazonaws.com"),
            ),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
            ],
        )

        if g_lambda_edge is not None:
            raise Exception("Only a single LambdaEdgeStack instance can exist")
        g_lambda_edge = self
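
The module-level global enforces a single stack instance so other stacks can reuse the shared edge role. A hedged accessor sketch; the function is an assumption, not part of the original module.

# Hypothetical module-level accessor for the singleton created above.
def get_edge_lambda_role() -> Role:
    if g_lambda_edge is None:
        raise Exception("LambdaEdgeStack has not been instantiated yet")
    return g_lambda_edge._role
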
Example #12
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Creates the Resource Tracker Access role. This role is required to exist in your account so the resource tracker will work properly
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies= [ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy')],
                role_name= 'DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        attribute_tagged_group = Group(self, "Flexible Tagged")
        access_project = core.CfnTag(key="access-project", value="elysian")
        access_team = core.CfnTag(key="access-team", value="webdev")
        access_cost_center = core.CfnTag(key="cost-center", value="2600")

        flexible_boundary_policy = CfnManagedPolicy(
            self,
            "FlexiblePermissionBoundary",
            policy_document=json.loads(flexible_policy_permission_boundary),
        )

        CfnUser(
            self,
            "Developer",
            tags=[access_project, access_team, access_cost_center],
            groups=[attribute_tagged_group.group_name],
            permissions_boundary=flexible_boundary_policy.ref,
        )

        # Add AWS managed policy for EC2 Read Only access for the console.
        attribute_tagged_group.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "FlexibleAttributePolicy",
            policy_document=json.loads(full_attribute_based_policy),
            groups=[attribute_tagged_group.group_name],
        )

        vpc = Vpc.from_lookup(self, "AttributeTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance.
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of cdk v1.31
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[access_project, access_team, access_cost_center],
        )
        # Empty group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="elysian-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="elysian-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-elysian-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-elysian-ami"
        )
Example #14
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: ICluster,
        ecs_security_group: SecurityGroup,
        ecs_source_security_group: SecurityGroup,
        vpc: IVpc,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        global g_nlb

        Tags.of(self).add("Stack", "Common-Nlb")

        # TODO -- You need to do some manual actions:
        # TODO --  1) enable auto-assign IPv6 address on public subnets
        # TODO --  2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

        self.private_zone = HostedZone.from_lookup(
            self,
            "PrivateZone",
            domain_name="openttd.internal",
            private_zone=True,
        )

        user_data = UserData.for_linux(shebang="#!/bin/bash -ex")

        asset = Asset(self, "NLB", path="user_data/nlb/")
        user_data.add_commands(
            "echo 'Extracting user-data files'",
            "mkdir /nlb",
            "cd /nlb",
        )
        user_data.add_s3_download_command(
            bucket=asset.bucket,
            bucket_key=asset.s3_object_key,
            local_file="/nlb/files.zip",
        )
        user_data.add_commands("unzip files.zip", )

        user_data.add_commands(
            "echo 'Setting up configuration'",
            f"echo '{self.region}' > /etc/.region",
            f"echo '{cluster.cluster_name}' > /etc/.cluster",
        )

        user_data.add_commands(
            "echo 'Installing nginx'",
            "amazon-linux-extras install epel",
            "yum install nginx -y",
            "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
            "mkdir /etc/nginx/nlb.d",
        )

        user_data.add_commands(
            "echo 'Installing Python3'",
            "yum install python3 -y",
            "python3 -m venv /venv",
            "/venv/bin/pip install -r /nlb/requirements.txt",
        )

        user_data.add_commands(
            "echo 'Generating nginx configuration'",
            "cd /etc/nginx/nlb.d",
            "/venv/bin/python /nlb/nginx.py",
            "systemctl start nginx",
        )

        user_data.add_commands(
            "echo 'Setting up SOCKS proxy'",
            "useradd pproxy",
            "cp /nlb/pproxy.service /etc/systemd/system/",
            "systemctl daemon-reload",
            "systemctl enable pproxy.service",
            "systemctl start pproxy.service",
        )

        asg = AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.nano"),
            machine_image=MachineImage.latest_amazon_linux(
                generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
            min_capacity=2,
            vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                        one_per_az=True),
            user_data=user_data,
            health_check=HealthCheck.elb(grace=Duration.seconds(0)),
        )
        asg.add_security_group(ecs_security_group)

        asg.role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))
        asset.grant_read(asg.role)
        policy = ManagedPolicy(self, "Policy")
        policy_statement = PolicyStatement(
            actions=[
                "ec2:DescribeInstances",
                "ecs:DescribeContainerInstances",
                "ecs:DescribeTasks",
                "ecs:ListContainerInstances",
                "ecs:ListServices",
                "ecs:ListTagsForResource",
                "ecs:ListTasks",
            ],
            resources=["*"],
        )
        policy.add_statements(policy_statement)
        asg.role.add_managed_policy(policy)

        # We could also make an additional security-group and add that to
        # the ASG, but it keeps adding up. This makes it a tiny bit
        # easier to get an overview what traffic is allowed from the
        # console on AWS.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.security_group = asg.node.children[0]

        listener_https.add_targets(
            subdomain_name=self.admin_subdomain_name,
            port=80,
            target=asg,
            priority=2,
        )

        # Create a Security Group so the lambdas can access the EC2.
        # This is needed to check if the EC2 instance is fully booted.
        lambda_security_group = SecurityGroup(
            self,
            "LambdaSG",
            vpc=vpc,
        )
        self.security_group.add_ingress_rule(
            peer=lambda_security_group,
            connection=Port.tcp(80),
            description="Lambda to target",
        )

        self.security_group.add_ingress_rule(
            peer=ecs_source_security_group,
            connection=Port.udp(8080),
            description="ECS to target",
        )

        self.create_ecs_lambda(
            cluster=cluster,
            auto_scaling_group=asg,
        )

        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
            timeout=Duration.seconds(180),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )
        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
            timeout=Duration.seconds(30),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )

        # Initialize the NLB record on localhost, as we need to be able to
        # reference it for other entries to work correctly.
        ARecord(
            self,
            "ARecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        AaaaRecord(
            self,
            "AAAARecord",
            target=RecordTarget.from_ip_addresses("::1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        # To make things a bit easier, also alias to staging.
        self.create_alias(self, "nlb.staging")

        # Create a record for the internal DNS
        ARecord(
            self,
            "APrivateRecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=self.private_zone,
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )

        if g_nlb is not None:
            raise Exception("Only a single NlbStack instance can exist")
        g_nlb = self
Example #15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        username_tagged = Group(self, "Username Tagged")

        developer = User(self, "Developer")
        developer.add_to_group(username_tagged)

        # Add AWS managed policy for EC2 Read Only access for the console.
        username_tagged.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "UserTaggedPolicy",
            policy_document=json.loads(username_based_policy),
            groups=[username_tagged.group_name],
        )

        vpc = Vpc.from_lookup(self, "UsernameTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance.
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of 1.31
        dev_username_tag = core.CfnTag(
            key="username", value=developer.user_name)
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[dev_username_tag],
        )
        # Empty group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="username-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="username-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-username-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-username-ami"
        )
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        graphql_api = CfnGraphQLApi(self,
                                    'WeatherApi',
                                    name='weather-api',
                                    authentication_type='API_KEY')

        CfnApiKey(self, 'WeatherApiKey', api_id=graphql_api.attr_api_id)

        api_schema = CfnGraphQLSchema(self,
                                      'WeatherSchema',
                                      api_id=graphql_api.attr_api_id,
                                      definition="""
                type Destination {
                    id: ID!
                    description: String!
                    state: String!
                    city: String!
                    zip: String!
                    conditions: Weather!
                }
                        
                type Mutation {
                    addDestination(
                        id: ID,
                        description: String!,
                        state: String!,
                        city: String!,
                        zip: String!
                    ): Destination!
                }
                        
                type Query {
                    getWeather(city: String!): Weather
                    # Get a single value of type 'Post' by primary key.
                    getDestination(id: ID!, zip: String): Destination
                    getAllDestinations: [Destination]
                    getDestinationsByState(state: String!): [Destination]
                }
                        
                type Subscription {
                    newDestination: Destination
                        @aws_subscribe(mutations: ["addDestination"])
                }
                        
                type Weather {
                    description: String
                    current: String
                    maxTemp: String
                    minTemp: String
                }
                        
                schema {
                    query: Query
                    mutation: Mutation
                    subscription: Subscription
                }
            """)

        table_name = 'destinations'

        table = Table(self,
                      'DestinationsTable',
                      table_name=table_name,
                      partition_key=Attribute(
                          name="id",
                          type=AttributeType.STRING,
                      ),
                      billing_mode=BillingMode.PAY_PER_REQUEST,
                      stream=StreamViewType.NEW_IMAGE)

        table_role = Role(self,
                          'DestinationsDynamoDBRole',
                          assumed_by=ServicePrincipal('appsync.amazonaws.com'))

        table_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess'))

        data_source = CfnDataSource(
            self,
            'DestinationsDataSource',
            api_id=graphql_api.attr_api_id,
            name='DestinationsDynamoDataSource',
            type='AMAZON_DYNAMODB',
            dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
                table_name=table.table_name, aws_region=self.region),
            service_role_arn=table_role.role_arn)

        lambdaFn = Function(self,
                            "GetWeather",
                            code=Code.asset(os.getcwd() + "/lambdas/weather/"),
                            handler="weather.get",
                            timeout=core.Duration.seconds(900),
                            memory_size=128,
                            runtime=Runtime.NODEJS_10_X,
                            environment={'APPID': os.getenv('APPID')})

        lambda_role = Role(
            self,
            'WeatherLambdaRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com'))

        lambda_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name('AWSLambdaFullAccess'))

        lambda_source = CfnDataSource(
            self,
            'WeatherDataSource',
            api_id=graphql_api.attr_api_id,
            name='WeatherCondition',
            type='AWS_LAMBDA',
            lambda_config=CfnDataSource.LambdaConfigProperty(
                lambda_function_arn=lambdaFn.function_arn),
            service_role_arn=lambda_role.role_arn)

        self.add_resolvers(graphql_api,
                           api_schema,
                           data_source=data_source,
                           lambda_source=lambda_source)
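
add_resolvers is referenced above but not included in this snippet. A minimal sketch of what it might do, wiring a unit resolver for the getWeather query to the Lambda data source; the field name and mapping templates are assumptions modeled on the other resolver examples in this gallery.

    # Hypothetical sketch of add_resolvers; not from the original project.
    def add_resolvers(self, graphql_api, api_schema, data_source, lambda_source):
        get_weather_resolver = CfnResolver(
            self,
            'GetWeatherResolver',
            api_id=graphql_api.attr_api_id,
            type_name='Query',
            field_name='getWeather',
            data_source_name=lambda_source.name,
            request_mapping_template="""
            {
                "version": "2017-02-28",
                "operation": "Invoke",
                "payload": $util.toJson($ctx.args)
            }""",
            response_mapping_template="$util.toJson($ctx.result)")
        get_weather_resolver.add_depends_on(api_schema)
        get_weather_resolver.add_depends_on(lambda_source)
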
Example #17
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        table = dynamodb.Table(
            self,
            "TheTable",
            table_name="cdk-table",
            partition_key=dynamodb.Attribute(
                name="id", type=dynamodb.AttributeType.STRING),
            removal_policy=cdk.RemovalPolicy.DESTROY,
        )

        # compute_environment = batch.ComputeEnvironment(
        #     self,
        #     "MyComputeEnvironment",
        #     compute_environment_name="cdk-env",
        #     compute_resources=batch.ComputeResources(
        #         vpc=Vpc.from_lookup(self, "VPC", is_default=True),
        #     ),
        #     enabled=True,
        #     managed=True,
        # )

        job_role = Role(
            self,
            "BatchJobRole",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            description="Role for a container in a Batch job",
            role_name="CDK-BatchJobRole",
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name="AmazonDynamoDBFullAccess"),
            ],
        )

        repository = Repository(
            self,
            "MyRepository",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            repository_name="cdk-my-repository",
            lifecycle_rules=[
                LifecycleRule(max_image_count=5, description="Max 5 images")
            ],
        )

        image: ContainerImage = ContainerImage.from_ecr_repository(
            repository=repository,
            tag="latest",
        )

        container = batch.JobDefinitionContainer(
            image=image,
            job_role=job_role,
            command=["python", "run.py", "--help"],
            environment={
                "READINGS_TABLE": table.table_name,
                "AWS_REGION": self.region,
            },
            vcpus=1,
            log_configuration=batch.LogConfiguration(
                log_driver=batch.LogDriver.AWSLOGS),
            memory_limit_mib=2048,
        )

        batch.JobDefinition(
            self,
            "JobDefinitionCreate",
            container=container,
            job_definition_name="create",
            retry_attempts=1,
        )
Example #18
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, instance: IInstance, neo4j_user_secret: ISecret, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket = Bucket(self, "s3-bucket-altimeter",
            bucket_name=config["s3_bucket"],
            # Disable encryption (rather than S3_MANAGED): it's not really required
            # and it conflicts with SCP guardrails set by Control Tower on the Audit account.
            encryption=BucketEncryption.UNENCRYPTED,
            block_public_access=BlockPublicAccess.BLOCK_ALL
        )

        cluster = Cluster(self, "ecs-cluster-altimeter", 
            cluster_name="ecsclstr-altimeter--default",
            vpc=vpc               
        )

        task_role = Role(self, "iam-role-altimeter-task-role",
            assumed_by=ServicePrincipal("ecs-tasks.amazonaws.com"),
            # It appears that within the account where the scanner is running, the task role is (partially) used for scanning resources (rather than the altimeter-scanner-access role).      
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('SecurityAudit'),
                ManagedPolicy.from_aws_managed_policy_name('job-function/ViewOnlyAccess')
            ]
        )

        task_definition = FargateTaskDefinition(self, "ecs-fgtd-altimeter",
            task_role=task_role,
            memory_limit_mib=self.MEMORY_LIMIT,
            cpu=self.CPU
        )

        docker_path = os.path.join(os.path.curdir,"..")

        image_asset = DockerImageAsset(self, 'ecr-assets-dia-altimeter', 
            directory=docker_path,
            file="scanner.Dockerfile"
        )            

        task_definition.add_container("ecs-container-altimeter",            
            image= ContainerImage.from_docker_image_asset(image_asset),
            # memory_limit_mib=self.MEMORY_LIMIT,
            # cpu=self.CPU,
            environment= {
                "CONFIG_PATH": config["altimeter_config_path"],
                "S3_BUCKET": config["s3_bucket"]
            },
            logging= AwsLogDriver(
                stream_prefix= 'altimeter',
                log_retention= RetentionDays.TWO_WEEKS
            )
        )

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["arn:aws:iam::*:role/"+config["account_execution_role"]],
            actions=['sts:AssumeRole']
        ))

        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=[
                "arn:aws:s3:::"+config["s3_bucket"],
                "arn:aws:s3:::"+config["s3_bucket"]+"/*"
            ],
            actions=["s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject",
                "s3:Abort*",
                "s3:PutObjectTagging"]
        ))

        # Grant the ability to record the stdout to CloudWatch Logs
        # TODO: Refine
        task_definition.add_to_task_role_policy(PolicyStatement(
            resources=["*"],
            actions=['logs:*']
        ))

        # Trigger task every 24 hours
        Rule(self, "events-rule-altimeter-daily-scan",
            rule_name="evrule--altimeter-daily-scan",
            schedule=Schedule.cron(hour="0", minute="0"),
            description="Daily altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )

        # Trigger task manually via event
        Rule(self, "events-rule-altimeter-manual-scan",
            rule_name="evrule--altimeter-manual-scan",
            event_pattern=EventPattern(source=['altimeter']), 
            description="Manual altimeter scan",
            targets=[EcsTask(
                task_definition=task_definition,
                cluster=cluster,
                subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE)
            )]
        )        


        # Don't put the Neo4j importer lambda in a separate stack: that causes a
        # circular reference with the S3 event source, and an imported bucket
        # cannot be used as an event source (it requires a Bucket, not an IBucket).
        neo4j_importer_function = PythonFunction(self, 'lambda-function-neo4j-importer',
            function_name="function-altimeter--neo4j-importer",             
            entry="../neo4j-importer",
            index="app.py",
            handler="lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            memory_size=256,
            timeout=cdk.Duration.seconds(60),
            vpc=vpc,
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            environment={
                "neo4j_address": instance.instance_private_ip,
                "neo4j_user_secret_name": neo4j_user_secret.secret_name
            }
        )

        neo4j_importer_function.add_event_source(
            S3EventSource(bucket,
                events= [EventType.OBJECT_CREATED, EventType.OBJECT_REMOVED],
                filters= [ { "prefix": "raw/", "suffix": ".rdf"}]
            )
        )

        # Grant lambda read/write access to the S3 bucket for reading raw rdf, writing prepared rdf and generating signed uri
        bucket.grant_read_write(neo4j_importer_function.role)
        # Grant lambda read access to the neo4j user secret
        neo4j_user_secret.grant_read(neo4j_importer_function.role)
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        table_name = 'items'

        items_graphql_api = CfnGraphQLApi(self,
                                          'ItemsApi',
                                          name='items-api',
                                          authentication_type='API_KEY')

        CfnApiKey(self, 'ItemsApiKey', api_id=items_graphql_api.attr_api_id)

        api_schema = CfnGraphQLSchema(self,
                                      'ItemsSchema',
                                      api_id=items_graphql_api.attr_api_id,
                                      definition=f"""\
                type {table_name} {{
                    {table_name}Id: ID!
                    name: String
                }}
                type Paginated{table_name} {{
                    items: [{table_name}!]!
                    nextToken: String
                }}
                type Query {{
                    all(limit: Int, nextToken: String): Paginated{table_name}!
                    getOne({table_name}Id: ID!): {table_name}
                }}
                type Mutation {{
                    save(name: String!): {table_name}
                    delete({table_name}Id: ID!): {table_name}
                }}
                type Schema {{
                    query: Query
                    mutation: Mutation
                }}""")

        items_table = Table(
            self,
            'ItemsTable',
            table_name=table_name,
            partition_key=Attribute(name=f'{table_name}Id',
                                    type=AttributeType.STRING),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            stream=StreamViewType.NEW_IMAGE,

            # The default removal policy is RETAIN, which means that cdk
            # destroy will not attempt to delete the new table, and it will
            # remain in your account until manually deleted. By setting the
            # policy to DESTROY, cdk destroy will delete the table (even if it
            # has data in it)
            removal_policy=core.RemovalPolicy.DESTROY  # NOT recommended for production code
        )

        items_table_role = Role(
            self,
            'ItemsDynamoDBRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com'))

        items_table_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess'))

        data_source = CfnDataSource(
            self,
            'ItemsDataSource',
            api_id=items_graphql_api.attr_api_id,
            name='ItemsDynamoDataSource',
            type='AMAZON_DYNAMODB',
            dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
                table_name=items_table.table_name, aws_region=self.region),
            service_role_arn=items_table_role.role_arn)

        get_one_resolver = CfnResolver(
            self,
            'GetOneQueryResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Query',
            field_name='getOne',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "GetItem",
                "key": {{
                "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        get_one_resolver.add_depends_on(api_schema)

        get_all_resolver = CfnResolver(
            self,
            'GetAllQueryResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Query',
            field_name='all',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "Scan",
                "limit": $util.defaultIfNull($ctx.args.limit, 20),
                "nextToken": $util.toJson($util.defaultIfNullOrEmpty($ctx.args.nextToken, null))
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        get_all_resolver.add_depends_on(api_schema)

        save_resolver = CfnResolver(
            self,
            'SaveMutationResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='save',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "PutItem",
                "key": {{
                    "{table_name}Id": {{ "S": "$util.autoId()" }}
                }},
                "attributeValues": {{
                    "name": $util.dynamodb.toDynamoDBJson($ctx.args.name)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        save_resolver.add_depends_on(api_schema)

        delete_resolver = CfnResolver(
            self,
            'DeleteMutationResolver',
            api_id=items_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='delete',
            data_source_name=data_source.name,
            request_mapping_template=f"""\
            {{
                "version": "2017-02-28",
                "operation": "DeleteItem",
                "key": {{
                "{table_name}Id": $util.dynamodb.toDynamoDBJson($ctx.args.{table_name}Id)
                }}
            }}""",
            response_mapping_template="$util.toJson($ctx.result)")

        delete_resolver.add_depends_on(api_schema)
Example #20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # SQS queue
        state_change_sqs = Queue(
            self,
            "state_change_sqs",
            visibility_timeout=core.Duration.seconds(60)
        )

        # Dynamodb Tables
        # EC2 state changes
        tb_states = Table(
            self, "ec2_states", partition_key=Attribute(name="instance-id",
                type=AttributeType.STRING),
            sort_key=Attribute(
                name="time",
                type=AttributeType.STRING
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            stream=StreamViewType.NEW_IMAGE)

        # EC2 inventory
        tb_inventory = Table(
            self, "ec2_inventory", partition_key=Attribute(name="instance-id",
                type=AttributeType.STRING),
            sort_key=Attribute(
                name="time",
                type=AttributeType.STRING
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            stream=StreamViewType.KEYS_ONLY)

        # IAM policies - AWS managed
        basic_exec = ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")
        sqs_access = ManagedPolicy(self, "LambdaSQSExecution",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "sqs:ReceiveMessage",
                        "sqs:DeleteMessage",
                        "sqs:GetQueueAttributes"
                    ],
                    resources=[state_change_sqs.queue_arn]
                )])

        # IAM Policies
        pol_ec2_states_ro = ManagedPolicy(self, "pol_EC2StatesReadOnly",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DescribeStream",
                        "dynamodb:GetRecords",
                        "dynamodb:GetItem",
                        "dynamodb:GetShardIterator",
                        "dynamodb:ListStreams"
                    ],
                    resources=[tb_states.table_arn]
                )])

        pol_ec2_states_rwd = ManagedPolicy(
            self, "pol_EC2StatesWriteDelete",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DeleteItem",
                        "dynamodb:DescribeTable",
                        "dynamodb:PutItem",
                        "dynamodb:Query",
                        "dynamodb:UpdateItem"
                    ],
                    resources=[tb_states.table_arn]
                )])

        pol_ec2_inventory_full = ManagedPolicy(
            self, "pol_EC2InventoryFullAccess",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "dynamodb:DeleteItem",
                        "dynamodb:DescribeTable",
                        "dynamodb:GetItem",
                        "dynamodb:PutItem",
                        "dynamodb:Query",
                        "dynamodb:UpdateItem"
                    ],
                    resources=[tb_inventory.table_arn]
                )])
        
        pol_lambda_describe_ec2 = ManagedPolicy(
            self, "pol_LambdaDescribeEC2",
            statements=[
                PolicyStatement(
                    effect=Effect.ALLOW,
                    actions=[
                        "ec2:Describe*"
                    ],
                    resources=["*"]
                )])

        # IAM Roles
        rl_event_capture = Role(
            self,
            'rl_state_capture',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[basic_exec, sqs_access, pol_ec2_states_rwd]
            )

        rl_event_processor = Role(
            self,
            'rl_state_processor',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                basic_exec,
                pol_ec2_states_ro,
                pol_ec2_states_rwd,
                pol_ec2_inventory_full,
                pol_lambda_describe_ec2])

        # event capture lambda
        lambda_event_capture = Function(
            self, "lambda_event_capture",
            handler="event_capture.handler",
            runtime=Runtime.PYTHON_3_7,
            code=Code.asset('event_capture'),
            role=rl_event_capture,
            events=[SqsEventSource(state_change_sqs)],
            environment={"state_table": tb_states.table_name}
        )

        # event processor lambda
        lambda_event_processor = Function(
            self, "lambda_event_processor",
            handler="event_processor.handler",
            runtime=Runtime.PYTHON_3_7,
            code=Code.asset('event_processor'),
            role=rl_event_processor,
            events=[
                DynamoEventSource(
                    tb_states,
                    starting_position=StartingPosition.LATEST)
            ],
            environment={
                "inventory_table": tb_inventory.table_name,
                }
        )

        # Cloudwatch Event
        event_ec2_change = Rule(
            self, "ec2_state_change",
            description="trigger on ec2 start, stop and terminate instances",
            event_pattern=EventPattern(
                source=["aws.ec2"],
                detail_type=["EC2 Instance State-change Notification"],
                detail={
                    "state": [
                        "running",
                        "stopped",
                        "terminated"]
                    }
                ),
            targets=[aws_events_targets.SqsQueue(state_change_sqs)]
        )

        # Outputs
        core.CfnOutput(self, "rl_state_capture_arn", value=rl_event_capture.role_arn)
        core.CfnOutput(self, "rl_state_processor_arn", value=rl_event_processor.role_arn)
        core.CfnOutput(self, "tb_states_arn", value=tb_states.table_arn)
        core.CfnOutput(self, "tb_inventory_arn", value=tb_inventory.table_arn)
        core.CfnOutput(self, "sqs_state_change", value=state_change_sqs.queue_arn)
Example #21
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        table_name = "trainers"

        trainers_graphql_api = CfnGraphQLApi(
            self,'trainersApi',
            name="trainers-api",
            authentication_type='API_KEY'
        )
        
        CfnApiKey(
            self, 'TrainersApiKey',
            api_id=trainers_graphql_api.attr_api_id
        )

        api_schema = CfnGraphQLSchema(
            self,"TrainersSchema",
            api_id = trainers_graphql_api.attr_api_id,
            definition=data_schema
        )

        trainers_table = Table(
            self, 'TrainersTable',
            table_name=table_name,
            partition_key=Attribute(
                name='id',
                type=AttributeType.STRING
            ),
            billing_mode=BillingMode.PAY_PER_REQUEST,
            stream=StreamViewType.NEW_IMAGE,
            # The default removal policy is RETAIN, which means that cdk
            # destroy will not attempt to delete the new table, and it will
            # remain in your account until manually deleted. By setting the
            # policy to DESTROY, cdk destroy will delete the table (even if it
            # has data in it)
            removal_policy=core.RemovalPolicy.DESTROY  # NOT recommended for production code
        )

        trainers_table_role = Role(
            self, 'TrainersDynamoDBRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com')
        )

        trainers_table_role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                'AmazonDynamoDBFullAccess'
            )
        )

        data_source = CfnDataSource(
            self, 'TrainersDataSource',
            api_id=trainers_graphql_api.attr_api_id,
            name='TrainersDynamoDataSource',
            type='AMAZON_DYNAMODB',
            dynamo_db_config=CfnDataSource.DynamoDBConfigProperty(
                table_name=trainers_table.table_name,
                aws_region=self.region
            ),
            service_role_arn=trainers_table_role.role_arn
        )

        get_trainer_resolver = CfnResolver(
            self, 'GetOneQueryResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Query',
            field_name='getTrainer',
            data_source_name=data_source.name,
            request_mapping_template=get_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )

        get_trainer_resolver.add_depends_on(api_schema)

        get_all_trainers_resolver = CfnResolver(
            self, 'GetAllQueryResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Query',
            field_name='allTrainers',
            data_source_name=data_source.name,
            request_mapping_template=all_trainers,
            response_mapping_template="$util.toJson($ctx.result)"
        )

        get_all_trainers_resolver.add_depends_on(api_schema)
     
        create_trainers_resolver = CfnResolver(
            self, 'CreateTrainerMutationResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='createTrainer',
            data_source_name=data_source.name,
            request_mapping_template=create_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )

        create_trainers_resolver.add_depends_on(api_schema)

        update_trainers_resolver = CfnResolver(
            self, 'UpdateMutationResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='updateTrainers',
            data_source_name=data_source.name,
            request_mapping_template=update_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )
        update_trainers_resolver.add_depends_on(api_schema)

        delete_trainer_resolver = CfnResolver(
            self, 'DeleteMutationResolver',
            api_id=trainers_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name='deleteTrainer',
            data_source_name=data_source.name,
            request_mapping_template=delete_trainer,
            response_mapping_template="$util.toJson($ctx.result)"
        )
        delete_trainer_resolver.add_depends_on(api_schema)
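
        # The request mapping templates referenced above (get_trainer,
        # all_trainers, create_trainer, update_trainer, delete_trainer) are
        # plain VTL strings defined elsewhere in the module. A minimal sketch
        # of what get_trainer might contain -- an assumption, since the actual
        # templates are not shown here:
        #
        # get_trainer = """
        # {
        #     "version": "2017-02-28",
        #     "operation": "GetItem",
        #     "key": {
        #         "id": $util.dynamodb.toDynamoDBJson($ctx.args.id)
        #     }
        # }
        # """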
Example #22
    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        vpc: IVpc,
        cluster: ICluster,
        service: IEc2Service,
        ecs_security_group: SecurityGroup,
        deployment: Deployment,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Application", self.application_name)
        Tags.of(self).add("Deployment", deployment.value)

        security_group = SecurityGroup(
            self,
            "LambdaSG",
            vpc=vpc,
        )

        lambda_func = Function(
            self,
            "ReloadLambda",
            code=Code.from_asset("./lambdas/bananas-reload"),
            handler="index.lambda_handler",
            runtime=Runtime.PYTHON_3_8,
            timeout=Duration.seconds(120),
            environment={
                "CLUSTER": cluster.cluster_arn,
                "SERVICE": service.service_arn,
            },
            vpc=vpc,
            security_groups=[security_group, ecs_security_group],
            reserved_concurrent_executions=1,
        )
        lambda_func.add_to_role_policy(
            PolicyStatement(
                actions=[
                    "ec2:DescribeInstances",
                    "ecs:DescribeContainerInstances",
                    "ecs:DescribeTasks",
                    "ecs:ListContainerInstances",
                    "ecs:ListServices",
                    "ecs:ListTagsForResource",
                    "ecs:ListTasks",
                ],
                resources=[
                    "*",
                ],
            )
        )

        policy = ManagedPolicy(self, "Policy")
        policy.add_statements(
            PolicyStatement(
                actions=[
                    "lambda:InvokeFunction",
                ],
                resources=[lambda_func.function_arn],
            )
        )
def apply_permissions_boundary(boundary, scope):
    """Apply a permissions boundary to all IAM roles defined in the scope."""
    if boundary:
        boundary = ManagedPolicy.from_managed_policy_arn(scope, "Boundary", boundary)
        PermissionsBoundary.of(scope).apply(boundary)
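
# A minimal usage sketch (the context key and stack name below are
# illustrative assumptions, not part of this snippet):
#
#   app = core.App()
#   stack = MyStack(app, "my-stack")
#   apply_permissions_boundary(
#       app.node.try_get_context("permissions_boundary_arn"), stack)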
    def __init__(self, scope: core.Construct, id: str, application_prefix: str,
                 suffix: str, kda_role: Role, **kwargs):
        super().__init__(scope, id, **kwargs)

        stack = Stack.of(self)
        region = stack.region

        # Create Cognito User Pool
        self.__user_pool = CfnUserPool(
            scope=self,
            id='UserPool',
            admin_create_user_config={'allowAdminCreateUserOnly': True},
            policies={'passwordPolicy': {
                'minimumLength': 8
            }},
            username_attributes=['email'],
            auto_verified_attributes=['email'],
            user_pool_name=application_prefix + '_user_pool')

        # Create a Cognito User Pool Domain using the newly created Cognito User Pool
        CfnUserPoolDomain(scope=self,
                          id='CognitoDomain',
                          domain=application_prefix + '-' + suffix,
                          user_pool_id=self.user_pool.ref)

        # Create Cognito Identity Pool
        self.__id_pool = CfnIdentityPool(
            scope=self,
            id='IdentityPool',
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[],
            identity_pool_name=application_prefix + '_identity_pool')

        trust_relationship = FederatedPrincipal(
            federated='cognito-identity.amazonaws.com',
            conditions={
                'StringEquals': {
                    'cognito-identity.amazonaws.com:aud': self.id_pool.ref
                },
                'ForAnyValue:StringLike': {
                    'cognito-identity.amazonaws.com:amr': 'authenticated'
                }
            },
            assume_role_action='sts:AssumeRoleWithWebIdentity')
        # IAM role for master user
        master_auth_role = Role(scope=self,
                                id='MasterAuthRole',
                                assumed_by=trust_relationship)
        # Role for authenticated user
        limited_auth_role = Role(scope=self,
                                 id='LimitedAuthRole',
                                 assumed_by=trust_relationship)
        # Attach Role to Identity Pool
        CfnIdentityPoolRoleAttachment(
            scope=self,
            id='userPoolRoleAttachment',
            identity_pool_id=self.id_pool.ref,
            roles={'authenticated': limited_auth_role.role_arn})
        # Create master-user-group
        CfnUserPoolGroup(scope=self,
                         id='AdminsGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='master-user-group',
                         role_arn=master_auth_role.role_arn)
        # Create limited-user-group
        CfnUserPoolGroup(scope=self,
                         id='UsersGroup',
                         user_pool_id=self.user_pool.ref,
                         group_name='limited-user-group',
                         role_arn=limited_auth_role.role_arn)
        # Role for the Elasticsearch service to access Cognito
        es_role = Role(scope=self,
                       id='EsRole',
                       assumed_by=ServicePrincipal(service='es.amazonaws.com'),
                       managed_policies=[
                           ManagedPolicy.from_aws_managed_policy_name(
                               'AmazonESCognitoAccess')
                       ])

        # Use the following command line to generate the python dependencies layer content
        # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
        # Build the lambda layer assets
        subprocess.call([
            'pip', 'install', '-t',
            'streaming/streaming_cdk/lambda-layer/python/lib/python3.8/site-packages',
            '-r', 'streaming/streaming_cdk/bootstrap-lambda/requirements.txt',
            '--upgrade'
        ])

        requirements_layer = _lambda.LayerVersion(
            scope=self,
            id='PythonRequirementsTemplate',
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/lambda-layer'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        # This lambda function will bootstrap the Elasticsearch cluster
        bootstrap_function_name = 'AESBootstrap'
        register_template_lambda = _lambda.Function(
            scope=self,
            id='RegisterTemplate',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/bootstrap-lambda'),
            handler='es-bootstrap.lambda_handler',
            environment={
                'REGION': region,
                'KDA_ROLE_ARN': kda_role.role_arn,
                'MASTER_ROLE_ARN': master_auth_role.role_arn
            },
            layers=[requirements_layer],
            timeout=Duration.minutes(15),
            function_name=bootstrap_function_name)

        lambda_role = register_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     bootstrap_function_name + ':*')
                ]))

        # Let the lambda assume the master role so that actions can be executed on the cluster
        # https://aws.amazon.com/premiumsupport/knowledge-center/lambda-function-assume-iam-role/
        lambda_role.add_to_policy(
            PolicyStatement(actions=['sts:AssumeRole'],
                            resources=[master_auth_role.role_arn]))

        master_auth_role.assume_role_policy.add_statements(
            PolicyStatement(actions=['sts:AssumeRole'],
                            principals=[lambda_role]))
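
        # Inside the bootstrap lambda itself, the cross-role hop would then
        # look roughly like the sketch below (lambda-side code, not part of
        # this stack; names are illustrative):
        #
        #   import os
        #   import boto3
        #   creds = boto3.client('sts').assume_role(
        #       RoleArn=os.environ['MASTER_ROLE_ARN'],
        #       RoleSessionName='es-bootstrap')['Credentials']
        #   # Sign Elasticsearch HTTP requests with creds['AccessKeyId'],
        #   # creds['SecretAccessKey'] and creds['SessionToken'].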

        # List all the roles that are allowed to access the Elasticsearch cluster.
        roles = [
            ArnPrincipal(limited_auth_role.role_arn),
            ArnPrincipal(master_auth_role.role_arn),
            ArnPrincipal(kda_role.role_arn)
        ]  # The users
        if register_template_lambda and register_template_lambda.role:
            roles.append(ArnPrincipal(
                lambda_role.role_arn))  # The lambda used to bootstrap
        # Create kms key
        kms_key = Key(scope=self,
                      id='kms-es',
                      alias='custom/es',
                      description='KMS key for Elasticsearch domain',
                      enable_key_rotation=True)

        # AES Log Groups
        es_app_log_group = logs.LogGroup(scope=self,
                                         id='EsAppLogGroup',
                                         retention=logs.RetentionDays.ONE_WEEK,
                                         removal_policy=RemovalPolicy.RETAIN)

        # Create the Elasticsearch domain
        es_domain_arn = stack.format_arn(service='es',
                                         resource='domain',
                                         resource_name=application_prefix +
                                         '/*')

        es_access_policy = PolicyDocument(statements=[
            PolicyStatement(principals=roles,
                            actions=[
                                'es:ESHttpGet', 'es:ESHttpPut',
                                'es:ESHttpPost', 'es:ESHttpDelete'
                            ],
                            resources=[es_domain_arn])
        ])
        self.__es_domain = es.CfnDomain(
            scope=self,
            id='searchDomain',
            elasticsearch_cluster_config={
                'instanceType': 'r5.large.elasticsearch',
                'instanceCount': 2,
                'dedicatedMasterEnabled': True,
                'dedicatedMasterCount': 3,
                'dedicatedMasterType': 'r5.large.elasticsearch',
                'zoneAwarenessEnabled': True,
                'zoneAwarenessConfig': {
                    'availabilityZoneCount': 2
                },
            },
            encryption_at_rest_options={
                'enabled': True,
                'kmsKeyId': kms_key.key_id
            },
            node_to_node_encryption_options={'enabled': True},
            ebs_options={
                'volumeSize': 10,
                'ebsEnabled': True
            },
            elasticsearch_version='7.9',
            domain_name=application_prefix,
            access_policies=es_access_policy,
            cognito_options={
                'enabled': True,
                'identityPoolId': self.id_pool.ref,
                'roleArn': es_role.role_arn,
                'userPoolId': self.user_pool.ref
            },
            advanced_security_options={
                'enabled': True,
                'internalUserDatabaseEnabled': False,
                'masterUserOptions': {
                    'masterUserArn': master_auth_role.role_arn
                }
            },
            domain_endpoint_options={
                'enforceHttps': True,
                'tlsSecurityPolicy': 'Policy-Min-TLS-1-2-2019-07'
            },
            # log_publishing_options={
            #     'ES_APPLICATION_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': es_app_log_group.log_group_arn
            #     },
            #     'AUDIT_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': ''
            #     },
            #     'SEARCH_SLOW_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': ''
            #     },
            #     'INDEX_SLOW_LOGS': {
            #         'enabled': True,
            #         'cloud_watch_logs_log_group_arn': ''
            #     }
            # }
        )

        # Not yet on the roadmap...
        # See https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/283
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmEnabled', True)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmCount', 2)
        # self.es_domain.add_property_override('ElasticsearchClusterConfig.WarmType', 'ultrawarm1.large.elasticsearch')

        # Deny all roles from the authentication provider - users must be added to groups
        # This lambda function will bootstrap the Elasticsearch cluster
        cognito_function_name = 'CognitoFix'
        cognito_template_lambda = _lambda.Function(
            scope=self,
            id='CognitoFixLambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                'streaming/streaming_cdk/cognito-lambda'),
            handler='handler.handler',
            environment={
                'REGION': region,
                'USER_POOL_ID': self.__user_pool.ref,
                'IDENTITY_POOL_ID': self.__id_pool.ref,
                'LIMITED_ROLE_ARN': limited_auth_role.role_arn
            },
            timeout=Duration.minutes(15),
            function_name=cognito_function_name)

        lambda_role = cognito_template_lambda.role
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogGroup'],
                resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(
            PolicyStatement(
                actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                resources=[
                    stack.format_arn(service='logs',
                                     resource='log_group',
                                     resource_name='/aws/lambda/' +
                                     cognito_function_name + ':*')
                ]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-idp:ListUserPoolClients'],
                            resources=[self.user_pool.attr_arn]))
        lambda_role.add_to_policy(
            PolicyStatement(actions=['iam:PassRole'],
                            resources=[limited_auth_role.role_arn]))

        cognito_id_res = Fn.join(':', [
            'arn:aws:cognito-identity', stack.region, stack.account,
            Fn.join('/', ['identitypool', self.__id_pool.ref])
        ])

        lambda_role.add_to_policy(
            PolicyStatement(actions=['cognito-identity:SetIdentityPoolRoles'],
                            resources=[cognito_id_res]))
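
        # With that permission, the CognitoFix lambda can pin the identity
        # pool's authenticated role to the limited role, presumably along the
        # lines of this lambda-side sketch (not shown in this snippet):
        #
        #   import os
        #   import boto3
        #   boto3.client('cognito-identity').set_identity_pool_roles(
        #       IdentityPoolId=os.environ['IDENTITY_POOL_ID'],
        #       Roles={'authenticated': os.environ['LIMITED_ROLE_ARN']})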

        # Get the Domain Endpoint and register it with the lambda as environment variable.
        register_template_lambda.add_environment(
            'DOMAIN', self.__es_domain.attr_domain_endpoint)

        CfnOutput(scope=self,
                  id='createUserUrl',
                  description="Create a new user in the user pool here.",
                  value="https://" + scope.region +
                  ".console.aws.amazon.com/cognito/users?region=" +
                  scope.region + "#/pool/" + self.user_pool.ref + "/users")
        CfnOutput(scope=self,
                  id='kibanaUrl',
                  description="Access Kibana via this URL.",
                  value="https://" + self.__es_domain.attr_domain_endpoint +
                  "/_plugin/kibana/")

        bootstrap_lambda_provider = Provider(
            scope=self,
            id='BootstrapLambdaProvider',
            on_event_handler=register_template_lambda)
        CustomResource(scope=self,
                       id='ExecuteRegisterTemplate',
                       service_token=bootstrap_lambda_provider.service_token,
                       properties={'Timeout': 900})

        cognito_lambda_provider = Provider(
            scope=self,
            id='CognitoFixLambdaProvider',
            on_event_handler=cognito_template_lambda)
        cognito_fix_resource = CustomResource(
            scope=self,
            id='ExecuteCognitoFix',
            service_token=cognito_lambda_provider.service_token)
        cognito_fix_resource.node.add_dependency(self.__es_domain)
Example #25
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.neo4j_user_secret = Secret(self, 'secretsmanager-secret-neo4j-user',
            secret_name=NEO4J_USER_SECRET_NAME
        )
       
        neo4j_server_instance_role = Role(self, 'iam-role-neo4j-server-instance',
            assumed_by=ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'), # Use SSM Session Manager rather than straight ssh
                ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
            ],
            inline_policies={
                "work-with-tags": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ec2:CreateTags',
                                'ec2:Describe*',
                                'elasticloadbalancing:Describe*',
                                'cloudwatch:ListMetrics',
                                'cloudwatch:GetMetricStatistics',
                                'cloudwatch:Describe*',
                                'autoscaling:Describe*',
                            ],
                            resources=["*"]
                        )
                    ]
                ),
                "access-neo4j-user-secret": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=['secretsmanager:GetSecretValue'],
                            resources=[self.neo4j_user_secret.secret_arn]
                        )
                    ]
                )
            }
        )

        instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
            description="Altimeter Neo4j Server Instance",
            vpc=vpc
        )

        instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE') # TESTING
        # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'HTTPS from ANYWHERE') # TESTING

        # Prepare userdata script
        with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
            userdata_content = f.read()
        userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]", NEO4J_USER_SECRET_NAME)
        user_data = UserData.for_linux()
        user_data.add_commands(userdata_content)
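
        # Inside the userdata script the instance can then fetch the secret it
        # was granted secretsmanager:GetSecretValue on above, e.g. (shell,
        # illustrative; the placeholder is substituted before this runs):
        #
        #   aws secretsmanager get-secret-value \
        #       --secret-id "[[NEO4J_USER_SECRET_NAME]]" \
        #       --query SecretString --output text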

        instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

        self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
            instance_name="instance-altimeter--neo4j-community-standalone-server",
            machine_image=MachineImage.generic_linux(
                ami_map={
                    "eu-west-1": "ami-00c8631d384ad7c53"
                }
            ),
            instance_type=instance_type,
            role=neo4j_server_instance_role,
            vpc=vpc,
            # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
            security_group=instance_security_group,
            user_data=user_data,
            block_devices=[
                BlockDevice(
                    device_name="/dev/sda1",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=10,
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True, # Just encrypt
                        delete_on_termination=True
                    )                    
                ),
                BlockDevice(
                    device_name="/dev/sdb",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=20, # TODO: Check size requirements
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True,
                        delete_on_termination=True # ASSUMPTION: NO 'primary' data, only altimeter results.
                    )                    
                )
            ]
        )

        cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
        cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")