Example #1
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        role = iam.Role(
            self,
            'Ec2Role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMManagedInstanceCore'),
            ],
        )

        sg = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            vpc=vpc,
        )

        # TODO: add this to userdata
        # yum install java-1.8.0-devel -y
        # wget https://archive.apache.org/dist/tinkerpop/3.4.1/apache-tinkerpop-gremlin-console-3.4.1-bin.zip
        # unzip apache-tinkerpop-gremlin-console-3.4.1-bin.zip
        # cd apache-tinkerpop-gremlin-console-3.4.1
        # wget https://www.amazontrust.com/repository/SFSRootCAG2.pem
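        # A hedged sketch for the TODO above (assuming aws_cdk.aws_ec2 is imported
        # as ec2, as elsewhere in this snippet): collect the commands as user data,
        # which could then be passed to the instance below via user_data=gremlin_user_data.
        gremlin_user_data = ec2.UserData.for_linux()
        gremlin_user_data.add_commands(
            'yum install java-1.8.0-devel -y',
            'wget https://archive.apache.org/dist/tinkerpop/3.4.1/apache-tinkerpop-gremlin-console-3.4.1-bin.zip',
            'unzip apache-tinkerpop-gremlin-console-3.4.1-bin.zip',
            'cd apache-tinkerpop-gremlin-console-3.4.1',
            'wget https://www.amazontrust.com/repository/SFSRootCAG2.pem',
        )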

        ec2.Instance(
            self,
            'Instance',
            role=role,
            vpc=vpc,
            security_group=sg,
            instance_type=ec2.InstanceType.of(
                instance_class=ec2.InstanceClass.BURSTABLE3_AMD,
                instance_size=ec2.InstanceSize.NANO,
            ),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            ),
        )

        self.role = role
        self.security_group = sg
Example #2
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 config: dict, region: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        ### EC2 Server for Jenkins
        image = ec2.GenericLinuxImage({
            region: config["ami_id"],
        })

        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        subnet = vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE).subnets[0]
        subnet_selection = ec2.SubnetSelection(subnets=[subnet])

        self.security_group = ec2.SecurityGroup(self, "EC2SG", vpc=vpc)

        self._instance = ec2.Instance(self,
                                      "EC2",
                                      instance_type=ec2.InstanceType(
                                          config["instance_type"]),
                                      machine_image=image,
                                      vpc=vpc,
                                      vpc_subnets=subnet_selection,
                                      role=role,
                                      security_group=self.security_group)

        ### Lambda for github webhooks
        self._webhook_forwarder = _lambda.Function(
            self,
            "WebHookForwarder",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(dirname, "lambda", "webhook_forwarder")),
            handler="lambda_function.lambda_handler",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnets=vpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnets))
Example #3
    def __init__(self,
                 scope,
                 id,
                 *,
                 name=None,
                 directory=None,
                 bucket=None,
                 key=None) -> None:
        super().__init__(scope, id)
        # ==================================================
        # ================= IAM ROLE =======================
        # ==================================================
        lambda_role = iam.Role(
            scope=self,
            id='lambda_role',
            assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AWSLambdaExecute')
            ])

        # ==================================================
        # =================== ECR IMAGE ====================
        # ==================================================
        ecr_image = aws_lambda.DockerImageCode.from_image_asset(
            repository_name=name, directory=directory)

        # ==================================================
        # ================ LAMBDA FUNCTION =================
        # ==================================================
        self.lambda_function = aws_lambda.DockerImageFunction(
            scope=self,
            id='lambda',
            function_name=name,
            code=ecr_image,
            memory_size=1024,
            role=lambda_role,
            environment={
                'BUCKET': bucket,
                'KEY': key
            },
            timeout=core.Duration.seconds(60))
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC
        vpc = ec2.Vpc(self, "VPC",
            nat_gateways=0,
            subnet_configuration=[ec2.SubnetConfiguration(name="public",subnet_type=ec2.SubnetType.PUBLIC)]
            )

        # AMI 
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
            )

        # Instance Role and SSM Managed Policy
        role = iam.Role(self, "InstanceSSM", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"))

        # Instance
        instance = ec2.Instance(self, "Instance",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=amzn_linux,
            vpc = vpc,
            role = role
            )

        # Script in S3 as Asset
        asset = Asset(self, "Asset", path=os.path.join(dirname, "configure.sh"))
        local_path = instance.user_data.add_s3_download_command(
            bucket=asset.bucket,
            bucket_key=asset.s3_object_key
        )

        # Userdata executes script from S3
        instance.user_data.add_execute_file_command(
            file_path=local_path
            )
        asset.grant_read(instance.role)
Example #5
    def _create_lambda(self):
        role = iam.Role(
            self,
            "LambdaPrepareDbRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            description="Role for Lambda preparing RDS",
            role_name=f"{self.name_prefix}-lambda-prepare-db-role",
            managed_policies=[
                #iam.ManagedPolicy.from_aws_managed_policy_name("AWSLambdaBasicExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaVPCAccessExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "SecretsManagerReadWrite"),
            ],
        )

        lambda_function_id = f"{self.name_prefix}-prepare_db_function"
        lambda_function_path = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn/prepare_db_function/"
        lambda_layer_path = str(pathlib.Path(
            __file__).resolve().parent) + "/lambdafn/lambda_layer/"

        layer = aws_lambda.LayerVersion(
            self, 'Layer', code=aws_lambda.AssetCode(lambda_layer_path))

        lambda_fn = aws_lambda.Function(
            scope=self,
            id=lambda_function_id,
            function_name=lambda_function_id,
            code=aws_lambda.AssetCode(path=lambda_function_path),
            handler="lambda_handler.lambda_handler",
            layers=[layer],
            timeout=Duration.seconds(300),
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            role=role,
            description="write some description for this lambda",
            security_groups=[self.security_group],
            vpc=self.vpc,
            vpc_subnets=self.subnet_selection)

        lambda_fn.add_environment('SECRETS_NAME', self.rds.secret.secret_arn)
        lambda_fn.add_environment('REGION_NAME', self.region)
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        
        # Create ECR Repository
        ghost_repo = ecr.Repository(
            self, "GhostRepo",
            repository_name="ghost"
        )

        # Create IAM Role For CodeBuild
        ghost_build_role = iam.Role(
            self, "GhostBuildRole",
            assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("EC2InstanceProfileForImageBuilderECRContainerBuilds")
            ]
        )

        # We only want to fire on the master branch and if there is a change in the dockerbuild folder
        git_hub_source = codebuild.Source.git_hub(
            owner="jasonumiker",
            repo="k8s-plus-aws-gitops",
            webhook=True,
            webhook_filters=[
                codebuild.FilterGroup.in_event_of(codebuild.EventAction.PUSH).and_branch_is("master").and_file_path_is("dockerbuild/*")
            ]
        )

        # Create CodeBuild
        build_project = codebuild.Project(
            self, "GhostBuildProject",
            source=git_hub_source,
            role=ghost_build_role,
            build_spec=codebuild.BuildSpec.from_source_filename("dockerbuild/buildspec.yml"),
            environment={
                'privileged': True,
            },
            environment_variables={
                'AWS_ACCOUNT_ID': codebuild.BuildEnvironmentVariable(value=self.account),
                'IMAGE_REPO_NAME': codebuild.BuildEnvironmentVariable(value=ghost_repo.repository_name)
            }
        )
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        vpc = ec2.Vpc(self,
                      "NewInstanceVPC",
                      nat_gateways=0,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              name="public", subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        role = iam.Role(self,
                        "InstanceSSM",
                        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        instance = ec2.Instance(
            self,
            "CDKNewInstance",
            instance_type=ec2.InstanceType("t3.nano"),
            key_name="arronmoore_com_v2",
            machine_image=amzn_linux,
            vpc=vpc,
            security_group=self.configure_security_group(vpc),
            role=role)

        asset = Asset(self,
                      "NewInstanceConfigureScript",
                      path="./new_instance/configure.sh")

        local_path = instance.user_data.add_s3_download_command(
            bucket=asset.bucket, bucket_key=asset.s3_object_key)

        instance.user_data.add_execute_file_command(file_path=local_path)
Example #8
    def __init__(self, scope: core.Construct, id: str, secgroup_name: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        with open('common/common_cdk/lambda/empty_security_group.py', 'r') as f:
            lambda_source = f.read()

        # IAM role for the lambda that empties the security group before deletion
        empty_secgroup_lambda_role = _iam.Role(
            self, 'AutoEmptyBucketLambdaRole',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        empty_secgroup_lambda_role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'ec2:RevokeSecurityGroupIngress',
                    'ec2:RevokeSecurityGroupEgress'
                ],
                resources=['arn:aws:ec2:' + core.Aws.REGION + ':' + core.Aws.ACCOUNT_ID + ':security-group/' + secgroup_name]
            )
        )

        # lambda utils to empty security group before deletion (runs with the role above)
        empty_secgroup_lambda = _lambda.SingletonFunction(self, 'EmptySecurityGroupLambda',
                                                          uuid="dfs3k8730-4ee1-11e8-9c2d-fdfs65dfsc",
                                                          runtime=_lambda.Runtime.PYTHON_3_7,
                                                          code=_lambda.Code.inline(lambda_source),
                                                          handler='index.handler',
                                                          function_name='ara-auto-empty-secgroup',
                                                          role=empty_secgroup_lambda_role
                                                          )

        empty_secgroup_lambda_provider = _custom_resources.Provider(
            self, 'EmptyBucketLambdaProvider',
            on_event_handler=empty_secgroup_lambda
        )

        core.CustomResource(
            self, 'EmptyBucketCustomResource',
            service_token=empty_secgroup_lambda_provider.service_token,
            properties={
                "secgroup_name": secgroup_name
            }
        )
Example #9
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 db_instance_class: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        stack = core.Stack.of(self)

        # TODO: find a way to add role to cluster
        role = iam.Role(
            self,
            'NeptuneRole',
            assumed_by=iam.ServicePrincipal('rds.amazonaws.com'),
        )

        sg = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            vpc=vpc,
        )

        subnet_group = neptune.CfnDBSubnetGroup(
            self,
            'SubnetGroup',
            db_subnet_group_name='{}-subnet-group'.format(
                stack.stack_name.lower()),
            db_subnet_group_description='Private subnets',
            subnet_ids=[subnet.subnet_id for subnet in vpc.private_subnets])

        cluster = neptune.CfnDBCluster(
            self,
            'Cluster',
            db_subnet_group_name=subnet_group.ref,
            vpc_security_group_ids=[sg.security_group_id])
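        # A hedged sketch for the TODO above: the role created earlier could be attached
        # through the L1 construct's AssociatedRoles property.
        cluster.associated_roles = [
            neptune.CfnDBCluster.DBClusterRoleProperty(role_arn=role.role_arn)
        ]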

        neptune.CfnDBInstance(
            self,
            'Instance',
            db_cluster_identifier=cluster.ref,
            db_instance_class=db_instance_class,
        )

        self.endpoint = cluster.attr_endpoint
        self.role = role
        self.security_group = sg
Example #10
    def create_sagemaker_train_role(self):
        # Config role
        base_role = iam.Role(
            self,
            "gw_sagemaker_train_role",
            assumed_by=iam.ServicePrincipal("sagemaker.amazonaws.com"))
        base_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))
        base_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSageMakerFullAccess"))
        base_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonElasticContainerRegistryPublicFullAccess"))
        base_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryFullAccess"))

        return base_role
Example #11
 def get_role(self, unique_name: str, service_principal: str) -> iam.Role:
     """
     Get the default role for the datajob. We use administrator access
     as the policy for our default role.
     # todo - we probably want to refine the policies for this role
     :param unique_name: a unique name we can give to our role.
     :param service_principal: the service principal for our service,
     for example glue.amazonaws.com.
     :return: iam role object.
     """
     role_name = unique_name + "-role"
     logger.debug(f"creating role {role_name}")
     return iam.Role(
         self,
         role_name,
         assumed_by=iam.ServicePrincipal(service_principal),
         managed_policies=[
             iam.ManagedPolicy.from_aws_managed_policy_name("AdministratorAccess")
         ],
     )
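One hedged way to address the todo in the docstring above (the method name and the managed_policy_names parameter below are illustrative, not part of the source) is to let callers pass scoped managed policies instead of hard-coding AdministratorAccess:

 def get_role_with_policies(self, unique_name: str, service_principal: str,
                            managed_policy_names: list) -> iam.Role:
     """Illustrative variant of get_role that takes explicit managed policy names."""
     return iam.Role(
         self,
         unique_name + "-role",
         assumed_by=iam.ServicePrincipal(service_principal),
         managed_policies=[
             iam.ManagedPolicy.from_aws_managed_policy_name(policy_name)
             for policy_name in managed_policy_names
         ],
     )

A Glue job, for example, could then be given only service-role/AWSGlueServiceRole rather than full administrator access.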
Example #12
    def create_crawler_role(self):
        """Crate a role used by crawlers in data lake."""
        service = iam.ServicePrincipal("glue.amazonaws.com")
        managed_policies = [
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSGlueServiceRole"
            ),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3ReadOnlyAccess"
            ),
        ]

        id_suffix = self.database_name.replace("_", "-")
        self._crawler_role = iam.Role(
            scope=self,
            id=f"oedi-data-lake-crawler-role--{id_suffix}",
            role_name=f"oedi_data_lake_cralwer_role__{self.database_name}",
            assumed_by=service,
            managed_policies=managed_policies,
        )
Example #13
    def __init__(self, scope, id, *, description=None, env=None, tags=None, synthesizer=None, iam_user=None,
                 vpc_id=None, default_capacity=4, default_instance_type='t3a.large'):
        super().__init__(scope, id, description=description, env=env, tags=tags,
                         synthesizer=synthesizer)

        if vpc_id:
            vpc = ec2.Vpc.from_lookup(self, "VPC",
                                      vpc_id=vpc_id)
        else:
            vpc = ec2.Vpc(self, f"kodexa-vpc-{id}",
                          max_azs=2,
                          cidr="10.10.0.0/16",
                          subnet_configuration=[ec2.SubnetConfiguration(
                              subnet_type=ec2.SubnetType.PUBLIC,
                              name="Public",
                              cidr_mask=24
                          ), ec2.SubnetConfiguration(
                              subnet_type=ec2.SubnetType.PRIVATE,
                              name="Private",
                              cidr_mask=24
                          )],
                          nat_gateways=1,
                          )

            core.CfnOutput(self, "Output",
                           value=vpc.vpc_id)

        # Create K8S cluster

        cluster_admin = iam.Role(self, f"kodexa-eks-adminrole-{id}", assumed_by=iam.AccountRootPrincipal())

        cluster = eks.Cluster(self, id=f'kodexa-eks-cluster-{id}', cluster_name=f'kodexa-eks-cluster-{id}',
                              version=eks.KubernetesVersion.V1_17,
                              vpc=vpc,
                              default_capacity_instance=ec2.InstanceType(default_instance_type),
                              default_capacity=default_capacity,
                              masters_role=cluster_admin)

        if iam_user:
            admin_user = iam.User.from_user_name(id='cluster-admin-iam-user', user_name=iam_user, scope=self)
            cluster.aws_auth.add_user_mapping(admin_user, groups=['system:masters'])
Example #14
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        lambda_role = iam.Role(
            self,
            'IapTesLambdaRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole')
            ])

        function = lmbda.Function(
            self,
            'IapTesLambda',
            function_name='rnasum_iap_tes_lambda_dev',
            handler='iap_tes.lambda_handler',
            runtime=lmbda.Runtime.PYTHON_3_7,
            code=lmbda.Code.from_asset('lambdas/iap_tes'),
            role=lambda_role,
            timeout=core.Duration.seconds(20),
            environment={
                'IAP_API_BASE_URL': props['iap_api_base_url'],
                'TASK_ID': props['task_id'],
                'TASK_VERSION_WTS': props['task_version_wts'],
                'TASK_VERSION_WGS': props['task_version_wgs'],
                'SSM_PARAM_NAME': props['ssm_param_name'],
                'GDS_REFDATA_FOLDER': props['gds_refdata_folder'],
                'GDS_LOG_FOLDER': props['gds_log_folder'],
                'RNASUM_IMAGE_NAME': props['rnasum_image_name'],
                'RNASUM_IMAGE_TAG': props['rnasum_image_tag'],
                'REFDATA_NAME': props['ref_data_name']
            })

        secret_value = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "RNAsumJwtToken",
            parameter_name=props['ssm_param_name'],
            version=props['ssm_param_version'])
        secret_value.grant_read(function)
Example #15
 def build_lambda_role(self, name) -> iam.Role:
     return iam.Role(
         self,
         f"{name}-Role",
         assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
         inline_policies={
             "LambdaFunctionServiceRolePolicy":
             iam.PolicyDocument(statements=[
                 iam.PolicyStatement(
                     actions=[
                         "logs:CreateLogGroup",
                         "logs:CreateLogStream",
                         "logs:PutLogEvents",
                     ],
                     resources=[
                         f"arn:{Aws.PARTITION}:logs:{Aws.REGION}:{Aws.ACCOUNT_ID}:log-group:/aws/lambda/*"
                     ],
                 )
             ])
         },
     )
Example #16
    @staticmethod
    def _create_autoscaling_role(scope: core.Construct, id: str, *, role_name: Optional[str] = None):
        role = iam.Role(
            scope, id, role_name=role_name,
            assumed_by=iam.ServicePrincipal('elasticmapreduce.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AmazonElasticMapReduceforAutoScalingRole')
            ])

        role.assume_role_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                principals=[
                    iam.ServicePrincipal('application-autoscaling.amazonaws.com')
                ],
                actions=[
                    'sts:AssumeRole'
                ]
            )
        )
        return role
Example #17
    def _connect_pinpoint_to_firehose_delivery_stream(self, *, stack):

        self.pinpoint_firehose_role = iam.Role(
            stack,
            'PinPointToFirehoseRole',
            assumed_by=iam.ServicePrincipal('pinpoint.amazonaws.com'))

        self.pinpoint_firehose_role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=[
                                    "firehose:PutRecordBatch",
                                    "firehose:DescribeDeliveryStream"
                                ],
                                resources=[self.kfh_instance.attr_arn]))

        self.pinpoint_to_kfh = pinpoint.CfnEventStream(
            stack,
            'pinpointclickstreamtokfh',
            application_id=self.pinpoint_instance.ref,
            destination_stream_arn=self.kfh_instance.attr_arn,
            role_arn=self.pinpoint_firehose_role.role_arn)
Example #18
    def create_wic_provider_test_role(self) -> None:
        wic_provider_test_role_condition = {
            "StringEquals": {"graph.facebook.com:app_id": self._facebook_app_id}
        }

        wic_provider_test_role = aws_iam.Role(
            self,
            "wic_provider_test_role",
            assumed_by=aws_iam.FederatedPrincipal(
                "graph.facebook.com",
                wic_provider_test_role_condition,
                "sts:AssumeRoleWithWebIdentity",
            ),
        )
        wic_provider_test_role.add_to_policy(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW, actions=["translate:TranslateText"], resources=["*"]
            )
        )

        self.parameters_to_save["WICProviderTestRoleArn"] = wic_provider_test_role.role_arn
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(
            self,
            'Vpc',
            cidr='10.11.12.0/24',
            max_azs=2,
            nat_gateways=1,
        )

        role = iam.Role(
            self,
            'Ec2SsmRole',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMManagedInstanceCore')
            ],
        )

        instance = ec2.Instance(
            self,
            'Instance',
            role=role,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets),
            instance_type=ec2.InstanceType.of(
                instance_class=ec2.InstanceClass.BURSTABLE4_GRAVITON,
                instance_size=ec2.InstanceSize.NANO,
            ),
            machine_image=ec2.AmazonLinuxImage(
                cpu_type=ec2.AmazonLinuxCpuType.ARM_64,
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            ),
        )

        # TODO: Wait for EnclaveOptions to be supported by CDK
        # https://github.com/aws/aws-cdk/issues/12170
        instance.instance.hibernation_options = {'configured': True}
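        # A hedged sketch for the EnclaveOptions TODO above: until CDK supports it natively,
        # the raw CloudFormation property can be set through the same L1 escape hatch
        # (note that Nitro Enclaves also require a supported instance type, so the nano
        # instance above would need to change as well).
        instance.instance.add_property_override('EnclaveOptions', {'Enabled': True})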
Example #20
    def __init__(self, scope: cdk.Construct, construct_id: str, stage: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Define Lambda function
        lambda_role = iam.Role(
            self,
            "ExplainRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ],
        )

        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMFullAccess"))

        self.explain_bot_lambda = _lambda.Function(
            self,
            "ExplainHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset("lambda"),
            handler="explain.lambda_handler",
            role=lambda_role,
            timeout=cdk.Duration.minutes(5),
            environment={"STAGE": stage},
        )

        self.add_meaning_lambda = _lambda.Function(
            self,
            "AddMeaningHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset("lambda"),
            handler="add_meaning.lambda_handler",
            role=lambda_role,
            timeout=cdk.Duration.minutes(5),
            environment={"STAGE": stage},
        )
Example #21
    def _create_dispatcher_lambda(self, id: str,
                                  event_bus: aws_events.EventBus) -> aws_lambda.Function:
        # bind dispatch lambda to dynamo db with an IAM to put events in the event bus
        lambda_role = iam.Role(
            self,
            'TenantMgmtDispatch',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'TenantMgmtDispatchPolicy':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(actions=['events:PutEvents'],
                                        resources=[event_bus.event_bus_arn],
                                        effect=iam.Effect.ALLOW),
                ]),
                "TracingPolicy":
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=["ssm:Describe*", "ssm:Get*", "ssm:List*"],
                        resources=["arn:aws:ssm:*"],
                        effect=iam.Effect.ALLOW,
                    ),
                ]),
            },
            managed_policies=[self._cloudwatch_logs_policy],
        )

        return aws_lambda.Function(
            self,
            f'{id}Dispatcher',
            function_name=f'{id}Dispatcher',
            code=aws_lambda.Code.from_asset(".build/tenant_crud_dispatcher"),
            handler='functions.crud_dispatcher.crud_dispatch_dynamodb_source',
            environment={
                'EVENT_BUS_NAME':
                event_bus.event_bus_name,  # where to put the events into
            },
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            role=lambda_role,
            memory_size=192,
        )
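The "bind dispatch lambda to dynamo db" wiring mentioned in the comment above is not shown in this fragment; a hedged sketch of it (the table parameter and the aws_dynamodb / aws_lambda_event_sources imports are assumptions, not from the source) could look like:

    def _bind_dispatcher_to_table(self, dispatcher: aws_lambda.Function,
                                  table: aws_dynamodb.ITable) -> None:
        # Stream table changes into the dispatcher; the table needs a DynamoDB stream enabled.
        dispatcher.add_event_source(
            aws_lambda_event_sources.DynamoEventSource(
                table,
                starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
            ))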
Example #22
    def __init__(self, scope: core.Construct, id: str, vpc, KeyPairName, ec2_type, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create SQL instances
        azs = vpc.availability_zones
        self.sql_sg = ec2.SecurityGroup(
            self, 'SQL-Security-Group',
            vpc=vpc,
            allow_all_outbound=True,
            description='SQL-Security-Group-Nodes',
            security_group_name='sql-sg-' + id)
        self.role = iam.Role(
            self, 'ec2-sql-role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
        
        #Grant permission to access the MAD secret
        self.role.add_managed_policy(policy=iam.ManagedPolicy.from_aws_managed_policy_name('SecretsManagerReadWrite'))

        self.node1 = ec2.Instance(self, "SQL Node1",
                                instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
                                machine_image=windows_ami,
                                vpc=vpc,
                                key_name=KeyPairName,
                                user_data=ec2.UserData.custom(user_data),
                                availability_zone=azs[0],
                                role=self.role,
                                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE,one_per_az=True),
                                security_group=self.sql_sg
                                )
        self.node2 = ec2.Instance(self, "SQL Node2",
                                instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
                                machine_image=windows_ami,
                                vpc=vpc,
                                key_name=KeyPairName,
                                user_data=ec2.UserData.custom(user_data),
                                availability_zone=azs[1],
                                role=self.role,
                                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE,one_per_az=True),
                                security_group=self.sql_sg
                                )

        # Open Security group - change to a reference
        self.sql_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4('10.0.0.0/8'),
            connection=ec2.Port.all_traffic(),
            description='Allow traffic between SQL nodes + VPC')
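        # A hedged sketch of the "change to a reference" note above: referencing the security
        # group itself as the peer would scope the rule to traffic between the SQL nodes.
        self.sql_sg.add_ingress_rule(
            peer=self.sql_sg,
            connection=ec2.Port.all_traffic(),
            description='Allow traffic between SQL nodes')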
        
        
        core.CfnOutput(self, "node1",value=self.node1.instance_private_ip)
        core.CfnOutput(self, "node2",value=self.node2.instance_private_ip)
Example #23
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc = ec2.Vpc(self, "ecs-load-test", cidr="10.0.0.0/22", max_azs=3)

        cluster = ecs.Cluster(self, "load-test-cluser", vpc=vpc)

        repository = ecr.Repository(self, "spring-boot-helloworld", image_scan_on_push=True)

        role = iam.Role(self, "ecs-allow-cw-role",
          assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        role.add_to_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=['*'],
            actions=["cloudwatch:*"]
        ))

        task_definition = ecs.FargateTaskDefinition( self, "spring-boot-td",
                task_role=role,
                cpu=512,
                memory_limit_mib=2048)

        image = ecs.ContainerImage.from_ecr_repository(repository, "v24")
        container = task_definition.add_container( "spring-boot-container",
                image=image,
                logging=ecs.LogDrivers.aws_logs(stream_prefix="loadtest"))

        port_mapping = ecs.PortMapping(container_port=8080, host_port=8080)
        container.add_port_mappings(port_mapping)

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(self, "test-service",
            cluster=cluster,
            task_definition=task_definition,
            desired_count=2,
            cpu=512,
            memory_limit_mib=2048,
            public_load_balancer=True)

        fargate_service.target_group.set_attribute("deregistration_delay.timeout_seconds", "10")
Example #24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create role for Notebook instance
        nrole = iam_.Role(self,
                          "notebookAccessRole",
                          assumed_by=iam_.ServicePrincipal("sagemaker"))

        nrole.add_managed_policy(
            iam_.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSageMakerFullAccess'))
        nrole.add_managed_policy(
            iam_.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonEC2ReadOnlyAccess'))
        notebook_uuid = str(uuid.uuid4())
        notebook_uuid = str(notebook_uuid[0:notebook_uuid.find('-')])
        notebook_instance_id = 'spot-history-notebook-' + notebook_uuid

        notebook_instance = sagemaker_.CfnNotebookInstance(
            self,
            notebook_instance_id,
            instance_type='ml.m5.xlarge',
            volume_size_in_gb=10,
            security_group_ids=default_sg,
            subnet_id=default_subnet,
            notebook_instance_name=notebook_instance_id,
            role_arn=nrole.role_arn,
            default_code_repository=github_repo,
        )

        notebook_url = "https://{}.console.aws.amazon.com/sagemaker/home?region={}#/notebook-instances/openNotebook/{}?view=classic".format(
            my_region, my_region, notebook_instance.notebook_instance_name)

        core.CfnOutput(
            self,
            "Notebook Name",
            value=notebook_url,
            description="Notebook Instance Name",
        )
Example #25
def eks_node_role(
    scope: core.Construct,
    id: str,
    cluster: aws_eks.ICluster,
    role_name: typing.Optional[str]=None,
) -> aws_iam.Role:
    role = aws_iam.Role(
        scope=scope,
        id=id,
        role_name=role_name,
        path='/eks/',
        assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        managed_policies=list(_eks_node_role_base_policies),
    )

    core.Tag.add(
        scope=role,
        key='eks/%s/type' % cluster.cluster_name,
        value='node'
    )

    return role
Example #26
 def create_immediate_response_execution_role(
         self, function_name: str, parameter_key: str) -> iam_.Role:
     role_name = f"{function_name}-ExecutionRole"
     return iam_.Role(
         self,
         role_name,
         assumed_by=iam_.ServicePrincipal("lambda.amazonaws.com"),
         inline_policies={
             f"{function_name}-ExecutionPolicy":
             iam_.PolicyDocument(statements=[
                 iam_.PolicyStatement(
                     actions=[
                         "lambda:InvokeFunction",
                         "lambda:InvokeAsync",
                     ],
                     effect=iam_.Effect.ALLOW,
                     resources=[
                         self.func_async_worker.function_arn,
                         self.func_sync_worker.function_arn,
                     ],
                 ),
                 iam_.PolicyStatement(
                     actions=[
                         "ssm:GetParameter",
                     ],
                     effect=iam_.Effect.ALLOW,
                     resources=[
                         f"arn:aws:ssm:{self.region}:{self.account}:parameter{parameter_key}",
                     ],
                 ),
             ])
         },
         managed_policies=[
             iam_.ManagedPolicy.from_aws_managed_policy_name(
                 "service-role/AWSLambdaBasicExecutionRole"),
             # iam_.ManagedPolicy.from_aws_managed_policy_name("AWSXrayWriteOnlyAccess"),
         ],
         role_name=role_name,
     )
Example #27
    def create_swift_infrastructure_role(
            self, database_instance: _rds.DatabaseInstance, instance_ids: List[str],
            mq_broker_arn: str):
        """create swift infrastructure role"""
        swift_infrastructure_role = \
            _iam.Role(self, "SWIFTInfrastructureRole",
                      role_name="SWIFTInfrastructureRole",
                      assumed_by=_iam.AccountPrincipal(account_id=self.account)
                      .with_conditions({"Bool": {"aws:MultiFactorAuthPresent": "true"}})
                      )
        instances_resource = []
        if instance_ids is not None:
            for instance_id in instance_ids:
                instances_resource.append(
                    "arn:aws:ec2:" + self.region + ":" + self.account + ":instance/" + instance_id)
        statements = [
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["rds:Describe*"],
                resources=["*"]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["rds:Start*", "rds:Stop*"],
                resources=[database_instance.instance_arn]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ec2:Describe*"],
                resources=["*"]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ec2:Start*", "ec2:Stop*"],
                resources=instances_resource),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["mq:List*", "mq:Describe*", "mq:RebootBroker"],
                resources=[mq_broker_arn]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["logs:List*", "logs:Describe*", "logs:Get*"],
                resources=["*"])]

        _iam.Policy(
            self, "SwiftInfrastructurePolicy", policy_name="SwiftInfrastructurePolicy",
            roles=[swift_infrastructure_role], statements=statements,
            force=True)
Example #28
    def create_swift_instance_operator_role(self, instance_ids):
        """create swift instance operator role"""
        swift_instance_operator_role = \
            _iam.Role(self, "SWIFTInstanceOperatorRole",
                      role_name="SWIFTInstanceOperatorRole",
                      assumed_by=_iam.AccountPrincipal(account_id=self.account)
                      .with_conditions({"Bool": {"aws:MultiFactorAuthPresent": "true"}})
                      )

        instances_resource = []
        if instance_ids is not None:
            for instance_id in instance_ids:
                instances_resource.append(
                    "arn:aws:ec2:" + self.region + ":" + self.account + ":instance/" + instance_id)

        ssm_doc_resource = "arn:aws:ssm:" + self.region + \
                           ":" + self.account + ":document/SSM-SessionManagerRunShell"

        statements = [
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW, actions=["ssm:StartSession", "ssm:SendCommand"],
                resources=[ssm_doc_resource] + instances_resource,
                conditions={"BoolIfExists": {
                    "ssm:SessionDocumentAccessCheck": "true"}}),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["ssm:DescribeSessions", "ssm:GetConnectionStatus",
                         "ssm:DescribeInstanceInformation",
                         "ssm:DescribeInstanceProperties", "ec2:DescribeInstances"],
                resources=["*"]),
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["ssm:TerminateSession"],
                resources=[
                    "arn:aws:ssm:*:*:session/${aws:username}-*"])]
        _iam.Policy(
            self, "SSMInstanceAccessPolicy", policy_name="SSMInstanceAccessPolicy",
            roles=[swift_instance_operator_role], statements=statements,
            force=True)
Example #29
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.role = aws_iam.Role(
            self,
            id="something-role",
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'),
        )
        self.role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))

        self.table = aws_dynamodb.Table(
            self,
            id="SomethingTable",
            table_name=f"something-table",
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=aws_dynamodb.Attribute(
                name='something_id', type=aws_dynamodb.AttributeType.STRING),
        )
        self.table.grant_full_access(self.role)
Example #30
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ec_role = iam.Role(
            self,
            "FirstRoleCDK",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
            description="First Role created by CDK",
            inline_policies={
                "S3IAMFullAccess":
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                        actions=['s3:*'],
                                        resources=['*']),
                    iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                        actions=['iam:*'],
                                        resources=['*']),
                ])
            })
        ec_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))