def generate_dynamo_data_source(self, graphql_api, type_name):
        """
        Generates a DynamoDB data source for the given GraphQL type.  This includes the
        Dynamo table, the AppSync data source, a data source role, and the resolvers.

        NOTE: This function generates Dynamo tables with a hash key called `id`, but no
        other keys.

        :param graphql_api  The AppSync GraphQL API resource the data source is
                            attached to (its `id` is used for the data source).
        :param type_name    The name of the GraphQL type.  This is the identifier which
                            appears after the `type` keyword in the schema.
        :returns A dict with keys `table`, `data_source_iam_role`,
                 `data_source_iam_role_policy`, `data_source` and `resolvers`.
        """

        # On-demand table keyed only by `id` (see NOTE above).  The physical name
        # includes random chars to avoid collisions across stacks.
        table = dynamodb.Table(
            f"{self.stack_name}_{type_name}_table",
            name=f"{self.stack_name}_{self.random_chars}.{type_name}",
            hash_key="id",
            attributes=[{"name": "id", "type": "S"}],
            # stream_view_type="NEW_AND_OLD_IMAGES",
            billing_mode="PAY_PER_REQUEST",
        )

        # Role that AppSync assumes when reading/writing the table.
        data_source_iam_role = iam.Role(
            f"{self.stack_name}_{type_name}_role",
            assume_role_policy="""{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "appsync.amazonaws.com"
                    },
                    "Action": "sts:AssumeRole"
                }
            ]
        }""",
        )

        aws_region = config.region
        account_id = get_caller_identity().account_id

        # The table name is a Pulumi Output, so the policy document is rendered
        # inside .apply() once the name is known.  aws_region/account_id are
        # captured by the lambda's closure to build the table ARNs.
        data_source_iam_role_policy = iam.RolePolicy(
            f"{self.stack_name}_{type_name}_role_policy",
            role=data_source_iam_role.name,
            name="MyDynamoDBAccess",
            policy=table.name.apply(
                lambda table_name: f"""{{
            "Version": "2012-10-17",
            "Statement": [
                {{
                    "Effect": "Allow",
                    "Action": [
                        "dynamodb:BatchGetItem",
                        "dynamodb:BatchWriteItem",
                        "dynamodb:PutItem",
                        "dynamodb:DeleteItem",
                        "dynamodb:GetItem",
                        "dynamodb:Scan",
                        "dynamodb:Query",
                        "dynamodb:UpdateItem"
                    ],
                    "Resource": [
                        "arn:aws:dynamodb:{aws_region}:{account_id}:table/{table_name}",
                        "arn:aws:dynamodb:{aws_region}:{account_id}:table/{table_name}/*"
                    ]
                }}
            ]
        }}"""
            ),
        )

        # depends_on guards against AppSync validating the role before IAM has it.
        data_source = appsync.DataSource(
            f"{self.stack_name}_{type_name}_data_source",
            api_id=graphql_api.id,
            name=f"{type_name}TableDataSource_{self.random_chars}",
            type="AMAZON_DYNAMODB",
            service_role_arn=data_source_iam_role.arn,
            dynamodb_config={"table_name": table.name},
            opts=ResourceOptions(depends_on=[data_source_iam_role]),
        )

        resolvers = self.generate_resolvers(graphql_api, type_name, data_source)

        return {
            "table": table,
            "data_source_iam_role": data_source_iam_role,
            "data_source_iam_role_policy": data_source_iam_role_policy,
            "data_source": data_source,
            "resolvers": resolvers,
        }
# Example #2
    def __init__(
        self,
        name,
        should_create_gtm_tag=True,
        site_name: Input[str] = None,
        site_url: Input[str] = None,
        opts=None,
    ):
        """
        Provision the analytics pipeline: an S3 bucket, a Kinesis Firehose
        delivery stream into it, a Pinpoint application whose event stream is
        forwarded to the Firehose, and (optionally) a GTM container/tag.

        :param name: Base name used as a prefix for the child resource names.
        :param should_create_gtm_tag: Whether or not a GTM environment should be created
                with a tag for calling Amplify and Google Analytics.
        :param site_name: The website name used for the Google Analytics property.  If
                `should_create_gtm_tag` is `True`, this is required.
        :param site_url: The website URL used for the Google Analytics property.  If
                `should_create_gtm_tag` is `True`, this is required.
        :param opts: Optional Pulumi ResourceOptions forwarded to the component.
        :raises ValueError: if the GTM tag is requested but `site_name` or
                `site_url` is missing.
        """
        super().__init__("nuage:aws:Analytics", name, None, opts)

        account_id = get_caller_identity().account_id
        region = config.region

        # Destination bucket for events delivered by the Firehose stream.
        bucket = s3.Bucket(f"{name}Bucket")

        firehose_role = iam.Role(
            f"{name}FirehoseRole",
            assume_role_policy=get_firehose_role_trust_policy_document(account_id),
        )

        delivery_stream = kinesis.FirehoseDeliveryStream(
            f"{name}DeliveryStream",
            destination="extended_s3",
            extended_s3_configuration={
                "bucketArn": bucket.arn,
                "role_arn": firehose_role.arn,
                "compressionFormat": "GZIP",
            },
            opts=ResourceOptions(depends_on=[bucket, firehose_role]),
        )

        # The policy document depends on Outputs (bucket ARN, stream name),
        # hence the .apply(json.dumps) rendering.
        firehose_role_policy = iam.RolePolicy(
            f"{name}DeliveryStreamPolicy",
            role=firehose_role.name,
            policy=get_firehose_role_policy_document(
                region, account_id, bucket.arn, delivery_stream.name
            ).apply(json.dumps),
        )

        pinpoint_app = pinpoint.App(f"{name}PinpointApp")

        pinpoint_stream_role = iam.Role(
            f"{name}PinpointStreamRole",
            assume_role_policy=get_pinpoint_stream_role_trust_policy_document(),
        )

        pinpoint_stream_role_policy = iam.RolePolicy(
            f"{name}PinpointStreamPolicy",
            role=pinpoint_stream_role.name,
            policy=get_pinpoint_stream_role_policy_document(
                region, account_id, delivery_stream.name, pinpoint_app.application_id
            ).apply(json.dumps),
            opts=ResourceOptions(depends_on=[pinpoint_stream_role, delivery_stream]),
        )

        # IAM roles can take time to propagate so we have to add an artificial delay.
        # NOTE(review): this resource name is not prefixed with `name`, so two
        # instances of this component would collide — confirm intended.
        pinpoint_stream_role_delay = Delay(
            "EventStreamRoleDelay",
            10,
            opts=ResourceOptions(depends_on=[pinpoint_stream_role_policy]),
        )

        pinpoint_stream = pinpoint.EventStream(
            f"{name}PinpointEventStream",
            application_id=pinpoint_app.application_id,
            destination_stream_arn=delivery_stream.arn,
            role_arn=pinpoint_stream_role.arn,
            opts=ResourceOptions(
                depends_on=[delivery_stream, pinpoint_app, pinpoint_stream_role_delay]
            ),
        )

        # GTM keys default to None so the output shape is stable whether or not
        # the tag is created.
        outputs = {
            "bucket_name": bucket.id,
            "delivery_stream_name": delivery_stream.name,
            "destination_stream_arn": delivery_stream.arn,
            "pinpoint_application_name": pinpoint_app.name,
            "pinpoint_application_id": pinpoint_app.application_id,
            "gtm_container_id": None,
            "gtm_tag": None,
            "gtm_tag_no_script": None,
            "amplify_tag_id": None,
            "event_name": None,
        }

        if should_create_gtm_tag:
            # ValueError (a subclass of Exception, so existing callers catching
            # Exception still work) pinpoints the invalid-argument condition.
            if site_name is None:
                raise ValueError("The site_name parameter is required for the GTM tag")

            if site_url is None:
                raise ValueError("The site_url parameter is required for the GTM tag")

            gtm = GtmAnalytics(name, site_name, site_url)

            outputs = {
                **outputs,
                "gtm_container_id": gtm.container_id,
                "gtm_tag": gtm.tag,
                "gtm_tag_no_script": gtm.tag_no_script,
                "amplify_tag_id": gtm.amplify_tag_id,
                "event_name": gtm.event_name,
            }

        self.set_outputs(outputs)
# Example #3
    def __init__(
        self,
        name,
        vpc_environment: VPC,
        efs_environment: EFS,
        github_repo_name: Input[str],
        github_version_name: Input[str] = None,
        opts=None,
    ):
        """
        Provision a CodeBuild project (plus its IAM role, policies, and an SSM
        parameter for the Pulumi access token) that builds and deploys the
        stack from a GitHub repository inside the given VPC.

        :param name: Base name used as a prefix for the child resource names.
        :param vpc_environment: VPC component providing the VPC, private subnet
                and security group the build containers run in.
        :param efs_environment: EFS component whose file system id is exposed
                to builds via the FILESYSTEM_ID environment variable.
        :param github_repo_name: Location of the GitHub source repository.
        :param github_version_name: Optional branch/tag/commit to build.
        :param opts: Optional Pulumi ResourceOptions forwarded to the component.
        """
        super().__init__(
            "nuage:aws:DevelopmentEnvironment:CodeBuild",
            f"{name}CodebuildEnvironment",
            None,
            opts,
        )

        # TODO pass this in - with a default?
        def get_codebuild_service_role_policy():
            # SECURITY: grants Action "*" on Resource "*" — acceptable only for
            # a throwaway development environment; tighten before production.
            return {
                "Version": "2012-10-17",
                "Statement": [
                    {"Action": "*", "Effect": "Allow", "Resource": "*"}
                ],
            }

        account_id = get_caller_identity().account_id

        # TODO add random chars on the end of default name to prevent conflicts
        project_name = f"{name}BuildDeploy"

        # Placeholder value — the real token is expected to be set out of band.
        pulumi_token_param = ssm.Parameter(
            f"{name}PulumiAccessToken", type="SecureString", value="none"
        )

        # The VPC policy depends on an Output (the subnet id), hence .apply().
        codebuild_vpc_policy = iam.Policy(
            f"{name}CodeBuildVpcPolicy",
            policy=get_codebuild_vpc_policy(
                account_id, vpc_environment.private_subnet.id
            ).apply(json.dumps),
        )

        codebuild_base_policy = iam.Policy(
            f"{name}CodeBuildBasePolicy",
            policy=json.dumps(get_codebuild_base_policy(account_id, project_name)),
        )

        codebuild_service_role_policy = iam.Policy(
            f"{name}CodeBuildServiceRolePolicy",
            policy=json.dumps(get_codebuild_service_role_policy()),
        )

        codebuild_service_role = iam.Role(
            f"{name}CodeBuildRole",
            assume_role_policy="""{
        "Version": "2012-10-17",
        "Statement": [
            {
            "Effect": "Allow",
            "Principal": {
                "Service": "codebuild.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
            }
        ]
        }""",
        )

        codebuild_vpc_policy_attach = iam.PolicyAttachment(
            f"{name}CodeBuildVpnAttachment",
            policy_arn=codebuild_vpc_policy.arn,
            roles=[codebuild_service_role.name],
        )

        codebuild_base_policy_attach = iam.PolicyAttachment(
            f"{name}CodeBuildBaseAttachment",
            policy_arn=codebuild_base_policy.arn,
            roles=[codebuild_service_role.name],
        )

        codebuild_service_role_policy_attach = iam.PolicyAttachment(
            f"{name}CodeBuildServiceRoleAttachment",
            policy_arn=codebuild_service_role_policy.arn,
            roles=[codebuild_service_role.name],
        )

        codebuild_project = codebuild.Project(
            f"{name}CodeBuildProject",
            description="Builds and deploys the stack",
            name=project_name,
            vpc_config={
                "vpc_id": vpc_environment.vpc.id,
                "subnets": [vpc_environment.private_subnet],
                "security_group_ids": [vpc_environment.security_group.id],
            },
            source={"type": "GITHUB", "location": github_repo_name},
            source_version=github_version_name,
            artifacts={"type": "NO_ARTIFACTS"},
            environment={
                "image": "aws/codebuild/amazonlinux2-x86_64-standard:2.0",
                # privileged_mode is required for Docker-in-Docker builds.
                "privileged_mode": True,
                "type": "LINUX_CONTAINER",
                "compute_type": "BUILD_GENERAL1_SMALL",
                "environment_variables": [
                    {
                        "name": "PULUMI_ACCESS_TOKEN",
                        "type": "PARAMETER_STORE",
                        "value": pulumi_token_param.name,
                    },
                    {
                        "name": "FILESYSTEM_ID",
                        "type": "PLAINTEXT",
                        "value": efs_environment.file_system_id,
                    },
                ],
            },
            service_role=codebuild_service_role.arn,
            opts=ResourceOptions(depends_on=[vpc_environment]),
        )

        outputs = {"pulumi_token_param_name": pulumi_token_param.name}

        self.set_outputs(outputs)
    def __init__(self, name, opts=None):
        """
        Build the analytics pipeline: an S3 bucket fed by a Kinesis Firehose
        delivery stream, and a Pinpoint application whose event stream is
        forwarded into that Firehose.

        :param name: Prefix applied to the names of all child resources.
        :param opts: Optional Pulumi resource options for the component.
        """
        super().__init__("nuage:aws:Analytics", name, None, opts)

        region = config.region
        account_id = get_caller_identity().account_id

        # Events ultimately land in this bucket, GZIP-compressed by Firehose.
        events_bucket = s3.Bucket(f"{name}Bucket")

        stream_role = iam.Role(
            f"{name}FirehoseRole",
            assume_role_policy=get_firehose_role_trust_policy_document(account_id),
        )

        firehose = kinesis.FirehoseDeliveryStream(
            f"{name}DeliveryStream",
            destination="extended_s3",
            extended_s3_configuration={
                "bucketArn": events_bucket.arn,
                "role_arn": stream_role.arn,
                "compressionFormat": "GZIP",
            },
            opts=ResourceOptions(depends_on=[events_bucket, stream_role]),
        )

        # Policy document depends on Outputs, so it is rendered via .apply().
        stream_role_policy = iam.RolePolicy(
            f"{name}DeliveryStreamPolicy",
            role=stream_role.name,
            policy=get_firehose_role_policy_document(
                region, account_id, events_bucket.arn, firehose.name
            ).apply(json.dumps),
        )

        app = pinpoint.App(f"{name}PinpointApp")

        event_stream_role = iam.Role(
            f"{name}PinpointStreamRole",
            assume_role_policy=get_pinpoint_stream_role_trust_policy_document(),
        )

        event_stream_role_policy = iam.RolePolicy(
            f"{name}PinpointStreamPolicy",
            role=event_stream_role.name,
            policy=get_pinpoint_stream_role_policy_document(
                region, account_id, firehose.name, app.application_id
            ).apply(json.dumps),
            opts=ResourceOptions(depends_on=[event_stream_role, firehose]),
        )

        # IAM changes are eventually consistent, so wait before wiring the
        # event stream to the freshly-created role.
        role_delay = Delay(
            "EventStreamRoleDelay",
            10,
            opts=ResourceOptions(depends_on=[event_stream_role_policy]),
        )

        event_stream = pinpoint.EventStream(
            f"{name}PinpointEventStream",
            application_id=app.application_id,
            destination_stream_arn=firehose.arn,
            role_arn=event_stream_role.arn,
            opts=ResourceOptions(depends_on=[firehose, app, role_delay]),
        )

        component_outputs = {
            "bucket_name": events_bucket.id,
            "delivery_stream_name": firehose.name,
            "destination_stream_arn": firehose.arn,
            "pinpoint_application_name": app.name,
            "pinpoint_application_id": app.application_id,
        }
        self.set_outputs(component_outputs)
import json
from pathlib import Path

import pulumi
from pulumi import Input, ResourceOptions
from pulumi_aws import appsync, cognito, config, dynamodb, iam, s3
from pulumi_aws.get_caller_identity import get_caller_identity

from amplify_exports_file import AmplifyExportsFile

# Modify the variable below if you add new GraphQL types to the schema.

graphql_types = ["Note"]

# Name of the Amplify API project; determines where the compiled schema lives.
amplify_api_name = "notespulumi"

######

aws_region = config.region
account_id = get_caller_identity().account_id

# Read the schema that the Amplify CLI compiled into the API build directory.
amplify_api_build_dir = Path("amplify/backend/api").joinpath(
    amplify_api_name).joinpath("build")
schema_path = amplify_api_build_dir.joinpath("schema.graphql")
schema = schema_path.read_text()

# Resources

# Cognito user pool and app client — presumably the API's auth provider;
# confirm against the (not visible here) AppSync configuration.
user_pool = cognito.UserPool("MyUserPool")

user_pool_client = cognito.UserPoolClient("MyUserPoolClient",
                                          user_pool_id=user_pool.id)

stack_name = amplify_api_name