Example #1
# Imports assumed from pretf (module paths may differ in your version).
from pretf import log
from pretf.aws import get_outputs


def pretf_variables(var):
    # Read the outputs of the other stacks from their Terraform state.
    dev = get_outputs("vpc/dev")
    if not dev:
        log.bad("vpc/dev stack has no outputs")
        raise SystemExit(1)

    prod = get_outputs("vpc/prod")
    if not prod:
        log.bad("vpc/prod stack has no outputs")
        raise SystemExit(1)

    yield {
        "dev_vpc_id": dev["vpc_id"],
        "prod_vpc_id": prod["vpc_id"],
    }
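
The yielded names become Terraform variables. A minimal sketch of consuming one of them from a *.tf.py file in the same stack (the resource here is hypothetical):

from pretf.api import block

def pretf_blocks(var):
    # Hypothetical resource that consumes the variable yielded above.
    yield block("resource", "aws_internet_gateway", "dev", {
        "vpc_id": var.dev_vpc_id,
    })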
Example #2
# Imports assumed from pretf and the standard library.
from os.path import relpath
from pathlib import Path

from pretf import log, workflow


def pretf_workflow(path, terraform):
    # Restrict where pretf/terraform can run to the stack directories.
    stacks_dir = Path(__file__).parent / "stacks"
    if path.cwd.parent != stacks_dir:
        log.bad("you are not in a stack directory")
        stack_dirs = [p for p in sorted(stacks_dir.iterdir()) if p.is_dir()]
        if stack_dirs:
            log.bad("found:")
            for stack_dir in stack_dirs:
                log.bad(f"* {relpath(stack_dir)}")
        return 1

    # Symlink stack.tf.py into the current directory,
    # which handles the AWS provider and S3 backend.
    # Also get the tfvars file for the current workspace.
    stack = path.cwd.name
    workspace = terraform.workspace
    created = workflow.mirror_files(
        "../stack.tf.py", f"../../params/{stack}.{workspace}.auto.tfvars")

    # Now run the standard Pretf workflow which generates files
    # and then executes Terraform.
    return workflow.default(created=created)
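
For reference, the relative paths above imply a project layout along these lines (stack and workspace names are illustrative):

pretf.workflow.py              # this file
params/
    vpc.dev.auto.tfvars
    vpc.prod.auto.tfvars
stacks/
    stack.tf.py                # symlinked into each stack directory
    vpc/                       # run pretf from a stack directory like this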
Example #3
# Imports assumed; get_account_id and the _get_*_arn helpers are defined
# in the same module as this function.
import json
from time import sleep

from boto3 import Session

from pretf import log


def _create_s3_backend(
    session: Session, bucket: str, table: str, region_name: str
) -> None:

    # Prompt before creating anything.
    account_id = get_account_id(session)
    bucket_arn = _get_s3_bucket_arn(region_name, account_id, bucket)
    table_arn = _get_dynamodb_table_arn(region_name, account_id, table)
    log.ok(f"backend: {bucket_arn}")
    log.ok(f"backend: {table_arn}")
    if not log.accept("backend: create backend resources"):
        log.bad("backend: not created")
        raise SystemExit(1)

    # Use the S3 bucket and DynamoDB table name for the CloudFormation stack.
    if bucket == table:
        stack_name = bucket
    else:
        stack_name = f"{bucket}-{table}"
    stack_arn = _get_cloudformation_stack_arn(region_name, account_id, stack_name)
    log.ok(f"backend: creating {stack_arn}")

    # Create the stack.
    cloudformation_client = session.client("cloudformation", region_name=region_name)
    cloudformation_client.create_stack(
        StackName=stack_name,
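        # ResourceTypes restricts which resource types this template may create.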
        ResourceTypes=["AWS::DynamoDB::Table", "AWS::S3::Bucket"],
        TemplateBody=json.dumps(
            {
                "Resources": {
                    "Table": {
                        "Type": "AWS::DynamoDB::Table",
                        "Properties": {
                            "TableName": table,
                            "AttributeDefinitions": [
                                {"AttributeName": "LockID", "AttributeType": "S"}
                            ],
                            "KeySchema": [
                                {"AttributeName": "LockID", "KeyType": "HASH"}
                            ],
                            "BillingMode": "PAY_PER_REQUEST",
                        },
                    },
                    "Bucket": {
                        "Type": "AWS::S3::Bucket",
                        "Properties": {
                            "AccessControl": "Private",
                            "BucketName": bucket,
                            "VersioningConfiguration": {"Status": "Enabled"},
                        },
                    },
                }
            }
        ),
    )

    # Wait for it to complete.
    log.ok("backend: please wait...")
    while True:
        sleep(10)
        response = cloudformation_client.describe_stacks(StackName=stack_name)
        for stack in response["Stacks"]:
            if stack["StackStatus"] == "CREATE_IN_PROGRESS":
                pass
            elif stack["StackStatus"] == "CREATE_COMPLETE":
                log.ok("backend: create complete")
                return
            else:
                # Any other status is a failure state; stop instead of
                # polling forever.
                log.bad(f"backend: {stack['StackStatus']}")
                log.bad(f"backend: {stack.get('StackStatusReason', '')}")
                raise SystemExit(1)
Example #4
# Excerpt from pretf's AWS helpers. Imports assumed; get_session,
# get_account_id, export_environment_variables, block, log, Block and the
# _-prefixed functions used below are defined or imported in the same module.
from typing import Any


def terraform_backend_s3(bucket: str, dynamodb_table: str, **config: Any) -> Block:
    """
    Ensures that the S3 backend exists, prompting to create it if
    necessary, exports credentials as environment variables in some
    cases, and returns a Terraform configuration block for the backend.

    """

    # Create a session from any AWS credentials options.

    session_kwargs = {}
    session_kwargs_map = {
        "profile": "profile_name",
        "access_key": "aws_access_key_id",
        "secret_key": "aws_secret_access_key",
        "token": "aws_session_token",
    }
    for config_key, session_key in session_kwargs_map.items():
        config_value = config.get(config_key)
        if config_value:
            session_kwargs[session_key] = config_value

    session = get_session(**session_kwargs)

    region = config.get("region") or session.region_name

    # Replace the profile argument with environment variables.

    if config.get("profile"):
        creds = session.get_credentials()
        if not _profile_creds_definitely_supported_by_terraform(creds):

            # This profile is using credentials that Terraform may not
            # support, so export the credentials as environment variables.

            # Use environment variables for credentials rather than
            # injecting them into the backend configuration because
            # Terraform gets confused when the backend configuration
            # changes, which happens with certain AWS credential types
            # such as assuming roles.

            del config["profile"]

            export_environment_variables(session=session, region_name=region)

    # Assume role before interacting with backend resources. This is not the same
    # as profiles that assume roles. This is when Terraform has specifically
    # been configured to assume a role. This is more likely to happen when
    # running Terraform on an EC2 instance using instance profile credentials,
    # or using environment variables to set credentials, and then assuming
    # different roles using those credentials.

    if config.get("role_arn"):
        session = _assume_role(
            session,
            RoleArn=config["role_arn"],
            RoleSessionName=config.get("session_name", ""),
            ExternalId=config.get("external_id", ""),
        )

    # Check if the backend resources have been created.

    status = _get_s3_backend_status(
        session=session, region_name=region, bucket=bucket, table=dynamodb_table
    )

    if not all(status.values()):

        if any(status.values()):
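            # Some backend resources exist but not others: report each one
            # and abort rather than modifying a partially created backend.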

            log.bad("backend: incomplete backend setup")

            account_id = get_account_id(session=session)
            bucket_arn = _get_s3_bucket_arn(region, account_id, bucket)
            table_arn = _get_dynamodb_table_arn(region, account_id, dynamodb_table)

            if status["bucket_exists"]:
                log.ok(f"backend: {bucket_arn} found")
            else:
                log.bad(f"backend: {bucket_arn} not found")

            if status["bucket_versioning_enabled"]:
                log.ok(f"backend: {bucket_arn} versioning enabled")
            else:
                log.bad(f"backend: {bucket_arn} versioning disabled")

            if status["table_exists"]:
                log.ok(f"backend: {table_arn} found")
            else:
                log.bad(f"backend: {table_arn} not found")

            raise SystemExit(1)

        _create_s3_backend(
            session=session, bucket=bucket, table=dynamodb_table, region_name=region
        )

    # Return the configuration to use the backend.

    config["bucket"] = bucket
    config.setdefault("encrypt", True)
    config["dynamodb_table"] = dynamodb_table
    config["region"] = region

    return block("terraform", {"backend": {"s3": config}})