def pretf_blocks():
    """
    This demonstrates recursively uploading files to an S3 bucket.
    """

    # Create an S3 bucket to hold the files.
    bucket = yield block(
        "resource",
        "aws_s3_bucket",
        "test",
        {"bucket": "pretf-example-aws-files", "acl": "private"},
    )

    # Upload every file under each source directory,
    # accumulating the totals reported by the collection.
    file_count = 0
    byte_count = 0
    for directory in ("files", "more-files"):
        uploaded = yield aws_s3_bucket_objects(bucket=bucket, source=directory)
        file_count += uploaded.total_files
        byte_count += uploaded.total_bytes

    # Expose the accumulated stats as outputs.
    yield block("output", "total_files", {"value": file_count})
    yield block("output", "total_bytes", {"value": byte_count})
def iam_group(var):
    """
    Reusable collection that creates an IAM group and exposes it.
    """

    # Input variables.
    yield block("variable", "name", {})
    yield block("variable", "path", {"default": "/"})

    # The group resource itself.
    resource = yield block("resource", "aws_iam_group", var.name, {"name": var.name})

    # Output values for callers.
    yield block("output", "name", {"value": var.name})
    yield block("output", "resource", {"value": resource})
def pretf_blocks(var):
    """
    Creates a random_id resource and outputs its hex representation.
    """

    rand = yield block(
        "resource",
        "random_id",
        "additional",
        {"byte_length": 2, "prefix": var.additional_prefix},
    )
    yield block("output", "additional", {"value": rand.hex})
def test_create(self):
    """Creating a configuration file and applying it yields its outputs."""

    # Start from a clean slate.
    workflow.delete_files("*.json")

    # Generate a file with one variable and one matching output.
    with self.create("one.tf.json"):
        one = yield block("variable", "one", {"default": True})
        yield block("output", "one", {"value": one})

    # Initialise and apply, then confirm the output value.
    self.tf.init()
    outputs = self.tf.apply()
    assert outputs == {"one": True}
def iam_user(var):
    """
    Reusable collection that creates an IAM user and exposes it.
    """

    # Input variables.
    yield block("variable", "name", {})
    yield block("variable", "path", {"default": "/"})

    # The user resource itself.
    resource = yield block(
        "resource",
        "aws_iam_user",
        var.name,
        {"name": var.name, "path": var.path},
    )

    # Output values for callers.
    yield block("output", "name", {"value": var.name})
    yield block("output", "resource", {"value": resource})
def pretf_blocks(var):
    """
    Defines the shared variables, the S3 backend, and the default
    AWS provider for this environment.
    """

    # Variables needed by this file.
    yield block(
        "variable",
        "aws_credentials",
        {
            "default": {
                "nonprod": {"profile": "pretf-nonprod"},
                "prod": {"profile": "pretf-prod"},
            },
        },
    )
    yield block("variable", "aws_region", {"default": "eu-west-1"})
    yield block("variable", "environment", {"type": "string"})
    yield block("variable", "stack", {"type": "string"})

    # Stacks in the same account share backend resources,
    # so the backend name is derived from the account.
    account = "prod" if var.environment == "prod" else "nonprod"
    backend = f"pretf-examples-flatten-{account}"
    yield terraform_backend_s3(
        bucket=backend,
        dynamodb_table=backend,
        key=f"{var.stack}/terraform.tfstate",
        region=var.aws_region,
        **var.aws_credentials[account],
    )

    # Default AWS provider for this environment.
    yield provider_aws(
        region=var.aws_region,
        **var.aws_credentials[account],
    )
def aws_iam_user_group_membership(var):
    """
    Creates a membership resource linking each user to the group.
    """

    # Input variables.
    yield block("variable", "group", {})
    yield block("variable", "users", {})

    # One membership resource per user, iterated in a stable order
    # so the generated configuration is deterministic.
    group_label = labels.get(var.group)
    for user_label, user in sorted(var.users.items()):
        yield block(
            "resource",
            "aws_iam_user_group_membership",
            f"{user_label}_in_{group_label}",
            {"user": user.name, "groups": [var.group.name]},
        )
def pretf_blocks(terraform, var):
    """
    Defines the shared variables, the S3 backend, and the default
    AWS provider for the current Terraform workspace.
    """

    # Variables needed by this file.
    yield block(
        "variable",
        "aws_credentials",
        {
            "default": {
                "nonprod": {"profile": "pretf-nonprod"},
                "prod": {"profile": "pretf-prod"},
            },
        },
    )
    yield block("variable", "aws_region", {"default": "eu-west-1"})
    yield block("variable", "environment", {"type": "string"})
    yield block("variable", "stack", {"type": "string"})

    # All workspaces must use the same backend,
    # so it lives in the prod account.
    yield terraform_backend_s3(
        bucket="pretf-examples-workspaces",
        dynamodb_table="pretf-examples-workspaces",
        key=f"{var.stack}/terraform.tfstate",
        region="eu-west-1",
        workspace_key_prefix="",
        **var.aws_credentials["prod"],
    )

    # The provider, by contrast, targets the account
    # matching the current workspace.
    account = "prod" if terraform.workspace == "prod" else "nonprod"
    yield provider_aws(
        region=var.aws_region,
        **var.aws_credentials[account],
    )
def provider_aws(**body: Any) -> Block:
    """
    Returns an AWS provider block.

    If provided, the `profile` option may be replaced with temporary
    credentials for that profile.

    Fix: the annotation `**body: dict` claimed every keyword value was a
    dict, which is wrong (e.g. `region` is a string); use `Any`, matching
    the sibling helpers `terraform_remote_state_s3` and
    `terraform_backend_s3`.
    """
    if body.get("profile"):
        session = get_session(profile_name=body["profile"])
        creds = session.get_credentials()
        if not _profile_creds_definitely_supported_by_terraform(creds):
            # This profile is using credentials that Terraform may not
            # support, so get the credentials and inject them into the
            # configuration.
            del body["profile"]
            frozen_creds = creds.get_frozen_credentials()
            body["access_key"] = frozen_creds.access_key
            body["secret_key"] = frozen_creds.secret_key
            if creds.token:
                body["token"] = frozen_creds.token
    return block("provider", "aws", body)
def test_change(self):
    """Changing and adding configuration files updates the outputs."""

    # Recreate "one" with a different default value.
    with self.create("one.tf.json"):
        one = yield block("variable", "one", {"default": False})
        yield block("output", "one", {"value": one})

    # Add a second file whose default is a nested structure.
    with self.create("two.tf.json"):
        two = yield block(
            "variable", "two", {"default": {"x": [1, 2, 3], "y": 4}}
        )
        yield block("output", "two", {"value": two})

    # Apply and confirm both outputs.
    outputs = self.tf.apply()
    assert outputs == {"one": False, "two": {"x": [1, 2, 3], "y": 4}}
def terraform_remote_state_s3(name: str, **body: Any) -> Block:
    """
    This returns a Terraform configuration block for a
    "terraform_remote_state" data source, with added support for AWS
    profiles using MFA prompts.
    """

    body["backend"] = "s3"
    config = body.get("config", {})

    if config.get("profile"):
        session = get_session(profile_name=config["profile"])
        creds = session.get_credentials()
        if not _profile_creds_definitely_supported_by_terraform(creds):
            # This profile is using credentials that Terraform may not
            # support, so get static/frozen credentials and inject them
            # into the configuration.
            del config["profile"]
            frozen = creds.get_frozen_credentials()
            config["access_key"] = frozen.access_key
            config["secret_key"] = frozen.secret_key
            if creds.token:
                config["token"] = frozen.token

    return block("data", "terraform_remote_state", name, body)
def pretf_blocks(var):
    """
    Creates private and public security groups, plus ingress rules on
    ports 80 and 443 for each allowed CIDR, routed to the group that
    matches whether the CIDR is globally routable.
    """

    # Group for non-global (internal) source addresses.
    private_label = "private"
    private = yield block(
        "resource",
        "aws_security_group",
        private_label,
        {"name": "pretf-example-aws-private"},
    )

    # Group for globally routable source addresses.
    public_label = "public"
    public = yield block(
        "resource",
        "aws_security_group",
        public_label,
        {"name": "pretf-example-aws-public"},
    )

    # Deduplicate the CIDRs and iterate them in a stable order.
    for cidr in sorted(set(var.security_group_allowed_cidrs)):
        cidr_label = cidr.replace(".", "_").replace("/", "_")

        # Pick the target group based on whether the CIDR is global.
        if IPv4Network(cidr).is_global:
            target, target_label = public, public_label
        else:
            target, target_label = private, private_label

        for port in (80, 443):
            yield block(
                "resource",
                "aws_security_group_rule",
                f"{target_label}_{port}_from_{cidr_label}",
                {
                    "security_group_id": target.id,
                    "type": "ingress",
                    "protocol": "tcp",
                    "from_port": port,
                    "to_port": port,
                    "cidr_blocks": [cidr],
                },
            )

    yield block("output", "private_sg_id", {"value": private.id})
    yield block("output", "public_sg_id", {"value": public.id})
def iam_group_with_users(var):
    """
    Creates an IAM group, one IAM user per name, and the membership
    resources linking the users to the group.
    """

    # Inputs.
    yield block("variable", "group_name", {})
    yield block("variable", "user_names", {})

    # Yield resources from a nested collection.
    group = yield iam_group(name=var.group_name)

    # Yield resources from a nested collection.
    users = {}
    for name in var.user_names:
        user = yield iam_user(name=name)
        users[name] = user.resource

    # Yield resources from a nested collection,
    # using "yield from" this time.
    # It can be assigned to a variable this way.
    yield from aws_iam_user_group_membership(group=group.resource, users=users)

    # Outputs.
    yield block("output", "group", {"value": group.resource})
    # Fix: this output previously exposed the group resource again
    # (copy-paste of the line above); it should expose the users mapping.
    yield block("output", "users", {"value": users})
def pretf_blocks(var):
    """
    Creates an IAM group and adds each named user to it,
    outputting every user name.
    """

    group = yield block(
        "resource", "aws_iam_group", "pretf", {"name": "pretf-aws"}
    )

    for name in var.user_names:
        label = labels.clean(name)

        # The user itself.
        user = yield block(
            "resource", "aws_iam_user", label, {"name": name}
        )

        # Its membership in the group.
        yield block(
            "resource",
            "aws_iam_user_group_membership",
            label,
            {"user": user.name, "groups": [group.name]},
        )

        # Expose the user name as an output.
        yield block("output", f"user_{label}", {"value": user.name})
def pretf_blocks(var):
    """
    Demonstrates reading variables by attribute and by item access,
    and variables with and without defaults.
    """

    # "two" is defined elsewhere; read it both ways.
    yield block("output", "two_attr", {"value": var.two})
    yield block("output", "two_dict", {"value": var["two"]})

    # "three" has a default value.
    yield block("variable", "three", {"default": 3})
    yield block("output", "three", {"value": var.three})

    # "four" has no default, so a value must be supplied.
    yield block("variable", "four", {})
    yield block("output", "four", {"value": var.four})
def python_lambda_resources(var):
    """
    Reusable collection that builds a Lambda function (plus its IAM
    role) for one Python runtime version.
    """

    yield block("variable", "version", {})

    label = f"python_{var.version.replace('.', '')}"
    runtime = f"python{var.version}"

    # Random suffix keeps function names unique between runs.
    # (Renamed from "random" to avoid shadowing the stdlib module name.)
    suffix = yield block(
        "resource",
        "random_id",
        label,
        {
            "prefix": f"terraform-aws-lambda-builder-tests-{label}-",
            "byte_length": 8,
        },
    )

    # IAM role for the function, with CloudWatch Logs access.
    role = yield block(
        "module",
        f"{label}_role",
        {
            "source": "git::https://gitlab.com/claranet-pcp/terraform/aws/terraform-aws-lambda-role.git?ref=v0.0.1",
            "function_name": suffix.hex,
            "cloudwatch_logs": True,
        },
    )

    # The Lambda function itself, built from the local module.
    func = yield block(
        "module",
        f"{label}_lambda",
        {
            "source": "../../",
            "build_mode": "LAMBDA",
            "create_role": False,
            "function_name": suffix.hex,
            "handler": "lambda.handler",
            "role": role.arn,
            "runtime": runtime,
            "s3_bucket": block("aws_s3_bucket", "packages", {}).id,
            "source_dir": "./src",
            "timeout": 30,
        },
    )

    yield block("output", "function_name", {"value": func.function_name})
def aws_s3_bucket_objects(var):
    """
    Creates aws_s3_bucket_object resources for all files in the given
    source directory. This is using the "collections" API to create a
    reusable function that generates resources.
    """

    # Inputs.
    yield block("variable", "bucket", {})
    yield block("variable", "prefix", {"default": ""})
    yield block("variable", "source", {})

    # The bucket's resource name is used as a prefix
    # for the object resource names.
    bucket_label = labels.get(var.bucket)

    file_count = 0
    byte_count = 0

    # One object resource per file found below the source directory.
    for path in Path(var.source).rglob("*"):
        if not path.is_file():
            continue
        key = f"{var.prefix}{path.relative_to(var.source)}"
        yield block(
            "resource",
            "aws_s3_bucket_object",
            labels.clean(f"{bucket_label}/{key}"),
            {"bucket": var.bucket.id, "key": key, "source": path},
        )
        file_count += 1
        byte_count += os.path.getsize(path)

    # Outputs.
    yield block("output", "total_files", {"value": file_count})
    yield block("output", "total_bytes", {"value": byte_count})
def pretf_blocks(var):
    """
    Defines variable "one" (with a matching output) and variable "two".
    """
    yield block("variable", "one", {"default": 1})
    yield block("output", "one", {"value": var.one})
    yield block("variable", "two", {"default": 2})
def pretf_blocks(var):
    """
    Exposes the "five", "six" and "seven" variables as outputs.
    """
    # Item access on `var` is equivalent to attribute access
    # (both forms are demonstrated elsewhere in this project).
    for name in ("five", "six", "seven"):
        yield block("output", name, {"value": var[name]})
def pretf_blocks(path):
    """
    Builds a Lambda function for each Python runtime version and
    outputs the resulting function names.
    """
    function_names = []
    for version in ("3.6", "3.7"):
        resources = yield python_lambda_resources(version=version)
        function_names.append(resources.function_name)
    yield block("output", "function_names", {"value": function_names})
def terraform_backend_s3(bucket: str, dynamodb_table: str, **config: Any) -> Block: """ This ensures that the S3 backend exists, prompting to create it if necessary, sets the credentials as environment variables in some cases, and returns a Terraform configuration block for it. """ # Create a session from any AWS credentials options. session_kwargs = {} session_kwargs_map = { "profile": "profile_name", "access_key": "aws_access_key_id", "secret_key": "aws_secret_access_key", "token": "aws_session_token", } for config_key, session_key in session_kwargs_map.items(): config_value = config.get(config_key) if config_value: session_kwargs[session_key] = config[config_key] session = get_session(**session_kwargs) region = config.get("region") or session.region_name # Replace the profile argument with environment variables. if config.get("profile"): creds = session.get_credentials() if not _profile_creds_definitely_supported_by_terraform(creds): # This profile is using credentials that Terraform may not # support, so export the credentials as environment variables. # Use environment variables for credentials rather than # injecting them into the backend configuration because # Terraform gets confused when the backend configuration # changes, which happens with certain AWS credential types # such as assuming roles. del config["profile"] export_environment_variables(session=session, region_name=region) # Assume role before interacting with backend resources. This not the same # as profiles that assume roles. This is when Terraform has specifically # been configured to assume a role. This is more likely to happen when # running Terraform on an EC2 instance using instance profile credentials, # or using environment variables to set credentials, and then assuming # different roles using those credentials. 
if config.get("role_arn"): session = _assume_role( session, RoleArn=config["role_arn"], RoleSessionName=config.get("session_name", ""), ExternalId=config.get("external_id", ""), ) # Check if the backend resources have been created. status = _get_s3_backend_status( session=session, region_name=region, bucket=bucket, table=dynamodb_table ) if not all(status.values()): if any(status.values()): log.bad("backend: incomplete backend setup") account_id = get_account_id(session=session) bucket_arn = _get_s3_bucket_arn(region, account_id, bucket) table_arn = _get_dynamodb_table_arn(region, account_id, dynamodb_table) if status["bucket_exists"]: log.ok(f"backend: {bucket_arn} found") else: log.bad(f"backend: {bucket_arn} not found") if status["bucket_versioning_enabled"]: log.ok(f"backend: {bucket_arn} versioning enabled") else: log.bad(f"backend: {bucket_arn} versioning disabled") if status["table_exists"]: log.ok(f"backend: {table_arn} found") else: log.bad(f"backend: {table_arn} not found") raise SystemExit(1) _create_s3_backend( session=session, bucket=bucket, table=dynamodb_table, region_name=region ) # Return the configuration to use the backend. config["bucket"] = bucket config.setdefault("encrypt", True) config["dynamodb_table"] = dynamodb_table config["region"] = region return block("terraform", {"backend": {"s3": config}})
import pytest

from pretf.api import block


# Each case pairs a block (or an attribute/index lookup on one) with
# the Terraform interpolation string it should render as.
@pytest.mark.parametrize(
    "value,rendered",
    [
        # Providers render as their name plus any alias.
        (block("provider", "aws", {}), "aws"),
        (block("provider", "aws", {}).alias, "aws"),
        (block("provider", "aws", {"region": "eu-west-1"}), "aws"),
        (block("provider", "aws", {"region": "eu-west-1"}).alias, "aws"),
        (block("provider", "aws", {"alias": "nonprod"}), "aws.nonprod"),
        (block("provider", "aws", {"alias": "nonprod"}).alias, "aws.nonprod"),
        # Variables render as var references.
        (block("variable", "one", {}), "${var.one}"),
        # Resources render as type.name references,
        # including nested attribute and index access.
        (block("resource", "one", "two", {}), "${one.two}"),
        (
            block("resource", "aws_instance", "www", {}).ipv6_addresses[0],
            "${aws_instance.www.ipv6_addresses[0]}",
        ),
        (
            block("resource", "one", "two", {}).list[0].another_list[1],
            "${one.two.list[0].another_list[1]}",
        ),
    ],
)
def test_block(value, rendered):
    assert str(value) == rendered