def verify_data(r):
    """Assert that the dynamic resource `r` captured the same runtime
    context (dry-run flag, project, stack, config) this process observes.

    Non-dynamic resources are ignored.
    """
    kind = r.resource_type
    if kind != "pulumi-nodejs:dynamic:Resource":
        return

    props = r.props

    # The recorded dry-run flag must match the live engine state.
    assert is_dry_run() == props["isDryRun"]

    # Project name: env var must be present and agree with both the SDK
    # and the value baked into the resource.
    assert "PULUMI_TEST_PROJECT" in os.environ
    project = get_project()
    assert project == os.environ["PULUMI_TEST_PROJECT"]
    assert project == props["getProject"]

    # Stack name: same three-way agreement.
    assert "PULUMI_TEST_STACK" in os.environ
    stack = get_stack()
    assert stack == os.environ["PULUMI_TEST_STACK"]
    assert stack == props["getStack"]

    # Full config bag, compared via canonical (sorted-key) JSON.
    expected = json.dumps(CONFIG, sort_keys=True)
    recorded = json.dumps(dict(props["allConfig"]), sort_keys=True)
    assert expected == recorded

    # One required config value, plus the AWS provider region.
    assert Config().require("aConfigValue") == "this value is a value"
    assert aws_config.region == "us-west-2"
def setup_external_load_balancer(
    project_name: str,
    vpc_info: dict,
    security_groups: list,
    forward_http_to_https: bool = True,
):
    """Create the internet-facing ALB for *project_name*.

    Args:
        project_name: Prefix for the resource name, log prefix, and tags.
        vpc_info: NOTE(review): annotated ``dict`` but ``.apply`` is
            called on it, so it is actually a Pulumi Output resolving to
            a dict with a ``'public_subnets'`` list — confirm and fix
            the annotation upstream.
        security_groups: Security-group ids to attach to the LB.
        forward_http_to_https: When True (default), also create the
            HTTP -> HTTPS redirect via ``setup_port_forwarding``.

    Returns:
        The created ``alb.LoadBalancer`` resource.
    """
    resource_name = f'{project_name}-ext-lb'
    load_balancer = alb.LoadBalancer(
        resource_name,
        # Access logs land in the shared logging bucket under a
        # per-project "external" prefix.
        access_logs=LOGGING_BUCKET_NAME.apply(
            lambda bucket: {
                'bucket': bucket,
                'enabled': True,
                'prefix': f'{project_name}/external',
            }
        ),
        internal=False,
        security_groups=security_groups,
        # The ids of all the public subnets in vpc_info
        # (plain copy: list(...) instead of a pass-through comprehension).
        subnets=vpc_info.apply(lambda info: list(info['public_subnets'])),
        tags={'env': get_stack(), 'Name': resource_name, 'product': project_name},
    )
    if forward_http_to_https:
        setup_port_forwarding(project_name, load_balancer.arn)
    return load_balancer
def __init__(self):
    """Create one AWS Elastic IP per entry in the YAML spec, tag it,
    record its id in ``eip_ids_dict``, and export it from the stack."""
    resource_specs = ParseYAML(resource_type).getSpecs()
    for eip_name, eip_configuration in resource_specs.items():

        # AWS Elastic IP Dynamic Variables
        resource_name = eip_name
        # BUG FIX: previously tested `"'tags':" in str(eip_configuration)`,
        # a substring probe of the dict repr that can false-positive when
        # any *value* contains that text. Use real key membership instead
        # (consistent with the other resource classes in this file).
        resource_tags = eip_configuration.get("tags")

        # Getting list of tags from configuration file
        tags_list = {}
        if resource_tags is not None:
            for each_tag_name, each_tag_value in resource_tags.items():
                tags_list.update({each_tag_name: each_tag_value})

        # Adding mandatory tags (applied last so they win on collisions)
        tags_list.update({"Name": resource_name})
        tags_list.update({
            "Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()
        })
        tags_list.update(resource_mandatory_tags)

        eip = net.Eip(resource_name, tags=tags_list)
        eip_ids_dict.update({eip._name: eip.id})

        # Exporting each EIP
        pulumi.export(eip._name, eip.id)
def __init__(self):
    """Assemble the tag set for every ECR repository defined in the
    YAML spec (configuration tags plus the mandatory set)."""
    resource_specs = ParseYAML(resource_type).getSpecs()

    #
    # ECR Repository
    #
    for ecr_repo_name, ecr_repo_configuration in resource_specs.items():

        # AWS ECR Dynamic Variables
        resource_repo_name = ecr_repo_name
        # resource_repo_version = eks_repo_configuration["version"]
        if "tags" in ecr_repo_configuration:
            resource_repo_tags = ecr_repo_configuration["tags"]
        else:
            resource_repo_tags = None

        # Tags supplied by the configuration file, copied one by one.
        repo_tags_list = {}
        if resource_repo_tags is not None:
            for tag_key, tag_value in resource_repo_tags.items():
                repo_tags_list[tag_key] = tag_value

        # Mandatory tags go last so they win on key collisions.
        repo_tags_list["Name"] = resource_repo_name
        repo_tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        repo_tags_list.update(resource_mandatory_tags)
def __init__(self):
    """Create one AWS Subnet per entry in the YAML spec, tag it, record
    its id/CIDR/AZ in the module dictionaries, and export its id."""
    resource_specs = ParseYAML(resource_type).getSpecs()
    aws_vpc_id = VPCs.VPCId()

    for subnet_name, subnet_configuration in resource_specs.items():

        # AWS Subnet Dynamic Variables
        resource_name = subnet_name
        resource_az = subnet_configuration["az"]
        resource_cidr = subnet_configuration["cidr"]
        resource_assign_public_ipv4 = subnet_configuration[
            "assign-public-ipv4"]
        resource_vpc = subnet_configuration["vpc"]
        # BUG FIX: previously tested `"'tags':" in str(subnet_configuration)`,
        # a substring probe of the dict repr that can false-positive when
        # any *value* contains that text. Use real key membership instead.
        resource_tags = subnet_configuration.get("tags")

        this_vpc = aws_vpc_id[str(resource_vpc)]

        # Getting list of tags from configuration file
        tags_list = {}
        if resource_tags is not None:
            for each_tag_name, each_tag_value in resource_tags.items():
                tags_list.update({each_tag_name: each_tag_value})

        # Adding mandatory tags (applied last so they win on collisions)
        tags_list.update({"Name": resource_name})
        tags_list.update({
            "Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()
        })
        tags_list.update(resource_mandatory_tags)

        subnet = net.Subnet(
            resource_name,
            vpc_id=this_vpc,
            cidr_block=resource_cidr,
            map_public_ip_on_launch=resource_assign_public_ipv4,
            availability_zone=resource_az,
            tags=tags_list
            # FIXME: This needs to be sorted
            # opts = pulumi.ResourceOptions(
            #     parent = this_vpc,
            #     depends_on = [this_vpc]
            # )
        )

        subnet_ids_dict.update({subnet._name: subnet.id})
        subnet_cidr_blocks_dict.update({subnet._name: subnet.cidr_block})
        subnet_azs_dict.update({subnet._name: subnet.availability_zone})

        # Exporting each subnet created for future reference
        pulumi.export(subnet._name, subnet.id)
def test_ecr(self):
    """test method for ecr"""
    # pulumi.apply()
    # Dump the attributes of the pulumi module seen by ecr (debug aid).
    for attribute in dir(ecr.pulumi):
        print(attribute)
    print(pulumi.get_stack())
    ecr.create_repository("hoge")
    # Trivial sanity assertion to mark the test as executed.
    self.assertEqual(1, 1)
def ParameterGroup(self):
    """Create a DocumentDB ClusterParameterGroup for every entry under
    'parameter-group' in the YAML spec, record and export each id."""
    resource_specs = ParseYAML(resource_type).getSpecs()

    for pg_name, pg_config in resource_specs["parameter-group"].items():

        # AWS DocumentDB Parameter Group Dynamic Variables
        resource_name = pg_name
        resource_description = pg_config["description"]
        resource_family = pg_config["family"]
        resource_tags = pg_config["tags"] if "tags" in pg_config else None

        # Configuration-file tags first, mandatory tags last
        # (applied last so the mandatory values win on collisions).
        tags_list = dict(resource_tags) if resource_tags is not None else {}
        tags_list["Name"] = resource_name
        tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        tags_list.update(resource_mandatory_tags)

        # All parameters for this individual DocumentDB Parameter Group.
        resource_parameters = [
            {
                "name": param_key,
                "value": param_spec["value"],
                "applyMethod": param_spec["apply"],
            }
            for param_key, param_spec in pg_config["parameters"].items()
        ]

        # Create resource
        parametergroup = docdb.ClusterParameterGroup(
            resource_name,
            description=resource_description,
            family=resource_family,
            parameters=resource_parameters,
            tags=tags_list,
        )

        # Update resource dictionary
        parametergroup_ids_dict.update(
            {parametergroup._name: parametergroup.id})

        # Export parameter group
        pulumi.export(parametergroup._name, parametergroup.id)
def __init__(self, tags: dict = None):
    """Build the default tag set (project, stack, cost-center from
    config), optionally merged with caller-supplied *tags* which take
    precedence on key collisions."""
    config = pulumi.Config()
    merged = {
        'project': pulumi.get_project(),
        'stack': pulumi.get_stack(),
        'costCenter': config.require('cost-center'),
    }
    if tags is not None:
        merged.update(tags)
    self.__tags = merged
def format_resource_name(name):
    """
    Defines a fully-formated resource name: "<project>-<stack>-<name>".
    """
    parts = (pulumi.get_project(), pulumi.get_stack(), name)
    return '-'.join(parts)
def PublicZone(self):
    """Create a Route53 public hosted zone for every entry under
    'public-zone' in the YAML spec; export id, name servers and zone id."""
    resource_specs = ParseYAML(resource_type).getSpecs()

    for zone_name, zone_config in resource_specs["public-zone"].items():

        # Route53 Public Dynamic Variables
        resource_name = zone_name

        # Optional document fields: present -> use the value, absent -> None.
        resource_comment = zone_config["comment"] if "comment" in zone_config else None
        resource_tags = zone_config["tags"] if "tags" in zone_config else None

        # Tags from the configuration file ...
        tags_list = {}
        if resource_tags is not None:
            for tag_key, tag_value in resource_tags.items():
                tags_list[tag_key] = tag_value

        # ... then the mandatory set (applied last so it wins).
        tags_list["Name"] = resource_name
        tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        tags_list.update(resource_mandatory_tags)

        # Create Route53 Public Zone
        route53_public_zone = route53.Zone(
            resource_name,
            name=resource_name,
            comment=resource_comment,
            tags=tags_list,
        )

        pulumi.export(resource_name, [{
            "ID": route53_public_zone.id,
            "Name servers": route53_public_zone.name_servers,
            "Zone ID": route53_public_zone.zone_id,
        }])

        route53_public_zone_ids_dict.update(
            {route53_public_zone._name: route53_public_zone.id})
def _get_execution_context(subscription_id) -> ExecutionContext:
    """Assemble and schema-validate the ExecutionContext for this run.

    Raises:
        Exception: when the assembled context fails validation against
            EXECUTION_CONTEXT_SCHEMA (carries the validator errors).
    """
    logging.basicConfig(level=logging.DEBUG)

    context = ExecutionContext(
        pulumi.config.get_config('azure:location'),
        pulumi.config.get_config('platform:name'),
        pulumi.get_stack(),
        pulumi.get_project(),
        subscription_id,
        datetime.now(),
    )

    validator = Validator(EXECUTION_CONTEXT_SCHEMA, require_all=True)
    if not validator.validate(asdict(context)):
        raise Exception(validator.errors)
    return context
def local_grapl_user(table: aws.dynamodb.Table, username: str, cleartext: str) -> None:
    """Create a user only for local development uses; NEVER REAL AWS"""
    deployment_name = pulumi.get_stack()
    item_body = json.dumps(local_user_item(username, cleartext))

    user = aws.dynamodb.TableItem(
        f"{deployment_name}-user-{username}",
        table_name=table.name,
        hash_key=table.hash_key,
        item=item_body,
    )

    pulumi.export(f"user-{username}", user.id)
def check_lb(args):
    """Unpack an (id, tags, internal, subnets, access_logs) tuple for
    the internal LB and assert every field matches the test fixtures."""
    lb_id, lb_tags, lb_internal, lb_subnets, lb_access_logs = args
    expected_name = f'{test_project_name}-int-lb'

    assert lb_id == expected_name

    # All three tag keys must be present with the expected values.
    for key in ('env', 'Name', 'product'):
        assert key in lb_tags
    assert lb_tags['env'] == pulumi.get_stack()
    assert lb_tags['Name'] == expected_name
    assert lb_tags['product'] == test_project_name

    assert lb_internal is True
    assert lb_subnets == test_subnets
    assert lb_access_logs == test_bucket
def SubnetGroup(self):
    """Create a DocumentDB SubnetGroup for every entry under
    'subnet-group' in the YAML spec, record and export each id."""
    resource_specs = ParseYAML(resource_type).getSpecs()
    aws_subnet_id = Subnets.SubnetId()

    for sg_name, sg_config in resource_specs["subnet-group"].items():

        # AWS DocumentDB Subnet Group Dynamic Variables
        resource_name = sg_name
        resource_description = sg_config["description"]
        resource_subnet_ids = sg_config["subnets"]
        resource_tags = sg_config["tags"] if "tags" in sg_config else None

        # Configuration-file tags first, mandatory tags last (they win).
        tags_list = dict(resource_tags) if resource_tags is not None else {}
        tags_list["Name"] = resource_name
        tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        tags_list.update(resource_mandatory_tags)

        # Resolve each configured subnet name to its Pulumi id.
        resource_subnets_list = [
            aws_subnet_id[str(subnet_ref)] for subnet_ref in resource_subnet_ids
        ]

        subnetgroup = docdb.SubnetGroup(
            resource_name,
            description=resource_description,
            subnet_ids=resource_subnets_list,
            tags=tags_list,
        )

        # Update resource dictionaries
        subnetgroup_ids_dict.update({subnetgroup._name: subnetgroup.id})

        # Export
        pulumi.export(subnetgroup._name, subnetgroup.id)
def __init__(self):
    """Create a NAT Gateway for every entry in the YAML spec, tag it,
    record its id, and export it from the stack."""
    resource_specs = ParseYAML(resource_type).getSpecs()
    aws_subnet_id = Subnets.SubnetId()
    aws_eip_id = ElasticIPs.ElasticIPId()

    for natgw_name, natgw_configuration in resource_specs.items():

        # AWS NAT Gateway Variables
        resource_name = natgw_name
        resource_subnet = natgw_configuration["subnet"]
        resource_eip = natgw_configuration["elastic_ip"]
        resource_tags = (
            natgw_configuration["tags"]
            if "tags" in natgw_configuration else None
        )

        # Configuration tags first, mandatory tags last (they win).
        tags_list = {}
        if resource_tags is not None:
            tags_list.update(resource_tags)
        tags_list["Name"] = resource_name
        tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        tags_list.update(resource_mandatory_tags)

        # Resolve the configured subnet / EIP references to Pulumi ids.
        this_subnet = aws_subnet_id[str(resource_subnet)]
        this_eip = aws_eip_id[str(resource_eip)]

        aws_natgw = net.NatGateway(
            resource_name,
            subnet_id=this_subnet,
            allocation_id=this_eip,
            tags=tags_list,
        )

        # Update resource dictionaries
        natgw_ids_dict.update({aws_natgw._name: aws_natgw.id})

        # Export
        pulumi.export(aws_natgw._name, aws_natgw.id)
def __init__(self):
    """Create a VPC for every entry in the YAML spec, tag it, record
    its id, and export it from the stack."""
    resource_specs = ParseYAML(resource_type).getSpecs()

    for vpc_name, vpc_conf in resource_specs.items():

        # AWS VPC Dynamic Variables
        resource_name = vpc_name
        resource_cidr = vpc_conf['cidr']
        resource_dns_resolution = vpc_conf['dns-resolution']
        resource_dns_hostnames = vpc_conf['dns-hostnames']
        resource_tags = vpc_conf["tags"] if "tags" in vpc_conf else None

        # Configuration-file tags first, mandatory tags last (they win).
        tags_list = dict(resource_tags) if resource_tags is not None else {}
        tags_list["Name"] = resource_name
        tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        tags_list.update(resource_mandatory_tags)

        # Create resource
        vpc = net.Vpc(
            resource_name,
            cidr_block=resource_cidr,
            enable_dns_support=resource_dns_resolution,
            enable_dns_hostnames=resource_dns_hostnames,
            tags=tags_list,
        )

        # Update resource dictionary
        vpc_ids_dict.update({vpc._name: vpc.id})

        # Export the name of each VPC
        pulumi.export(vpc._name, vpc.id)
def __init__(self):
    """Create an S3 bucket for every entry in the YAML spec, tag it,
    and export its id from the stack."""
    resource_specs = ParseYAML(resource_type).getSpecs()

    for s3_bucket_name, s3_bucket_configuration in resource_specs.items():

        # AWS S3 Dynamic Variables
        resource_name = s3_bucket_name
        resource_tags = (
            s3_bucket_configuration["tags"]
            if "tags" in s3_bucket_configuration else None
        )

        # Configuration-file tags first, mandatory tags last (they win).
        tags_list = dict(resource_tags) if resource_tags is not None else {}
        tags_list["Name"] = resource_name
        tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        tags_list.update(resource_mandatory_tags)

        # Optional server-side encryption block.
        sse_config = (
            s3_bucket_configuration["serverSideEncryptionConfiguration"]
            if "serverSideEncryptionConfiguration" in s3_bucket_configuration
            else None
        )

        # Create S3s
        bucket = s3.Bucket(
            resource_name,
            acl=s3_bucket_configuration["acl"],
            force_destroy=s3_bucket_configuration["force-destroy"],
            tags=tags_list,
            server_side_encryption_configuration=sse_config,
        )

        # Export
        pulumi.export(bucket._name, bucket.id)
def setup_internal_load_balancer(project_name: str, vpc_info: dict, security_groups: list):
    """Create the internal (non-internet-facing) ALB for *project_name*.

    Args:
        project_name: Prefix for the resource name, log prefix, and tags.
        vpc_info: NOTE(review): annotated ``dict`` but ``.apply`` is
            called on it, so it is actually a Pulumi Output resolving to
            a dict with a ``'private_subnets'`` list — confirm and fix
            the annotation upstream.
        security_groups: Security-group ids to attach to the LB.

    Returns:
        The created ``alb.LoadBalancer`` resource.
    """
    resource_name = f'{project_name}-int-lb'
    load_balancer = alb.LoadBalancer(
        resource_name,
        # Access logs land in the shared logging bucket under a
        # per-project "internal" prefix.
        access_logs=LOGGING_BUCKET_NAME.apply(
            lambda bucket: {
                'bucket': bucket,
                'enabled': True,
                'prefix': f'{project_name}/internal',
            }
        ),
        internal=True,
        security_groups=security_groups,
        # The ids of all the private subnets in vpc_info
        # (plain copy: list(...) instead of a pass-through comprehension).
        subnets=vpc_info.apply(lambda info: list(info['private_subnets'])),
        tags={'env': get_stack(), 'Name': resource_name, 'product': project_name},
    )
    return load_balancer
def __init__(self):
    """Create an Internet Gateway for every entry in the YAML spec,
    tag it, record its id, and export it from the stack."""
    resource_specs = ParseYAML(resource_type).getSpecs()
    aws_vpc_id = VPCs.VPCId()

    for igw_name, igw_configuration in resource_specs.items():

        # AWS Internet Gateway Variables
        resource_name = igw_name
        resource_vpc = igw_configuration["vpc"]
        resource_tags = igw_configuration["tags"] if "tags" in igw_configuration else None

        this_vpc = aws_vpc_id[str(resource_vpc)]

        # Getting list of tags from configuration file
        tags_list = {}
        if resource_tags is not None:
            for each_tag_name, each_tag_value in resource_tags.items():
                tags_list.update({each_tag_name: each_tag_value})

        # Adding mandatory tags
        tags_list.update({"Name": resource_name})
        tags_list.update({"Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()})
        tags_list.update(resource_mandatory_tags)

        aws_igw = igw.InternetGateway(
            resource_name,
            vpc_id=this_vpc,
            # BUG FIX: previously passed `tags = resource_tags` (the raw
            # config value, possibly None), silently discarding the
            # mandatory Name/Project-Stack tags assembled above.
            tags=tags_list
        )

        igw_ids_dict.update({aws_igw._name: aws_igw.id})

        # Export the name of each Internet Gateway
        pulumi.export(aws_igw._name, aws_igw.id)
def __init__(self):
    """Create an EC2 KeyPair for every entry in the YAML spec, tag it,
    record its id, and export it from the stack."""
    resource_specs = ParseYAML(resource_type).getSpecs()

    for keypair_name, keypair_configuration in resource_specs.items():

        # AWS KeyPair Dynamic Variables
        resource_name = keypair_name
        resource_public_key = keypair_configuration['public_key']
        resource_tags = (
            keypair_configuration["tags"]
            if "tags" in keypair_configuration else None
        )

        # Configuration-file tags first, mandatory tags last (they win).
        tags_list = dict(resource_tags) if resource_tags is not None else {}
        tags_list["Name"] = resource_name
        tags_list["Project/Stack"] = (
            pulumi.get_project() + "/" + pulumi.get_stack()
        )
        tags_list.update(resource_mandatory_tags)

        # Create resource
        keypair = am.KeyPair(
            resource_name,
            public_key=resource_public_key,
            tags=tags_list,
        )

        # Update resource dictionary
        keypair_ids_dict.update({keypair._name: keypair.id})

        # Exporting each KeyPair ID created for future reference
        pulumi.export(keypair._name, keypair.id)
def init():
    """Provision a t3.micro dev machine from the newest matching
    Ubuntu 16.04 (xenial) HVM AMI."""
    ubuntu_filters = [
        {
            "name": "name",
            "values": ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"],
        },
        {
            "name": "virtualization-type",
            "values": ["hvm"],
        },
    ]
    # NOTE(review): owner 099720109477 is presumably Canonical's AWS
    # account — confirm against Canonical's published account id.
    ami = aws.get_ami(
        filters=ubuntu_filters,
        most_recent=True,
        owners=["099720109477"],
    )

    ec2_instance = aws.ec2.Instance(
        "dev-machine",
        ami=ami.id,
        instance_type="t3.micro",
        tags={
            "provision": "pulumi",
            "stage": pulumi.get_stack(),
            "project": "webserver",
        },
    )
def __init__(self, keypair_name, provider_cloud_admin):
    """Load stack configuration and resolve the networks and keypair
    this deployment depends on.

    NOTE(review): `compute` / `networking` look like OpenStack SDK
    modules — confirm against this file's imports.
    """
    self.config = pulumi.Config()
    self.stack_name = pulumi.get_stack()
    self.provider_cloud_admin = provider_cloud_admin
    self.keypair = compute.get_keypair(keypair_name)

    # Fall back to the automation container's baked-in key when the
    # stack does not configure one.
    private_key_file = (
        self.config.get("privateKeyFile")
        or "/pulumi/automation/etc/.ssh/id_rsa"
    )

    # JSON-encoded config values are decoded once here.
    self.props = SimpleNamespace(
        external_network=json.loads(self.config.require("externalNetwork")),
        mgmt_network=json.loads(self.config.require("managementNetwork")),
        deploy_network=json.loads(self.config.require("deploymentNetwork")),
        helper_vm=json.loads(self.config.require("helperVM")),
        public_router_name=self.config.require("publicRouter"),
        keypair_name=keypair_name,
        private_key_file=private_key_file,
    )

    # Look up the existing management network/subnet by name.
    mgmt_network = networking.get_network(name=self.props.mgmt_network["name"])
    mgmt_subnet = networking.get_subnet(name=self.props.mgmt_network["subnet_name"])
    self.resources = SimpleNamespace(
        mgmt_network=mgmt_network,
        mgmt_subnet=mgmt_subnet,
    )
"""An AWS Python Pulumi program""" # REF: https://github.com/pulumi/examples/blob/master/aws-py-fargate/__main__.py import json import pulumi import pulumi_aws as aws # get configs defined in our yaml files config = pulumi.Config() network_layer_stack = config.require("network-layer-stack") pulumi_account = config.require("user-account") # get settings from stack references env = pulumi.get_stack() network_layer = pulumi.StackReference(f"{pulumi_account}/network-layer/{env}") # Read back the project VPC and subnets id's that were set up in the network-layer-{env}, which we will use. vpc_id = network_layer.require_output("vcp_id") vpc_azs = network_layer.require_output("vpc_azs") private_subnet_1_id = network_layer.require_output("private_subnet_1_id") private_subnet_2_id = network_layer.require_output("private_subnet_2_id") public_subnet_1_id = network_layer.require_output("public_subnet_1_id") public_subnet_2_id = network_layer.require_output("public_subnet_2_id") # # un-stringify the lists # private_subnets = json.loads(private_subnets) # public_subnets = json.loads(public_subnets) # Create an ECS cluster to run a container-based service. cluster = aws.ecs.Cluster("dask-ml-workflows")
cmd-args: config config-helper --format=json cmd-path: gcloud expiry-key: '{{.credential.token_expiry}}' token-key: '{{.credential.access_token}}' name: gcp """.format( info[2]["clusterCaCertificate"], info[1], "{0}_{1}_{2}".format(project, zone, info[0]), )) # Make a Kubernetes provider instance that uses our cluster from above. k8s_provider = Provider("gke_k8s", kubeconfig=k8s_config) # Create a canary deployment to test that this cluster works. labels = {"app": "canary-{0}-{1}".format(get_project(), get_stack())} canary = Deployment( "canary", spec={ "selector": { "matchLabels": labels }, "replicas": 1, "template": { "metadata": { "labels": labels }, "spec": { "containers": [{ "name": "nginx", "image": "nginx"
import pulumi

import component

# Stack selector: the "core" stack provisions only the shared inbound-mail
# DNS records; every other stack deploys the mail-processing lambda.
stack = pulumi.get_stack()

if stack == "core":
    dns_config = component.SesDNSConfig('inbound-mail-dns-test')
    pulumi.export('domain_name', dns_config.domain_name)
    pulumi.export('zone_name', dns_config.zone_name)
else:
    mail_processor = component.InboundMailProcessor(
        name='inbound-mail-processor-test',  # name of the resource
        handler='handler.py'  # lambda function to process inbound email
    )
    pulumi.export('email_id', mail_processor.email_id)
self.message = message # retrieve the stack configuration data config = Config() # retrieve optional separator choice and suffix separator = config.get('separator') or '-' separator = separator[0] if separator == ' ': separator = '' suffix = config.get('suffix') or '' # retrieve project and stack (org not yet available) project = get_project() stack = get_stack() # set default tags to be applied to all taggable resources default_tags = { 'manager': 'pulumi', 'project': project, 'stack': stack, } # Azure Bastion hosts in hub and spokes (until functional across peerings) azure_bastion = config.get_bool('azure_bastion') # Azure Firewall to route all Internet-bound traffic to designated next hop forced_tunnel = config.get('forced_tunnel') if forced_tunnel: ft_ip = ip_address(forced_tunnel) # check IP address is valid
"""An AWS Python Pulumi program""" import os import sys import pulumi import re from pulumi_aws import s3, kms # if not 'GIT_BRANCH' in os.environ: # sys.exit('GIT_BRANCH must be set') # env_name = re.sub(r'[^a-z]+', '-', os.environ['GIT_BRANCH'].lower()) stack_name = pulumi.get_stack() print("INFO : Stack name is", stack_name) config = pulumi.Config() key = kms.Key(f'{stack_name}-key') bucket = s3.Bucket(f'{stack_name}-bucket', server_side_encryption_configuration={ "rule": { 'apply_server_side_encryption_by_default': { 'sse_algorithm': 'aws:kms', 'kms_master_key_id': key.id } } }) # Export the name of the bucket pulumi.export('bucket_name', bucket.id)
def __init__(self):
    """Create a Lambda function (plus optional SQS event-source
    mappings) for every entry in the YAML spec.

    Exports every function id and mapping id, and registers each
    function in the module-level ``lambdas_by_name`` registry.
    """
    resource_specs = ParseYAML(resource_type).getSpecs()

    for lambda_name, config in resource_specs.items():
        # An empty YAML mapping parses as None; normalise to {}.
        config = config if config else {}
        resource_name = lambda_name
        resource_tags = config.get("tags")
        resource_env = config.get("environment")

        # Getting list of tags from configuration file
        tags_list = {}
        if resource_tags is not None:
            for each_tag_name, each_tag_value in resource_tags.items():
                tags_list.update({each_tag_name: each_tag_value})

        # Generating ENV vars
        env_list = {}
        if resource_env is not None:
            for each_env_name, each_env_value in resource_env.items():
                env_list.update({each_env_name: each_env_value})

        # Adding mandatory tags
        tags_list.update({"Name": resource_name})
        tags_list.update({
            "Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()
        })
        tags_list.update(resource_mandatory_tags)

        lambda_function = lambda_.Function(
            lambda_name,
            environment=lambda_.FunctionEnvironmentArgs(
                variables=env_list),
            handler=config.get("handler"),
            s3_bucket=config.get("s3_bucket"),
            s3_key=config.get("s3_key"),
            s3_object_version=config.get("s3_object_version"),
            memory_size=config.get("memory_size"),
            publish=config.get("publish"),
            reserved_concurrent_executions=config.get(
                "reserved_concurrent_executions"),
            role=IAM.RoleARN()[config.get("role")],
            runtime=config.get("runtime"),
            timeout=config.get("timeout"),
            tags=tags_list)

        # Export
        pulumi.export(lambda_function._name, lambda_function.id)

        # Event source mappings.
        # BUG FIX: `config.get("event_source_mapping")` returns None when
        # the key is absent, which crashed on `.items()`. Default to {}
        # so functions without mappings are handled gracefully.
        for mapping_name, mapping_config in (
                config.get("event_source_mapping") or {}).items():
            event_source = mapping_config["event_source"]
            assert event_source.get(
                "type"
            ) == "sqs", "Just sqs is currently supported as event source mapping. You're welcome to implement more."
            source_arn = SQS.ByName()[event_source["name"]].arn
            mapping = lambda_.EventSourceMapping(
                mapping_name,
                event_source_arn=source_arn,
                function_name=lambda_function.arn,
                batch_size=mapping_config.get("batch_size"))
            pulumi.export(mapping_name, mapping.id)

        # Registry used elsewhere to look functions up by logical name.
        # NOTE(review): the mangled original leaves this statement's
        # indentation ambiguous; registering once per lambda (not per
        # mapping) is the consistent interpretation — confirm.
        lambdas_by_name[lambda_name] = lambda_function
user: auth-provider: config: cmd-args: config config-helper --format=json cmd-path: gcloud expiry-key: '{{.credential.token_expiry}}' token-key: '{{.credential.access_token}}' name: gcp """.format(info[2]['clusterCaCertificate'], info[1], '{0}_{1}_{2}'.format( project, zone, info[0]))) # Make a Kubernetes provider instance that uses our cluster from above. k8s_provider = Provider('gke_k8s', kubeconfig=k8s_config) # Create a canary deployment to test that this cluster works. labels = {'app': 'canary-{0}-{1}'.format(get_project(), get_stack())} canary = Deployment('canary', spec={ 'selector': { 'matchLabels': labels }, 'replicas': 1, 'template': { 'metadata': { 'labels': labels }, 'spec': { 'containers': [{ 'name': 'nginx', 'image': 'nginx' }]
import pulumi
from pulumi_azure_nextgen.resources import latest as resources
from pulumi_azure_nextgen.storage import latest as storage
from pulumi_azure_nextgen.network import latest as network
import json
from pprint import pprint
import os

# Only two stacks are served; any other stack name aborts immediately.
stackName = pulumi.get_stack()

# Check if the stackName is either develop or production
if stackName == "develop":
    print("Pulumi Stack is develop")
elif stackName == "production":
    print("Pulumi Stack is production - be careful")
else:
    raise AssertionError(
        "We can only serve the develop and production Pulumi stack")

# Import generic variables from JSON
with open("../../generic/json/generic.json") as generic_params_file:
    generic_params_data = json.load(generic_params_file)
print("Generic parameter file contents:")
pprint(generic_params_data)

# Import environment-specific variables from JSON
env_params_file_location = str("../../generic/json/infrastructure-" +
                               stackName + ".json")
# NOTE(review): this `with` block's body continues beyond the end of
# this chunk — the remainder is not visible here.
with open(env_params_file_location) as env_params_file: