description='Enable HTTP access', ingress=[{ 'protocol': 'icmp', 'from_port': 8, 'to_port': 0, 'cidr_blocks': ['0.0.0.0/0'] }, { 'protocol': 'tcp', 'from_port': 80, 'to_port': 80, 'cidr_blocks': ['0.0.0.0/0'] }]) server = aws.ec2.Instance( 'web-server', instance_type="t2.micro", security_groups=[group.name], ami=ami.id, user_data=""" #!/bin/bash echo "Hello, World!" > index.html nohup python -m SimpleHTTPServer 80 & """, tags={ "Name": "web-server", }, ) export('ip', server.public_ip) export('hostname', server.public_dns)
class FinalResource(CustomResource):
    """Test resource that records a single numeric output under ``number``."""
    number: Output[str]

    def __init__(self, name, number):
        CustomResource.__init__(self, "test:index:FinalResource", name, {
            "number": number,
        })


def assert_eq(lhs, rhs):
    """Fail loudly when the two values differ."""
    assert lhs == rhs


res1 = MyResource("testResource1")
res2 = MyResource("testResource2")

# Outputs resolve asynchronously; the assertions run once values are known.
res1.number.apply(lambda n: assert_eq(n, 2))
res2.number.apply(lambda n: assert_eq(n, 3))

# Output.all combines a list of outputs into an output of a list.
resSum = Output.all(res1.number, res2.number).apply(lambda pair: pair[0] + pair[1])
FinalResource("testResource3", resSum)

# Exercise additional Output helpers.
hello_world = Output.concat("Hello ", Output.from_input("world!")).apply(
    lambda s: assert_eq(s, "Hello world!"))
export("helloworld", hello_world)
"Creating or updating virtual network", virtualNetwork["name"], "in resource group", str(platformResourceGroup.name), ) platformVirtualNetwork = network.VirtualNetwork( virtualNetwork["name"], virtual_network_name=virtualNetwork["name"], location=platformResourceGroup.location, resource_group_name=platformResourceGroup.name, address_space=virtualNetwork["addressSpace"], subnets=virtualNetwork["subnets"], ) # Export relevant outputs for other projects outputFormatId = "platformVirtualNetworkId-" + str(virtualNetwork["name"]) pulumi.export(outputFormatId, platformVirtualNetwork.id) outputFormatName = "platformVirtualNetworkName-" + str( virtualNetwork["name"]) pulumi.export(outputFormatName, platformVirtualNetwork.name) outputFormatSubnets = "platformVirtualNetworkSubnets-" + str( virtualNetwork["name"]) pulumi.export(outputFormatSubnets, platformVirtualNetwork.subnets) # Export relevant data to Pulumi output pulumi.export("platformResourceGroupName", platformResourceGroup.name) pulumi.export("platformStorageAccountId", platformStorageAccount.id)
administrator_login_password=pwd, version="12.0") database = sql.Database("appservice-db", resource_group_name=resource_group.name, server_name=sql_server.name, requested_service_objective_name="S0") connection_string = Output.all(sql_server.name, database.name, username, pwd) \ .apply(lambda args: f"Server=tcp:{args[0]}.database.windows.net;initial catalog={args[1]};user ID={args[2]};password={args[3]};Min Pool Size=0;Max Pool Size=30;Persist Security Info=true;") app = appservice.AppService("appservice-as", resource_group_name=resource_group.name, app_service_plan_id=app_service_plan.id, app_settings={ "WEBSITE_RUN_FROM_PACKAGE": signed_blob_url, "ApplicationInsights:InstrumentationKey": app_insights.instrumentation_key, "APPINSIGHTS_INSTRUMENTATIONKEY": app_insights.instrumentation_key, }, connection_strings=[{ "name": "db", "type": "SQLAzure", "value": connection_string }]) export("endpoint", app.default_site_hostname.apply(lambda endpoint: "https://" + endpoint))
print(location)

# Resource group that holds every resource in this stack.
resource_group = core.ResourceGroup('my_pulumi_resource_group', location=location)

# Create an Azure resource (Storage Account)
account = storage.Account(
    'storage',
    # The location for the storage account will be derived automatically from the resource group.
    resource_group_name=resource_group.name,
    account_tier='Standard',
    account_replication_type='LRS',
)

# Free-tier App Service plan to host the function app.
appService = appservice.Plan(
    "appServicePlan",
    resource_group_name=resource_group.name,
    sku={
        "tier": "free",
        "size": "F1",
    },
)

example_function = appservice.FunctionApp(
    "MyTestFunction",
    resource_group_name=resource_group.name,
    app_service_plan_id=appService.id,
    # NOTE(review): the functions runtime is normally selected via
    # FUNCTIONS_WORKER_RUNTIME — confirm this key against the provider docs.
    app_settings={"runtime": "python"},
    # BUG FIX: this argument expects the storage account's connection string,
    # not the storage.Account resource object itself.
    storage_connection_string=account.primary_connection_string,
)

# Export the connection string for the storage account
pulumi.export('connection_string', account.primary_connection_string)
# Look up the most recent Amazon Linux HVM AMI published by Amazon
# (owner account 137112412989).
ami = aws.ec2.get_ami(
    most_recent="true",
    owners=["137112412989"],
    filters=[{"name": "name", "values": ["amzn-ami-hvm-*"]}],
)

# Security group opening TCP 8080 to the world.
group = aws.ec2.SecurityGroup(
    'pulumi_allow_8080',
    description='Enable access to port 8080',
    ingress=[{
        'protocol': 'tcp',
        'from_port': 8080,
        'to_port': 8080,
        'cidr_blocks': ['0.0.0.0/0'],
    }],
)

# EC2 instance bootstrapped with the user_data script defined above.
server = aws.ec2.Instance(
    'webserver',
    instance_type=size,
    vpc_security_group_ids=[group.id],
    ami=ami.id,
    user_data=user_data,
    tags={"Name": "Testserver-Pulumi-updated"},
)

pulumi.export('publicIp', server.public_ip)
pulumi.export('publicHostName', server.public_dns)
}, }, __opts__=ResourceOptions(provider=k8s_provider)) ingress = Service('do-app-svc', spec={ 'type': 'LoadBalancer', 'selector': app_labels, 'ports': [{ 'port': 80 }], }, __opts__=ResourceOptions(provider=k8s_provider, custom_timeouts={ "create": "15m", "delete": "15m" })) ingress_ip = ingress.status['load_balancer']['ingress'][0]['ip'] export('ingress_ip', ingress_ip) if domain_name: domain = do.Domain("do-domain", name=domain_name, ip_address=ingress_ip) cname_record = do.DnsRecord("do-domain-name", domain=domain_name, type="CNAME", name="www", value="@")
location=resource_group.location, network_interface_ids=[network_iface.id], vm_size="Standard_A0", delete_data_disks_on_termination=True, delete_os_disk_on_termination=True, os_profile={ "computer_name": "hostname", "admin_username": username, "admin_password": password, "custom_data": userdata, }, os_profile_linux_config={ "disable_password_authentication": False, }, storage_os_disk={ "create_option": "FromImage", "name": "myosdisk1", }, storage_image_reference={ "publisher": "canonical", "offer": "UbuntuServer", "sku": "16.04-LTS", "version": "latest", }) combined_output = Output.all(vm.id, public_ip.name, public_ip.resource_group_name) public_ip_addr = combined_output.apply( lambda lst: network.get_public_ip(name=lst[1], resource_group_name=lst[2])) pulumi.export("public_ip", public_ip_addr.ip_address)
load_balancer_type="application", subnets=[s.id for s in public_subnets], security_groups=[lb_sg.id], internal=False) alb_tg = aws.lb.TargetGroup("lambda-pulumi", target_type="lambda", health_check={ "enabled": True, "path": "/", "matcher": 200, "timeout": 30, "interval": 40 }) aws.lambda_.Permission("load-balancer", action="lambda:InvokeFunction", function=function.id, principal="elasticloadbalancing.amazonaws.com") aws.lb.TargetGroupAttachment("lambda", target_group_arn=alb_tg.arn, target_id=function.arn) aws.lb.Listener("lambda", load_balancer_arn=lb.arn, port=80, protocol="HTTP", default_actions=[{ "type": "forward", "target_group_arn": alb_tg.arn }]) # Export to output pulumi.export("fqdn", lb.dns_name)
) ], ports=[ContainerPortArgs(container_port=80, )], ) ], ), ), )) frontend_service = Service( "frontend", metadata=ObjectMetaArgs( name="frontend", labels=frontend_labels, ), spec=ServiceSpecArgs( type="ClusterIP" if isMinikube else "LoadBalancer", ports=[ServicePortArgs(port=80)], selector=frontend_labels, )) frontend_ip = "" if isMinikube: frontend_ip = frontend_service.spec.apply( lambda spec: spec.cluster_ip or "") else: ingress = frontend_service.status.apply( lambda status: status.load_balancer.ingress[0]) frontend_ip = ingress.apply( lambda ingress: ingress.ip or ingress.hostname or "") pulumi.export("frontend_ip", frontend_ip)
password=admin_password)) container_app = web.ContainerApp("app", resource_group_name=resource_group.name, kube_environment_id=kube_env.id, configuration=web.ConfigurationArgs( ingress=web.IngressArgs( external=True, target_port=80 ), registries=[ web.RegistryCredentialsArgs( server=registry.login_server, username=admin_username, password_secret_ref="pwd") ], secrets=[ web.SecretArgs( name="pwd", value=admin_password) ], ), template=web.TemplateArgs( containers = [ web.ContainerArgs( name="myapp", image=my_image.image_name) ])) pulumi.export("url", container_app.configuration.apply(lambda c: c.ingress).apply(lambda i: i.fqdn))
# Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio

from pulumi import export
from pulumi.runtime import invoke


async def await_invoke():
    """Invoke the test function and hand back its awaited result."""
    result = await invoke("test:index:MyFunction", {})
    return result


# Export two futures; the runtime resolves them before the program finishes.
export("f1", asyncio.ensure_future(await_invoke()))
export("f2", asyncio.ensure_future(await_invoke()))
kubernetes_version="1.12.5", dns_prefix="dns", agent_pool_profile=({ "name": "type1", "count": 3, "vmSize": "Standard_B2ms", "osType": "Linux", "maxPods": 110, "vnet_subnet_id": subnet.id }), linux_profile=({ "adminUsername": "******", "ssh_key": [{ "keyData": SSHKEY }] }), service_principal={ "clientId": app.application_id, "clientSecret": sppwd.value }, role_based_access_control={"enabled": "true"}, network_profile=({ "networkPlugin": "azure", "serviceCidr": "10.10.0.0/16", "dns_service_ip": "10.10.0.10", "dockerBridgeCidr": "172.17.0.1/16" }), __opts__=ResourceOptions(depends_on=[acr_assignment, subnet_assignment])) pulumi.export('kubeconfig', aks.kube_config_raw)
"server-network", resource_group_name=resource_group.name, location=resource_group.location, address_spaces=["10.0.0.0/16"], subnets=[ network.VirtualNetworkSubnetArgs(name="default", address_prefix="10.0.1.0/24") ], opts=ResourceOptions(parent=resource_group), ) subnet = network.Subnet( "server-subnet", resource_group_name=resource_group.name, virtual_network_name=net.name, address_prefixes=["10.0.2.0/24"], opts=ResourceOptions(parent=net), ) web_server = WebServer( "server", WebServerArgs( resource_group=resource_group, subnet=subnet, username=username, password=password, ), ) pulumi.export("public_ip", web_server.public_ip_addr)
# Copyright 2016-2018, Pulumi Corporation. All rights reserved.

import pulumi
from pulumi_azure import core

# Wrap the invoke result in an Output so its attributes can be lifted lazily.
current = core.get_subscription()
subscription = pulumi.Output.from_input(current)

pulumi.export('sub-display-name', subscription.display_name)
import pulumi
import pulumi_aws as aws

# Bucket that receives the access logs.
logs = aws.s3.Bucket("logs")

# Bucket configured to ship its access logs into `logs`.
bucket = aws.s3.Bucket(
    "bucket",
    loggings=[{"targetBucket": logs.bucket}],
)

pulumi.export("targetBucket", bucket.loggings[0]["targetBucket"])
"type": "VirtualMachineScaleSets", "vm_size": "Standard_DS2_v2", }], enable_rbac=True, kubernetes_version="1.18.14", linux_profile={ "admin_username": "******", "ssh": { "public_keys": [{ "key_data": ssh_key.public_key_openssh, }], }, }, node_resource_group=f"MC_azure-native-go_{managed_cluster_name}_westus", service_principal_profile={ "client_id": ad_app.application_id, "secret": ad_sp_password.value }) creds = pulumi.Output.all(resource_group.name, managed_cluster.name).apply( lambda args: containerservice.list_managed_cluster_user_credentials( resource_group_name=args[0], resource_name=args[1])) # Export kubeconfig encoded = creds.kubeconfigs[0].value kubeconfig = encoded.apply( lambda enc: base64.b64decode(enc).decode()) pulumi.export("kubeconfig", kubeconfig)
import pulumi
import pulumi_mongodbatlas as mongodb

# The Atlas organization to create the project under comes from stack config.
cfg = pulumi.Config()
organization_id = cfg.require("orgId")

project = mongodb.Project("demo-project", org_id=organization_id)

pulumi.export('project_name', project.name)
) spoke2 = Spoke( 's02', # stem of child resource names (<6 chars) SpokeProps( azure_bastion=config.azure_bastion, fw_rt_name=hub.fw_rt_name, hub=hub, peer=config.peer, reference=config.reference, resource_group_name=resource_group_name, spoke_address_space=str(next(config.stack_sn)), subnets=[ # extra columns for future ASGs ('web', 'any', 'app'), ('app', 'web', 'db'), ('db', 'app', 'none'), ], tags=config.default_tags, ), ) # export information about the stack required for stack peering export('dmz_ar', hub.dmz_ar) export('fw_ip', hub.fw_ip) export('hub_as', hub.address_space) export('hub_id', hub.id) export('s01_as', spoke1.address_space) export('s01_id', spoke1.id) export('s02_as', spoke2.address_space) export('s02_id', spoke2.id)
'Owner': cfg.require('owner'), 'PulumiProject': pulumi.get_project(), 'PulumiStack': pulumi.get_stack(), 'Customer': cfg.require_secret('customer') } opts = pulumi.ResourceOptions() if cfg.get_bool("local-mode"): opts.provider = aws.Provider(resource_name="localstack", access_key="integration-testing", secret_key="integration-testing", region="us-east-1", endpoints=[{ "s3": "http://localhost:4572" }], skip_credentials_validation=True, s3_force_path_style=True, skip_metadata_api_check=True, skip_requesting_account_id=True, skip_region_validation=True) # Provision an AWS S3 Bucket bucket = aws.s3.Bucket(resource_name=bucket_name, force_destroy=False, tags=tags, opts=opts) # Export the name of the S3 bucket pulumi.export('s3_bucket_name', bucket.id)
# Copyright 2016-2020, Pulumi Corporation. All rights reserved.

import pulumi
from pulumi_aws import ec2

from ami import get_linux_ami

size = 't2.micro'

# Allow inbound HTTP (port 80) from anywhere.
group = ec2.SecurityGroup(
    'web-secgrp',
    description='Enable HTTP access',
    ingress=[
        ec2.SecurityGroupIngressArgs(
            protocol='tcp',
            from_port=80,
            to_port=80,
            cidr_blocks=['0.0.0.0/0'],
        ),
    ],
)

# Web server instance using the looked-up Linux AMI for this size.
server = ec2.Instance(
    'web-server-www',
    instance_type=size,
    security_groups=[group.name],
    ami=get_linux_ami(size),
)

pulumi.export('public_ip', server.public_ip)
pulumi.export('public_dns', server.public_dns)
None, # list – An alias block. Conflicts with ttl & records. Alias record documented below. allow_overwrite= None, # bool – Allow creation of this record to overwrite an existing record, if any. This does not affect the ability to update the record using this provider and does not prevent other resources within this provider or manual Route 53 changes outside this provider from overwriting this record. false by default. This configuration is not recommended for most environments. failover_routing_policies= None, # list – A block indicating the routing behavior when associated health check fails. Conflicts with any other routing policy. Documented below. geolocation_routing_policies= None, # list – A block indicating a routing policy based on the geolocation of the requestor. Conflicts with any other routing policy. Documented below. health_check_id= None, # str – The health check the record should be associated with. latency_routing_policies= None, # list – A block indicating a routing policy based on the latency between the requestor and an AWS region. Conflicts with any other routing policy. Documented below. multivalue_answer_routing_policy= None, # bool – Set to true to indicate a multivalue answer routing policy. Conflicts with any other routing policy. name= None, # str – DNS domain name for a CloudFront distribution, S3 bucket, ELB, or another resource record set in this hosted zone. records= None, # list – A string list of records. To specify a single record value longer than 255 characters such as a TXT record for DKIM, add "" inside the configuration string (e.g. "first255characters""morecharacters"). set_identifier= None, # str – Unique identifier to differentiate records with routing policies from one another. Required if using failover, geolocation, latency, or weighted routing policies documented below. ttl=None, # float – The TTL of the record. type= None, # dict – PRIMARY or SECONDARY. 
A PRIMARY record will be served if its healthcheck is passing, otherwise the SECONDARY will be served. weighted_routing_policies= None, # list – A block indicating a weighted routing policy. Conflicts with any other routing policy. Documented below. zone_id= None # str – Hosted zone ID for a CloudFront distribution, S3 bucket, ELB, or Route 53 hosted zone. ) pulumi.export( 'ekscluster vpc_config', ekscluster.vpc_config['clusterSecurityGroupId'] ) # vpc_config is kinda a dict but you can't use typical python dict.get() on it cause it's actually a pulumi.output.Output type object
"node_size": "Standard_D2_v2" }, ] cluster_names = [] for config in aks_cluster_config: cluster = containerservice.KubernetesCluster( "aksCluster-%s" % config["name"], resource_group_name=resource_group.name, linux_profile=containerservice.KubernetesClusterLinuxProfileArgs( admin_username="******", ssh_key=containerservice.KubernetesClusterLinuxProfileSshKeyArgs( key_data=ssh_public_key, ), ), service_principal=containerservice. KubernetesClusterServicePrincipalArgs( client_id=ad_app.application_id, client_secret=ad_sp_password.value), location=config["location"], default_node_pool=containerservice. KubernetesClusterDefaultNodePoolArgs( name="aksagentpool", node_count=config["node_count"], vm_size=config["node_size"], ), dns_prefix="sample-kube", ) cluster_names.append(cluster.name) export("aks_cluster_names", Output.all(cluster_names))
database="SECONDDATABASE", schema="SECONDSCHEMA", file_format={ "type": "CSV", "null_if": ["NULL", "n"], "compression": "gzip", "record_delimiter": ';', "field_delimiter": "NONE", "encoding": "UTF8", "date_format": "AUTO", "skip_header": 1 }, copy_options={ "size_limit": 100, "on_error": "skip_file_45%", "match_by_column_name": "case_insensitive", }, provider=my_provider) pulumi.export('StorageIntegrationName', my_storage_integration.name) pulumi.export('StorageIntegrationType', my_storage_integration.type) pulumi.export('StorageIntegrationArn', my_storage_integration.storage_aws_role_arn) pulumi.export('FileFormatType', my_file_format.type) pulumi.export('FileFormatName', my_file_format.name) pulumi.export('FileFormatDatabase', my_file_format.database) pulumi.export('FileFormatSchema', my_file_format.schema) pulumi.export('StageName', my_stage.name)
}, }, "env": [{ "name": "REDIS", "value": "azure-vote-back", }], }], }, }, }, opts=pulumi.ResourceOptions(provider=k8s_provider)) azure_vote_front_service = kubernetes.core.v1.Service( "azure_vote_frontService", api_version="v1", kind="Service", metadata={ "name": "azure-vote-front", }, spec={ "type": "LoadBalancer", "ports": [{ "port": 80, }], "selector": { "app": "azure-vote-front", }, }, opts=pulumi.ResourceOptions(provider=k8s_provider)) pulumi.export("endpoint", azure_vote_front_service.status)
def CloudRun(img="gcr.io/leadmrktr/pasteit", location="asia-northeast1", project="leadmrktr"):
    """Deploy ``img`` as a Cloud Run service open to unauthenticated callers.

    Returns the gcp.cloudrun.Service resource so its outputs can be exported.
    """
    service = gcp.cloudrun.Service(
        "pasteit",
        location=location,
        project=project,
        template={
            "spec": {
                "containers": [{"image": img}],
            },
        },
        # Route all traffic to the most recently deployed revision.
        traffics=[{
            "latestRevision": True,
            "percent": 100,
        }],
    )
    # Grant roles/run.invoker to allUsers so the endpoint is publicly reachable.
    gcp.cloudrun.IamMember(
        "enable_for_all",
        service=service.name,
        project=project,
        location=location,
        role="roles/run.invoker",
        member="allUsers",
    )
    return service


cloud_run = CloudRun()

pulumi.export("url", cloud_run.status["url"])
pulumi.export("cloud run", cloud_run)
'updateBehavior': 'UPDATE_IN_DATABASE' }) # create glue workflows and triggers print(f'reading workflow definitions from directory => {workflow_dir}') for filepath in glob.glob(workflow_dir + '/*.yaml'): wf_name = os.path.splitext(os.path.basename(filepath))[0] # read workflow definition from file wf_map = WorkflowDefinition(filepath) # create workflow using file name as name, # pass map of glue jobs as it holds actual job names wf = JobWorkflow(wf_name, wf_definition=wf_map, glue_jobs_map=gluejobs) pulumi.export('datalake_bucket', infra.datalake_bucket.bucket) pulumi.export('fileproc_bucket', infra.fileproc_bucket.bucket) pulumi.export('scripts_bucket', infra.scripts_bucket.bucket) pulumi.export('kms_key', infra.kms_key.arn) pulumi.export('glue_security_config', infra.glue_security_config.name) pulumi.export('policy_kms_full_usage', infra.policy_kms_full_usage.arn) pulumi.export('policy_kms_encrypt_only', infra.policy_kms_encrypt_only.arn) pulumi.export('policy_get_object_pii', infra.policy_get_object_pii.arn) pulumi.export('policy_get_object_confidential', infra.policy_get_object_confidential.arn) pulumi.export('policy_get_object_nonsensitive', infra.policy_get_object_nonsensitive.arn) pulumi.export('policy_get_scripts', infra.policy_get_scripts.arn) pulumi.export('policy_glue_service', infra.policy_glue_service.arn)
import mimetypes

import pulumi_aws as aws

# Create a bucket and expose a website index document
site_bucket = aws.s3.Bucket("siteBucket", website={
    "indexDocument": "index.html",
})

site_dir = "www"

# For each file in the directory, create an S3 object stored in `siteBucket`.
# BUG FIX: the original used `(lambda: raise Exception(...))()` — a converter
# placeholder that is a SyntaxError (`raise` is a statement, illegal inside a
# lambda). Derive the MIME type from the file extension instead. Also renamed
# the loop variable, which shadowed the builtin `range`.
files = []
for idx, file_name in enumerate(os.listdir(site_dir)):
    # guess_type returns (type, encoding); type is None for unknown extensions.
    mime_type, _ = mimetypes.guess_type(file_name)
    files.append(aws.s3.BucketObject(
        f"files-{idx}",
        bucket=site_bucket.id,
        key=file_name,
        source=pulumi.FileAsset(f"{site_dir}/{file_name}"),
        content_type=mime_type,  # set the MIME type of the file
    ))

# Set the access policy for the bucket so all objects are readable
bucket_policy = aws.s3.BucketPolicy(
    "bucketPolicy",
    bucket=site_bucket.id,
    policy=site_bucket.id.apply(lambda id: json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": "*",
            "Action": ["s3:GetObject"],
            "Resource": [f"arn:aws:s3:::{id}/*"],
        }],
    })),
)

pulumi.export("bucketName", site_bucket.bucket)
pulumi.export("websiteUrl", site_bucket.website_endpoint)
# Build a resource from another resource's output object via attribute access.
res2 = MyResource(
    "testres2",
    additional=AdditionalArgs(
        first_value=res.additional.first_value,
        second_value=res.additional.second_value,
    ),
)

# Same thing, but reading the output object like a dict.
res3 = MyResource(
    "testres3",
    additional=AdditionalArgs(
        first_value=res.additional["first_value"],
        second_value=res.additional["second_value"],
    ),
)

# Build a resource from a plain dict input.
# Note: keys are camelCase (not snake_case) since the resource does not do any
# translation of property names.
res4 = MyResource(
    "testres4",
    additional={
        "firstValue": "hello",
        "secondValue": 42,
    },
)

# Export first/second values for every resource, in creation order.
for label, r in (("res", res), ("res2", res2), ("res3", res3), ("res4", res4)):
    export(f"{label}_first_value", r.additional.first_value)
    export(f"{label}_second_value", r.additional.second_value)
import pulumi
import pulumi_aws as aws

# Query the local gateways that carry the production service tag.
foo_local_gateways = aws.ec2.get_local_gateways(
    tags={"service": "production"},
)

pulumi.export("foo", foo_local_gateways.ids)