def get(resource_name, id, opts=None):
    """
    Fetch the state of an existing `VolumeAttachment` resource identified by `id`.

    The ID takes the form `[namespace]/[name]`; when `[namespace]` is omitted,
    it becomes (per Kubernetes convention) `default/[name]`. Pulumi tracks this
    resource under `resource_name` as its Pulumi ID.

    :param str resource_name: _Unique_ name used to register this resource with Pulumi.
    :param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
           Takes the form `[namespace]/[name]` or `[name]`.
    :param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
           resource's behavior.
    """
    # Fold the target ID into whatever options the caller supplied.
    merged_opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    return VolumeAttachment(resource_name, opts=merged_opts)
def route_to_virtual_appliance(
    stem,
    route_table_name,
    address_prefix,
    next_hop_ip_address,
):
    """Create a Route sending ``address_prefix`` traffic to a virtual appliance.

    Uses ``s``, ``resource_group_name`` and ``self`` from the enclosing scope.
    Returns the created ``network.Route``.
    """
    return network.Route(
        f'{stem}{s}r',
        resource_group_name=resource_group_name,
        route_table_name=route_table_name,
        address_prefix=address_prefix,
        next_hop_type='VirtualAppliance',
        next_hop_in_ip_address=next_hop_ip_address,
        opts=ResourceOptions(parent=self),
    )
def bucket_object_converter(filepath):
    """
    Take a file path and return a bucket object managed by Pulumi.

    :param str filepath: Path to the local file to upload.
    :return: The created ``pulumi_aws.s3.BucketObject``.
    """
    # Key the object by its path relative to the web-contents root.
    relative_path = filepath.replace(web_contents_root_path + '/', '')
    # Determine the mimetype using the `mimetypes` module.
    mime_type, _ = mimetypes.guess_type(filepath)
    content_file = pulumi_aws.s3.BucketObject(
        relative_path,
        key=relative_path,
        acl='public-read',
        bucket=content_bucket.id,
        content_type=mime_type,
        source=FileAsset(filepath),
        opts=ResourceOptions(parent=content_bucket),
    )
    # BUG FIX: the docstring promises a bucket object is returned, but the
    # original dropped `content_file` on the floor and returned None.
    return content_file
def subnet(
    stem,
    virtual_network_name,
    address_prefix,
    route_table_id,
    depends_on=None,
):
    """Create a Subnet bound to the route table identified by ``route_table_id``.

    Uses ``s``, ``suffix``, ``resource_group_name`` and ``self`` from the
    enclosing scope. Returns the created ``network.Subnet``.
    """
    new_subnet = network.Subnet(
        f'{stem}{s}sn',
        subnet_name=f'{stem}{s}sn{s}{suffix}',
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        address_prefix=address_prefix,
        route_table=network.RouteTableArgs(id=route_table_id),
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
    return new_subnet
def __init__(
    self, name, file_system: FileSystem, path, opts: ResourceOptions = None
):
    """Create an EFS access point rooted at ``path`` plus the matching IAM policy text.

    The access point is owned by uid/gid 1000 with 755 permissions; the policy
    document (exposed as ``self.policy_document``) allows mounting and writing
    through this specific access point only.
    """
    super().__init__("redata:cluster:FileSystem", name, {}, opts)

    # Access point rooted at `path`; directory is created with uid/gid 1000.
    self.ap = aws.efs.AccessPoint(
        name,
        file_system_id=file_system.efs.id,
        posix_user=aws.efs.AccessPointPosixUserArgs(uid=1000, gid=1000),
        root_directory=aws.efs.AccessPointRootDirectoryArgs(
            path=path,
            creation_info=aws.efs.AccessPointRootDirectoryCreationInfoArgs(
                owner_uid=1000, owner_gid=1000, permissions="755"
            ),
        ),
        opts=ResourceOptions(parent=self),
    )

    def _mount_policy(arns):
        # arns = (file system ARN, access point ARN), resolved by Output.all.
        fs_arn, ap_arn = arns
        return json.dumps(
            {
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": [
                            "elasticfilesystem:ClientMount",
                            "elasticfilesystem:ClientWrite",
                        ],
                        "Resource": fs_arn,
                        "Condition": {
                            "StringEquals": {
                                "elasticfilesystem:AccessPointArn": ap_arn
                            }
                        },
                    }
                ],
            }
        )

    self.policy_document = Output.all(file_system.efs.arn, self.ap.arn).apply(
        _mount_policy
    )

    self.register_outputs({})
def subnet_special(
    stem,
    name,
    virtual_network_name,
    address_prefix,
    depends_on=None,
):
    """Create an explicitly named Subnet (delete-before-replace).

    Uses ``resource_group_name`` and ``self`` from the enclosing scope.
    Returns the created ``network.Subnet``.

    BUG FIX: the default for ``depends_on`` was a mutable list (``[]``),
    which Python shares across all calls that omit the argument; ``None``
    has the same "no dependencies" meaning for ResourceOptions without
    that hazard (and matches the sibling helpers in this file).
    """
    sn = network.Subnet(
        f'{stem}-sn',
        name=name,
        resource_group_name=resource_group_name,
        address_prefixes=[address_prefix],
        virtual_network_name=virtual_network_name,
        opts=ResourceOptions(
            parent=self,
            # Subnets cannot be renamed in place, so replace first-by-delete.
            delete_before_replace=True,
            depends_on=depends_on,
        ),
    )
    return sn
def vnet_peering(
    stem,
    virtual_network_name,
    peer,
    remote_virtual_network_id,
    allow_forwarded_traffic=None,
    allow_gateway_transit=None,
    use_remote_gateways=None,
):
    """Peer ``virtual_network_name`` with the remote network given by its ID.

    Virtual-network access is always allowed; the remaining peering flags are
    forwarded verbatim. Uses ``resource_group_name`` and ``self`` from the
    enclosing scope. Returns the created ``network.VirtualNetworkPeering``.
    """
    return network.VirtualNetworkPeering(
        f'{stem}-{peer}-vnp-',
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        remote_virtual_network_id=remote_virtual_network_id,
        allow_virtual_network_access=True,
        allow_forwarded_traffic=allow_forwarded_traffic,
        allow_gateway_transit=allow_gateway_transit,
        use_remote_gateways=use_remote_gateways,
        opts=ResourceOptions(parent=self),
    )
def subnet_special(
    stem,
    name,
    virtual_network_name,
    address_prefix,
    route_table_id,
    depends_on=None,
):
    """Create an explicitly named Subnet bound to a route table.

    Created delete-before-replace because subnets cannot be renamed in place.
    Uses ``s``, ``resource_group_name`` and ``self`` from the enclosing scope.
    Returns the created ``network.Subnet``.
    """
    return network.Subnet(
        f'{stem}{s}sn',
        subnet_name=name,
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        address_prefix=address_prefix,
        route_table=network.RouteTableArgs(id=route_table_id),
        opts=ResourceOptions(
            parent=self,
            delete_before_replace=True,
            depends_on=depends_on,
        ),
    )
def get(resource_name, api_version, kind, id, opts=None):
    """
    Fetch the state of an existing `CustomResource`, as identified by `id`.

    Typically this ID is of the form [namespace]/[name]; if [namespace] is
    omitted, then (per Kubernetes convention) the ID becomes default/[name].
    Pulumi will keep track of this resource using `resource_name` as the
    Pulumi ID.

    :param str resource_name: _Unique_ name used to register this resource with Pulumi.
    :param str api_version: The API version of the apiExtensions.CustomResource we
           wish to select, as specified by the CustomResourceDefinition that defines
           it on the API server.
    :param str kind: The kind of the apiextensions.CustomResource we wish to select,
           as specified by the CustomResourceDefinition that defines it on the API
           server.
    :param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
           Takes the form <namespace>/<name> or <name>.
    :param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
           resource's behavior.
    """
    # Fold the target ID into whatever options the caller supplied.
    merged_opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    return CustomResource(
        resource_name=resource_name,
        api_version=api_version,
        kind=kind,
        opts=merged_opts,
    )
def vnet_peering(
    stem,
    virtual_network_name,
    peer,
    remote_virtual_network_id,
    allow_forwarded_traffic=None,
    allow_gateway_transit=None,
    use_remote_gateways=None,
    depends_on=None,
):
    """Peer ``virtual_network_name`` with the remote network given by its ID.

    Virtual-network access is always allowed; the remaining peering flags are
    forwarded verbatim. Uses ``s``, ``suffix``, ``resource_group_name`` and
    ``self`` from the enclosing scope. Returns the created
    ``network.VirtualNetworkPeering``.
    """
    return network.VirtualNetworkPeering(
        f'{stem}{s}{peer}{s}vnp',
        virtual_network_peering_name=f'{stem}{s}{peer}{s}vnp{s}{suffix}',
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        remote_virtual_network=network.SubResourceArgs(
            id=remote_virtual_network_id),
        allow_virtual_network_access=True,
        allow_forwarded_traffic=allow_forwarded_traffic,
        allow_gateway_transit=allow_gateway_transit,
        use_remote_gateways=use_remote_gateways,
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
def __init__(self, name, opts=None):
    """Register this component and create a child parented by ``self``.

    The child's alias records the parent taken from the incoming ``opts``
    (``None`` when no options / no parent were supplied).

    BUG FIX: the default was ``opts=ResourceOptions()`` — a single mutable
    instance shared by every call that omits ``opts``. Normalizing ``None``
    to a fresh ``ResourceOptions()`` inside the body preserves the original
    behavior (``opts.parent`` is ``None``) without the shared-state hazard.
    """
    if opts is None:
        opts = ResourceOptions()
    super().__init__("my:module:Component3", name, None, opts)
    mycomp2 = Component2(
        name + "-child",
        ResourceOptions(aliases=[Alias(parent=opts.parent)], parent=self))
def generate_dynamo_data_source(self, graphql_api, type_name):
    """
    Generates a DynamoDB data source for the given GraphQL type. This includes
    the Dynamo table, the AppSync data source, a data source role, and the
    resolvers.

    NOTE: This function generates Dynamo tables with a hash key called `id`,
    but no other keys.

    :param type_name The name of the GraphQL type. This is the identifier
        which appears after the `type` keyword in the schema.
    """
    # Backing table: single string hash key "id", on-demand billing.
    table = dynamodb.Table(
        f"{self.stack_name}_{type_name}_table",
        name=f"{self.stack_name}_{self.random_chars}.{type_name}",
        hash_key="id",
        attributes=[{"name": "id", "type": "S"}],
        # stream_view_type="NEW_AND_OLD_IMAGES",
        billing_mode="PAY_PER_REQUEST",
    )

    # Role that AppSync assumes when it talks to the table on our behalf.
    data_source_iam_role = iam.Role(
        f"{self.stack_name}_{type_name}_role",
        assume_role_policy="""{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "appsync.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}""",
    )

    aws_region = config.region
    account_id = get_caller_identity().account_id

    # Inline policy scoping the role to read/write operations on this table
    # (and its indexes/streams via the "/*" ARN). Built with .apply because
    # the physical table name is only known after creation.
    data_source_iam_role_policy = iam.RolePolicy(
        f"{self.stack_name}_{type_name}_role_policy",
        role=data_source_iam_role.name,
        name="MyDynamoDBAccess",
        policy=table.name.apply(
            lambda table_name: f"""{{
    "Version": "2012-10-17",
    "Statement": [
        {{
            "Effect": "Allow",
            "Action": [
                "dynamodb:BatchGetItem",
                "dynamodb:BatchWriteItem",
                "dynamodb:PutItem",
                "dynamodb:DeleteItem",
                "dynamodb:GetItem",
                "dynamodb:Scan",
                "dynamodb:Query",
                "dynamodb:UpdateItem"
            ],
            "Resource": [
                "arn:aws:dynamodb:{aws_region}:{account_id}:table/{table_name}",
                "arn:aws:dynamodb:{aws_region}:{account_id}:table/{table_name}/*"
            ]
        }}
    ]
}}"""
        ),
    )

    # AppSync data source pointing at the table through the role above.
    data_source = appsync.DataSource(
        f"{self.stack_name}_{type_name}_data_source",
        api_id=graphql_api.id,
        name=f"{type_name}TableDataSource_{self.random_chars}",
        type="AMAZON_DYNAMODB",
        service_role_arn=data_source_iam_role.arn,
        dynamodb_config={"table_name": table.name},
        opts=ResourceOptions(depends_on=[data_source_iam_role]),
    )

    # Resolvers wiring the GraphQL fields to the data source.
    resolvers = self.generate_resolvers(graphql_api, type_name, data_source)

    return {
        "table": table,
        "data_source_iam_role": data_source_iam_role,
        "data_source_iam_role_policy": data_source_iam_role_policy,
        "data_source": data_source,
        "resolvers": resolvers,
    }
# The creation of the component is unchanged. comp2 = Component1("comp2") # Scenario 3: adopt this resource into a new parent. class Component2(ComponentResource): def __init__(self, name, opts=None): super().__init__("my:module:Component2", name, None, opts) # validate that "parent: undefined" means "i didn't have a parent previously" unparented_comp2 = Component2( "unparented", ResourceOptions(aliases=[Alias(parent=ROOT_STACK_RESOURCE)], parent=comp2)) # Scenario 4: Make a child resource that is parented by opts instead of 'this'. Fix in the next # step to be parented by this. Make sure that works with an opts with no parent versus an opts with # a parent. class Component3(ComponentResource): def __init__(self, name, opts=ResourceOptions()): super().__init__("my:module:Component3", name, None, opts) mycomp2 = Component2( name + "-child", ResourceOptions(aliases=[Alias(parent=opts.parent)], parent=self)) parented_by_stack_comp3 = Component3("parentedbystack")
from pulumi_aws.config.vars import region
from pulumi import Config, ResourceOptions, export


def require_region():
    """
    require_region fetches the AWS region, requiring that it exists.
    if it does not exist, an exception is raised.
    """
    if not region:
        raise Exception('No AWS region has been configured')
    return region


config = Config()
# ARN of the role the "privileged" provider below will assume.
role_to_assume_arn = config.require('roleToAssumeARN')

# Explicit provider that performs all of its operations under the assumed role.
# NOTE(review): `aws` is not imported in this snippet — presumably
# `import pulumi_aws as aws` appears elsewhere in the file; confirm.
provider = aws.Provider('privileged', assume_role={
    'role_arn': role_to_assume_arn,
    'session_name': 'PulumiSession',
    # NOTE(review): key casing is inconsistent with the snake_case keys above
    # ('externalId' vs 'external_id') — confirm the installed pulumi_aws
    # version accepts the camelCase spelling.
    'externalId': 'PulumiApplication',
}, region=require_region())

# Creates an AWS resource (S3 Bucket) through the assumed-role provider.
bucket = aws.s3.Bucket('my-bucket', opts=ResourceOptions(provider=provider))

# Exports the DNS name of the bucket
export('bucket_name', bucket.bucket_domain_name)
container_definitions=json.dumps([{ "name": "my-app", "image": "nginx", "portMappings": [{ "containerPort": 80, "hostPort": 80, "protocol": "tcp" }] }]) ) service = aws.ecs.Service("app-svc", cluster=cluster.arn, desired_count=1, launch_type="FARGATE", task_definition=task_definition.arn, network_configuration={ "assign_public_ip": "true", "subnets": default_vpc_subnets.ids, "security_groups": [group.id] }, load_balancers=[{ "target_group_arn": atg.arn, "container_name": "my-app", "container_port": 80 }], __opts__=ResourceOptions(depends_on=[wl]) ) export("url", alb.dns_name)
from pulumi_aws.mwaa import Environment, EnvironmentNetworkConfigurationArgs
from pulumi import ResourceOptions
from iam import executionRole
from s3 import bucket
from vpc import privateSubnet1, privateSubnet2, securityGroup
from data_engineering_pulumi_components.utils import Tagger

tagger = Tagger(environment_name="dev")

# Managed Airflow (MWAA) environment: DAGs and requirements come from the
# shared S3 bucket; it runs with exactly one worker in the private subnets.
Environment(
    resource_name="airflow",
    airflow_version="1.10.12",
    dag_s3_path="dags",
    environment_class="mw1.small",
    execution_role_arn=executionRole.arn,
    max_workers=1,
    min_workers=1,
    name="TestEnvironment",
    source_bucket_arn=bucket.arn,
    network_configuration=EnvironmentNetworkConfigurationArgs(
        security_group_ids=[securityGroup.id],
        subnet_ids=[privateSubnet1.id, privateSubnet2.id],
    ),
    requirements_s3_path="requirements.txt",
    tags=tagger.create_tags("TestEnvironment"),
    webserver_access_mode="PUBLIC_ONLY",
    # Adopt the pre-existing MWAA environment named "TestEnvironment" into
    # this stack instead of creating a new one (`import_` = Pulumi import).
    opts=ResourceOptions(import_="TestEnvironment"),
)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pulumi import CustomResource, ProviderResource, ResourceOptions


class Provider(ProviderResource):
    """Minimal provider used to exercise explicit-provider plumbing."""

    def __init__(self, name, opts=None):
        super().__init__("test", name, {}, opts)


class Resource(CustomResource):
    """Minimal custom resource created through an explicit provider."""

    def __init__(self, name, opts=None):
        super().__init__("test:index:Resource", name, {}, opts)


# Create a Provider that we'll use to create other resources.
prov = Provider("testprov")

# Use this Provider to create a resource.
res = Resource("testres", ResourceOptions(provider=prov))
actions=["s3:*"],
effect="Allow",
resources=[
    "arn:aws:s3:::data-engineering-pulumi.analytics.justice.gov.uk",
    "arn:aws:s3:::data-engineering-pulumi.analytics.justice.gov.uk/*",
],
sid="S3Policy",
),
]
)

# Attach the policy document built above to the CodeBuild service role.
serviceRolePolicy = RolePolicy(
    resource_name="this",
    policy=policy.json,
    role=serviceRole.id,
    opts=ResourceOptions(parent=serviceRole),
)

# GitHub personal-access-token credential CodeBuild uses to clone the repo.
sourceCredential = SourceCredential(
    resource_name="this",
    auth_type="PERSONAL_ACCESS_TOKEN",
    server_type="GITHUB",
    token=GITHUB_TOKEN,
)

# CodeBuild project that runs `pulumi preview` on database-access pull
# requests. NOTE(review): this call continues beyond the visible chunk.
pullRequestProject = Project(
    resource_name="pull-request",
    name="database-access-pull-request",
    description="Runs pulumi preview on database access pull requests",
    service_role=serviceRole.arn,
    cache=ProjectCacheArgs(type="NO_CACHE"),
def __init__(self, name, credentials, resources, image=None, opts=None):
    """Provision a Jenkins instance as a single component.

    Creates four child Kubernetes resources, all parented by this component:
    a Secret holding the root password, a PersistentVolumeClaim for Jenkins
    state, a Deployment (whose args come from ``createDeploymentArgs``), and
    a LoadBalancer Service exposing HTTP/HTTPS.

    :param name: base name for every child resource.
    :param credentials: dict with the Jenkins credentials; only "password"
        is read here (stored base64-encoded in the Secret).
    :param resources: resource settings forwarded to the deployment args.
    :param image: optional Jenkins container image override.
    :param opts: standard Pulumi resource options.
    """
    super(Instance, self).__init__("jenkins:jenkins:Instance", name, {
        "credentials": credentials,
        "resources": resources,
        "image": image
    }, opts)

    # Kubernetes Secret values must be base64-encoded.
    encoded_password = base64.b64encode(
        credentials["password"].encode("utf-8")).decode("utf-8")

    # The Secret will contain the root password for this instance.
    secret = Secret(
        name + "-secret",
        metadata={"name": name},
        type="Opaque",
        data={"jenkins-password": encoded_password},
        opts=ResourceOptions(parent=self),
    )

    # The PVC provides persistent storage for Jenkins states.
    pvc = PersistentVolumeClaim(
        name + "-pvc",
        metadata={"name": name},
        spec={
            "accessModes": ["ReadWriteOnce"],
            "resources": {"requests": {"storage": "8Gi"}},
        },
        opts=ResourceOptions(parent=self),
    )

    # The Deployment describes the desired state for our Jenkins setup.
    deployment_args = createDeploymentArgs(name, credentials, resources, image)
    deployment = Deployment(
        name + "-deploy",
        metadata=deployment_args["metadata"],
        spec=deployment_args["spec"],
        opts=ResourceOptions(parent=self),
    )

    # The Service exposes Jenkins to the external internet by providing
    # load-balanced ingress for HTTP and HTTPS.
    service = Service(
        name + "-service",
        metadata={"name": name},
        spec={
            "type": "LoadBalancer",
            "ports": [
                {"name": "http", "port": 80, "targetPort": "http"},
                {"name": "https", "port": 443, "targetPort": "https"},
            ],
            "selector": {"app": name},
        },
        opts=ResourceOptions(parent=self),
    )

    # This component resource has no outputs.
    self.register_outputs({})
# Copyright 2016-2018, Pulumi Corporation. All rights reserved. from pulumi import Alias, ComponentResource, export, Resource, ResourceOptions, create_urn, ROOT_STACK_RESOURCE class Resource1(ComponentResource): def __init__(self, name, opts=None): super().__init__("my:module:Resource", name, None, opts) # Scenario #3 - rename a component (and all it's children) # No change to the component... class ComponentThree(ComponentResource): def __init__(self, name, opts=None): super().__init__("my:module:ComponentThree", name, None, opts) # Note that both un-prefixed and parent-name-prefixed child names are supported. For the # later, the implicit alias inherited from the parent alias will include replacing the name # prefix to match the parent alias name. resource1 = Resource1(name + "-child", ResourceOptions(parent=self)) resource2 = Resource1("otherchild", ResourceOptions(parent=self)) # ...but applying an alias to the instance successfully renames both the component and the children. comp3 = ComponentThree("newcomp3", ResourceOptions(aliases=[Alias(name="comp3")]))