Example #1
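 # Excerpt from a component class's __init__: each StackReference points at a
 # sibling "grapl" stack for the current stack name so its outputs can be read.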
 def __init__(self) -> None:
     self.consul = pulumi.StackReference(
         f"grapl/consul/{config.STACK_NAME}")
     self.nomad_server = pulumi.StackReference(
         f"grapl/nomad/{config.STACK_NAME}")
     self.networking = pulumi.StackReference(
         f"grapl/networking/{config.STACK_NAME}")
     self.nomad_agents = pulumi.StackReference(
         f"grapl/nomad-agents/{config.STACK_NAME}")
Example #2
def define_resources():
    mycomponent = MyComponent("mycomponent", inprop="hello")
    myinstance = Instance("instance",
                          name="myvm",
                          value=pulumi.Output.secret("secret_value"))
    mycustom = MyCustom("mycustom", {"instance": myinstance})
    invoke_result = do_invoke()
    myremotecomponent = MyRemoteComponent(
        "myremotecomponent",
        inprop=myinstance.id.apply(lambda v: f"hello: {v}"))

    # Pass myinstance several more times to ensure deserialization of the resource reference
    # works on other asyncio threads.
    for x in range(5):
        MyCustom(f"mycustom{x}", {"instance": myinstance})

    dns_ref = pulumi.StackReference("dns")

    pulumi.export("hello", "world")
    pulumi.export("outprop", mycomponent.outprop)
    pulumi.export("public_ip", myinstance.public_ip)

    return {
        'mycomponent': mycomponent,
        'myinstance': myinstance,
        'mycustom': mycustom,
        'dns_ref': dns_ref,
        'invoke_result': invoke_result,
        'myremotecomponent': myremotecomponent,
    }
Example #3
    def __init__(self, stack_name: str) -> None:
        self.upstream_stack_name = (
            "local-grapl" if config.LOCAL_GRAPL else f"grapl/grapl/{stack_name}"
        )
        ref = pulumi.StackReference(self.upstream_stack_name)

        def require_str(key: str) -> str:
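            # require_output returns an Output[str]; the cast is a typing
            # convenience so callers can treat these attributes as plain strings.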
            return cast(str, ref.require_output(key))

        self.aws_env_vars_for_local = require_str("aws-env-vars-for-local")
        self.analyzer_bucket = require_str("analyzers-bucket")
        self.redis_endpoint = require_str("redis-endpoint")
        self.schema_properties_table_name = require_str("schema-properties-table")
        self.schema_table_name = require_str("schema-table")
        self.sysmon_generator_queue = require_str("sysmon-generator-queue")
        self.sysmon_log_bucket = require_str("sysmon-log-bucket")
        self.test_user_name = require_str("test-user-name")

        self.plugin_work_queue_db_hostname = require_str(
            "plugin-work-queue-db-hostname"
        )
        self.plugin_work_queue_db_port = require_str("plugin-work-queue-db-port")
        self.plugin_work_queue_db_username = require_str(
            "plugin-work-queue-db-username"
        )
        self.plugin_work_queue_db_password = require_str(
            "plugin-work-queue-db-password"
        )

        self.organization_management_db_hostname = require_str(
            "organization-management-db-hostname"
        )
        self.organization_management_db_port = require_str(
            "organization-management-db-port"
        )
        self.organization_management_db_username = require_str(
            "organization-management-db-username"
        )
        self.organization_management_db_password = require_str(
            "organization-management-db-password"
        )

        self.kafka_bootstrap_servers = require_str("kafka-bootstrap-servers")
        self.kafka_e2e_sasl_username = require_str("kafka-e2e-sasl-username")
        self.kafka_e2e_sasl_password = require_str("kafka-e2e-sasl-password")
        self.kafka_e2e_consumer_group_name = require_str(
            "kafka-e2e-consumer-group-name"
        )
        self.test_user_password_secret_id = require_str("test-user-password-secret-id")
Example #4
import pulumi

config = pulumi.Config()

exporterStackName = config.require('exporter_stack_name')
org = config.require('org')
a = pulumi.StackReference(f'{org}/exporter/{exporterStackName}')

pulumi.export('val1', a.require_output('val'))
pulumi.export('val2', pulumi.Output.secret(['d', 'x']))
Example #5
import pulumi
from pulumi_azure_nextgen.resources import latest as resources
from pulumi_azure_nextgen.network import latest as network
from pulumi_azure_nextgen.compute import latest as compute

from pprint import pprint

import json
import os
import random
import string

stackName = pulumi.get_stack()
infrastructure = pulumi.StackReference(f"DevSecNinja/infrastructure/{stackName}")

# Check if the stackName is either develop or production
if stackName == "develop":
    print("Pulumi Stack is develop")
elif stackName == "production":
    print("Pulumi Stack is production - be careful")
else:
    raise AssertionError("Only the develop and production Pulumi stacks are supported")

# Import generic variables from JSON
with open("../../generic/json/generic.json") as generic_params_file:
    generic_params_data = json.load(generic_params_file)
    print("Generic parameter file contents:")
    pprint(generic_params_data)

# Import environment-specific variables from JSON
env_params_file_location = f"../../generic/json/compute-{stackName}.json"
Example #6
"""A Python Pulumi program"""

import pulumi

# get the current stack we're in
stack = pulumi.get_stack()

# get the Pulumi organization we're in
org = 'my-org'

# get a stack reference
stack_ref = pulumi.StackReference(f"{org}/stack-1/{stack}")

# retrieve a value from that stack reference
exported_value_from_other_stack = stack_ref.get_output("exported_value")
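# Note: get_output resolves to None if the name isn't exported by the
# referenced stack; require_output fails fast instead.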

pulumi.export("exported_value", exported_value_from_other_stack)

Example #7
import pulumi
import pulumi_kubernetes as k8s
import pulumi_kubernetes.helm.v3 as helm
from pulumi_kubernetes.core.v1 import Namespace
from pulumi_kubernetes.storage.v1 import StorageClass

# Get the stack
stack = pulumi.get_stack()
sr = "jaxxstorm/cluster/{}".format(stack)
stack_ref = pulumi.StackReference(sr)
# Get the kubeconfig from the stack
kubeconfig = stack_ref.get_output("kubeConfig")
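# kubeconfig is an Output[str] and can be passed directly to the provider below.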

# Get configuration options
config = pulumi.Config()
namespace = config.require("namespace")

# Set up the provider
provider = k8s.Provider("home.lbrlabs", kubeconfig=kubeconfig)

# Create the namespace
ns = Namespace(
    "ns",
    metadata={
        "name": namespace,
    },
    opts=pulumi.ResourceOptions(provider=provider),
)

# Install the helm chart
helm.Chart(
Example #8
## Exercise 1: Add stack references to get the kubeconfig from the "base_cluster" stack.
# Doc: https://www.pulumi.com/docs/intro/concepts/stack/#stackreferences

import pulumi
from pulumi import ResourceOptions
import pulumi_kubernetes as k8s
from pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts

## Exercise 1
# Using config data to get the name of the base stack.
# The Pulumi automation API or other methods could be used to automate the config value.
config = pulumi.Config()
base_cluster_stack_name = config.require("base_cluster_stack")
base_cluster_stack = pulumi.StackReference(base_cluster_stack_name)
kubeconfig = base_cluster_stack.get_output("kubeconfig")

# The K8s provider which supplies the helm chart resource needs to know how to talk to the K8s cluster.
# So, instantiate a K8s provider using the retrieved kubeconfig.
k8s_provider = k8s.Provider('k8s-provider', kubeconfig=kubeconfig)

# Create a chart resource to deploy apache using the k8s provider instantiated above.
apache = Chart('apache-chart',
    ChartOpts(
        chart='apache',
        version='8.3.2',
        fetch_opts=FetchOpts(repo='https://charts.bitnami.com/bitnami')),
    opts=ResourceOptions(provider=k8s_provider))

# Get the helm-deployed apache service IP which isn't known until the chart is deployed.
apache_service_ip = apache.get_resource('v1/Service', 'apache-chart').apply(
    lambda res: res.status.load_balancer.ingress[0].ip)
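
# A possible follow-on (not in the original): export the service IP so other
# stacks can consume it via their own StackReference.
pulumi.export('apache_service_ip', apache_service_ip)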
Example #9
import pulumi
import pulumi_aws as aws

# read local config settings
config = pulumi.Config()
node_type = config.require("node_type")
node_port = config.require_int("node_port")
vpc_stack = config.require("vpc_stack")
ec2_stack = config.require("ec2_stack")

# get stack reference from vpc and ec2
vpc = pulumi.StackReference(vpc_stack)
ec2 = pulumi.StackReference(ec2_stack)

# get vpc/subnet/securitygroup
vpc_id = vpc.get_output("vpc_id")
vpc_azs = vpc.get_output("vpc_azs")
private_subnets = vpc.get_output("private_subnets")
private_subnet_ids = vpc.get_output("private_subnet_ids")
api_sg_id = ec2.get_output("api_sg_id")
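# Note: these values are Outputs; they can be passed directly as resource inputs.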

# create redis security group
redis_sg = aws.ec2.SecurityGroup(
    resource_name="redis access - {}".format(node_port),
    vpc_id=vpc_id,
    description="Enable Redis access",
    ingress=[{
        "protocol": "tcp",
        "from_port": node_port,
        "to_port": node_port,
        "security_groups": [api_sg_id],
Example #10
import pulumi

stack_ref = pulumi.StackReference("stackRef", stack_name="foo/bar/dev")
Example #11
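# Excerpt: assumes module-level imports (pulumi, pulumi_gcp as gcp, base64,
# collections.defaultdict, typing.List/Optional), a SampleMetadataAccessorMembership
# namedtuple, and project constants (DOMAIN, REGION, CUSTOMER_ID, ACCESS_LEVELS,
# TMP_BUCKET_PERIOD_IN_DAYS, the *_SERVICE_ACCOUNT / *_PROJECT values, etc.)
# defined elsewhere in the module.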
def main():  # pylint: disable=too-many-locals,too-many-branches
    """Main entry point."""

    # Fetch configuration.
    config = pulumi.Config()
    enable_release = config.get_bool('enable_release')
    archive_age = config.get_int('archive_age') or 30

    dataset = pulumi.get_stack()

    organization = gcp.organizations.get_organization(domain=DOMAIN)
    project_id = gcp.organizations.get_project().project_id

    dependency_stacks = {}
    for dependency in config.get_object('depends_on') or ():
        dependency_stacks[dependency] = pulumi.StackReference(dependency)

    def org_role_id(id_suffix: str) -> str:
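        # Builds the id of a custom org-level IAM role assumed to already exist.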
        return f'{organization.id}/roles/{id_suffix}'

    lister_role_id = org_role_id('StorageLister')
    viewer_creator_role_id = org_role_id('StorageViewerAndCreator')
    viewer_role_id = org_role_id('StorageObjectAndBucketViewer')

    # The Cloud Resource Manager API is required for the Cloud Identity API.
    cloudresourcemanager = gcp.projects.Service(
        'cloudresourcemanager-service',
        service='cloudresourcemanager.googleapis.com',
        disable_on_destroy=False,
    )

    # The Cloud Identity API is required for creating access groups and service accounts.
    cloudidentity = gcp.projects.Service(
        'cloudidentity-service',
        service='cloudidentity.googleapis.com',
        disable_on_destroy=False,
        opts=pulumi.resource.ResourceOptions(
            depends_on=[cloudresourcemanager]),
    )

    # Enable Dataproc until the Hail Query Service is ready.
    _ = gcp.projects.Service(
        'dataproc-service',
        service='dataproc.googleapis.com',
        disable_on_destroy=False,
    )

    service_accounts = defaultdict(list)
    for kind in 'hail', 'deployment':
        for access_level in ACCESS_LEVELS:
            service_account = config.get(
                f'{kind}_service_account_{access_level}')
            if service_account:
                service_accounts[kind].append((access_level, service_account))

    # Create Dataproc and Cromwell service accounts.
    for kind in 'dataproc', 'cromwell':
        service_accounts[kind] = []
        for access_level in ACCESS_LEVELS:
            account = gcp.serviceaccount.Account(
                f'{kind}-service-account-{access_level}',
                account_id=f'{kind}-{access_level}',
                opts=pulumi.resource.ResourceOptions(
                    depends_on=[cloudidentity]),
            )
            service_accounts[kind].append((access_level, account.email))

    def service_accounts_gen():
        for kind, values in service_accounts.items():
            for access_level, service_account in values:
                yield kind, access_level, service_account

    def bucket_name(kind: str) -> str:
        """Returns the bucket name for the given dataset."""
        return f'cpg-{dataset}-{kind}'

    def create_bucket(name: str,
                      enable_versioning=True,
                      **kwargs) -> gcp.storage.Bucket:
        """Returns a new GCS bucket."""
        return gcp.storage.Bucket(
            name,
            name=name,
            location=REGION,
            uniform_bucket_level_access=True,
            versioning=gcp.storage.BucketVersioningArgs(
                enabled=enable_versioning),
            labels={'bucket': name},
            **kwargs,
        )

    def bucket_member(*args, **kwargs):
        """Wraps gcp.storage.BucketIAMMember.

        When resources are renamed, it can be useful to explicitly apply changes in two
        phases: delete followed by create. That's the opposite of the default order
        (create followed by delete), which for identical IAM bindings can end up
        deleting the just-recreated binding, leaving permissions missing. To implement
        the first phase (delete), temporarily change this implementation to a no-op.
        """
        gcp.storage.BucketIAMMember(*args, **kwargs)

    undelete_rule = gcp.storage.BucketLifecycleRuleArgs(
        action=gcp.storage.BucketLifecycleRuleActionArgs(type='Delete'),
        condition=gcp.storage.BucketLifecycleRuleConditionArgs(
            age=30, with_state='ARCHIVED'),
    )
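    # With versioning enabled, a delete only archives an object; the rule above
    # purges archived versions after 30 days, i.e. a 30-day "undelete" window.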

    main_upload_account = gcp.serviceaccount.Account(
        'main-upload-service-account',
        account_id='main-upload',
        display_name='main-upload',
        opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
    )

    main_upload_buckets = {
        'main-upload':
        create_bucket(bucket_name('main-upload'),
                      lifecycle_rules=[undelete_rule])
    }

    for additional_upload_bucket in (
            config.get_object('additional_upload_buckets') or ()):
        main_upload_buckets[additional_upload_bucket] = create_bucket(
            additional_upload_bucket, lifecycle_rules=[undelete_rule])

    test_upload_bucket = create_bucket(bucket_name('test-upload'),
                                       lifecycle_rules=[undelete_rule])

    # Grant admin permissions as composite uploads need to delete temporary files.
    for bname, upload_bucket in main_upload_buckets.items():
        bucket_member(
            f'main-upload-service-account-{bname}-bucket-creator',
            bucket=upload_bucket.name,
            role='roles/storage.admin',
            member=pulumi.Output.concat('serviceAccount:',
                                        main_upload_account.email),
        )

    archive_bucket = create_bucket(
        bucket_name('archive'),
        lifecycle_rules=[
            gcp.storage.BucketLifecycleRuleArgs(
                action=gcp.storage.BucketLifecycleRuleActionArgs(
                    type='SetStorageClass', storage_class='ARCHIVE'),
                condition=gcp.storage.BucketLifecycleRuleConditionArgs(
                    age=archive_age),
            ),
            undelete_rule,
        ],
    )

    test_bucket = create_bucket(bucket_name('test'),
                                lifecycle_rules=[undelete_rule])

    # tmp buckets don't have an undelete lifecycle rule, to avoid paying for
    # intermediate results that get cleaned up immediately after workflow runs.
    test_tmp_bucket = create_bucket(
        bucket_name('test-tmp'),
        enable_versioning=False,
        lifecycle_rules=[
            gcp.storage.BucketLifecycleRuleArgs(
                action=gcp.storage.BucketLifecycleRuleActionArgs(
                    type='Delete'),
                condition=gcp.storage.BucketLifecycleRuleConditionArgs(
                    age=TMP_BUCKET_PERIOD_IN_DAYS),
            )
        ],
    )

    test_analysis_bucket = create_bucket(bucket_name('test-analysis'),
                                         lifecycle_rules=[undelete_rule])

    test_web_bucket = create_bucket(bucket_name('test-web'),
                                    lifecycle_rules=[undelete_rule])

    main_bucket = create_bucket(bucket_name('main'),
                                lifecycle_rules=[undelete_rule])

    # tmp buckets don't have an undelete lifecycle rule, to avoid paying for
    # intermediate results that get cleaned up immediately after workflow runs.
    main_tmp_bucket = create_bucket(
        bucket_name('main-tmp'),
        enable_versioning=False,
        lifecycle_rules=[
            gcp.storage.BucketLifecycleRuleArgs(
                action=gcp.storage.BucketLifecycleRuleActionArgs(
                    type='Delete'),
                condition=gcp.storage.BucketLifecycleRuleConditionArgs(
                    age=TMP_BUCKET_PERIOD_IN_DAYS),
            )
        ],
    )

    main_analysis_bucket = create_bucket(bucket_name('main-analysis'),
                                         lifecycle_rules=[undelete_rule])

    main_web_bucket = create_bucket(bucket_name('main-web'),
                                    lifecycle_rules=[undelete_rule])

    def group_mail(dataset: str, kind: str) -> str:
        """Returns the email address of a permissions group."""
        return f'{dataset}-{kind}@{DOMAIN}'

    def create_group(mail: str) -> gcp.cloudidentity.Group:
        """Returns a new Cloud Identity group for the given email address."""
        name = mail.split('@')[0]
        return gcp.cloudidentity.Group(
            name,
            display_name=name,
            group_key=gcp.cloudidentity.GroupGroupKeyArgs(id=mail),
            labels={
                'cloudidentity.googleapis.com/groups.discussion_forum': ''
            },
            parent=f'customers/{CUSTOMER_ID}',
            opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
        )

    def create_secret(resource_name: str, secret_id: str, **kwargs):
        return gcp.secretmanager.Secret(
            resource_name,
            secret_id=secret_id,
            replication=gcp.secretmanager.SecretReplicationArgs(
                user_managed=gcp.secretmanager.
                SecretReplicationUserManagedArgs(replicas=[
                    gcp.secretmanager.SecretReplicationUserManagedReplicaArgs(
                        location='australia-southeast1', ),
                ], ), ),
            opts=pulumi.resource.ResourceOptions(depends_on=[secretmanager]),
            **kwargs,
        )

    def add_access_group_cache_as_secret_member(secret, resource_prefix: str):
        gcp.secretmanager.SecretIamMember(
            f'{resource_prefix}-group-cache-secret-accessor',
            secret_id=secret.id,
            role='roles/secretmanager.secretAccessor',
            member=f'serviceAccount:{ACCESS_GROUP_CACHE_SERVICE_ACCOUNT}',
        )

        gcp.secretmanager.SecretIamMember(
            f'{resource_prefix}-group-cache-secret-version-manager',
            secret_id=secret.id,
            role='roles/secretmanager.secretVersionManager',
            member=f'serviceAccount:{ACCESS_GROUP_CACHE_SERVICE_ACCOUNT}',
        )

    access_group_mail = group_mail(dataset, 'access')
    access_group = create_group(access_group_mail)
    web_access_group = create_group(group_mail(dataset, 'web-access'))

    # other stacks require the access group to exist
    pulumi.export(
        access_group_mail.split('@')[0] + '-group-id', access_group.id)

    # Create groups for each access level.
    access_level_groups = {}
    for access_level in ACCESS_LEVELS:
        group = create_group(group_mail(dataset, access_level))
        access_level_groups[access_level] = group

        # The group provider ID is used by other stacks that depend on this one.
        group_provider_id_name = f'{access_level}-access-group-id'
        pulumi.export(group_provider_id_name, group.id)

        # Allow the access group cache to list memberships.
        gcp.cloudidentity.GroupMembership(
            f'access-group-cache-{access_level}-access-level-group-membership',
            group=group.id,
            preferred_member_key=gcp.cloudidentity.
            GroupMembershipPreferredMemberKeyArgs(
                id=ACCESS_GROUP_CACHE_SERVICE_ACCOUNT),
            roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
            opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
        )

        # Provide access transitively to datasets we depend on
        for dependency in config.get_object('depends_on') or ():
            dependency_group_id = dependency_stacks[dependency].get_output(
                group_provider_id_name, )

            dependency_group = gcp.cloudidentity.Group.get(
                f'{dependency}-{access_level}-access-level-group',
                dependency_group_id,
            )

            gcp.cloudidentity.GroupMembership(
                f'{dependency}-{access_level}-access-level-group-membership',
                group=dependency_group.id,
                preferred_member_key=group.group_key,
                roles=[
                    gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')
                ],
                opts=pulumi.resource.ResourceOptions(
                    depends_on=[cloudidentity]),
            )

    for dependency, dstack in dependency_stacks.items():
        # add the {dataset}-access group to the dependency
        depends_on_access_group_name = (
            group_mail(dependency, 'access').split('@')[0] + '-group-id')
        depends_on_access_group_id = dstack.get_output(
            depends_on_access_group_name, )
        depends_on_access_group = gcp.cloudidentity.Group.get(
            depends_on_access_group_name, depends_on_access_group_id)
        gcp.cloudidentity.GroupMembership(
            f'{dataset}-{dependency}-access',
            group=depends_on_access_group,
            preferred_member_key=access_group.group_key,
            roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
            opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
        )

    for kind, access_level, service_account in service_accounts_gen():
        gcp.cloudidentity.GroupMembership(
            f'{kind}-{access_level}-access-level-group-membership',
            group=access_level_groups[access_level],
            preferred_member_key=gcp.cloudidentity.
            GroupMembershipPreferredMemberKeyArgs(id=service_account),
            roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
            opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
        )

    secretmanager = gcp.projects.Service(
        'secretmanager-service',
        service='secretmanager.googleapis.com',
        disable_on_destroy=False,
    )

    # These secrets are used as a fast cache for checking memberships in the above groups.
    access_group_cache_secrets = {}
    for group_prefix in ('access', 'web-access') + ACCESS_LEVELS:
        access_secret = create_secret(
            f'{group_prefix}-group-cache-secret',
            secret_id=f'{dataset}-{group_prefix}-members-cache',
        )

        add_access_group_cache_as_secret_member(access_secret, group_prefix)

        access_group_cache_secrets[group_prefix] = access_secret

    gcp.secretmanager.SecretIamMember(
        'analysis-runner-access-group-cache-secret-accessor',
        secret_id=access_group_cache_secrets['access'].id,
        role='roles/secretmanager.secretAccessor',
        member=f'serviceAccount:{ANALYSIS_RUNNER_SERVICE_ACCOUNT}',
    )

    gcp.secretmanager.SecretIamMember(
        'web-server-web-access-group-cache-secret-accessor',
        secret_id=access_group_cache_secrets['web-access'].id,
        role='roles/secretmanager.secretAccessor',
        member=f'serviceAccount:{WEB_SERVER_SERVICE_ACCOUNT}',
    )

    # Sample metadata access

    # permissions for read / write
    #   - 4 secrets, main-read, main-write, test-read, test-write
    sm_groups = {}
    for env in ('main', 'test'):
        for rs in ('read', 'write'):
            key = f'sample-metadata-{env}-{rs}'

            group = create_group(group_mail(dataset, key))
            sm_groups[f'{env}-{rs}'] = group

            gcp.cloudidentity.GroupMembership(
                f'sample-metadata-group-cache-{env}-{rs}-group-membership',
                group=group,
                preferred_member_key=gcp.cloudidentity.
                GroupMembershipPreferredMemberKeyArgs(
                    id=ACCESS_GROUP_CACHE_SERVICE_ACCOUNT),
                roles=[
                    gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')
                ],
                opts=pulumi.resource.ResourceOptions(
                    depends_on=[cloudidentity]),
            )

            secret = create_secret(
                f'{key}-group-cache-secret',
                secret_id=f'{dataset}-{key}-members-cache',
            )
            add_access_group_cache_as_secret_member(secret,
                                                    resource_prefix=key)

            gcp.secretmanager.SecretIamMember(
                f'{key}-api-secret-accessor',
                secret_id=secret.id,
                role='roles/secretmanager.secretAccessor',
                member=f'serviceAccount:{SAMPLE_METADATA_API_SERVICE_ACCOUNT}',
            )

    # Add cloud run invoker to analysis-runner for the access-group
    gcp.cloudrun.IamMember(
        'analysis-runner-access-invoker',
        location=REGION,
        project=ANALYSIS_RUNNER_PROJECT,
        service='server',
        role='roles/run.invoker',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    # Declare access to sample-metadata API of format ({env}-{read,write})
    sm_access_levels: List[SampleMetadataAccessorMembership] = [
        SampleMetadataAccessorMembership(
            name='human',
            member_key=access_group.group_key.id,
            permissions=('main-read', 'test-read', 'test-write'),
        ),
        SampleMetadataAccessorMembership(
            name='test',
            member_key=access_level_groups['test'].group_key.id,
            permissions=('main-read', 'test-read', 'test-write'),
        ),
        SampleMetadataAccessorMembership(
            name='standard',
            member_key=access_level_groups['standard'].group_key.id,
            permissions=('main-read', 'main-write'),
        ),
        SampleMetadataAccessorMembership(
            name='full',
            member_key=access_level_groups['full'].group_key.id,
            permissions=sm_groups.keys(),
        ),
        # allow the analysis-runner logging cloud function to update the sample-metadata project
        SampleMetadataAccessorMembership(
            name='analysis-runner-logger',
            member_key=ANALYSIS_RUNNER_LOGGER_SERVICE_ACCOUNT,
            permissions=sm_groups.keys(),
        ),
    ]

    # give access to sample_metadata groups (and hence sample-metadata API through secrets)
    for name, member_key, permissions in sm_access_levels:
        for kind in permissions:
            gcp.cloudidentity.GroupMembership(
                f'sample-metadata-{kind}-{name}-access-level-group-membership',
                group=sm_groups[kind],
                preferred_member_key=gcp.cloudidentity.
                GroupMembershipPreferredMemberKeyArgs(id=member_key),
                roles=[
                    gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')
                ],
                opts=pulumi.resource.ResourceOptions(
                    depends_on=[cloudidentity]),
            )

    gcp.projects.IAMMember(
        'project-buckets-lister',
        role=lister_role_id,
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    # Grant visibility to Dataproc utilization metrics etc.
    gcp.projects.IAMMember(
        'project-monitoring-viewer',
        role='roles/monitoring.viewer',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    bucket_member(
        'access-group-test-bucket-admin',
        bucket=test_bucket.name,
        role='roles/storage.admin',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    bucket_member(
        'access-group-test-upload-bucket-admin',
        bucket=test_upload_bucket.name,
        role='roles/storage.admin',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    bucket_member(
        'access-group-test-tmp-bucket-admin',
        bucket=test_tmp_bucket.name,
        role='roles/storage.admin',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    bucket_member(
        'access-group-test-analysis-bucket-admin',
        bucket=test_analysis_bucket.name,
        role='roles/storage.admin',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    bucket_member(
        'access-group-test-web-bucket-admin',
        bucket=test_web_bucket.name,
        role='roles/storage.admin',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )
    for bname, upload_bucket in main_upload_buckets.items():
        bucket_member(
            f'access-group-{bname}-bucket-viewer',
            bucket=upload_bucket.name,
            role=viewer_role_id,
            member=pulumi.Output.concat('group:', access_group.group_key.id),
        )

    bucket_member(
        'access-group-main-analysis-bucket-viewer',
        bucket=main_analysis_bucket.name,
        role=viewer_role_id,
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    bucket_member(
        'access-group-main-web-bucket-viewer',
        bucket=main_web_bucket.name,
        role=viewer_role_id,
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    if enable_release:
        release_bucket = create_bucket(
            bucket_name('release-requester-pays'),
            lifecycle_rules=[undelete_rule],
            requester_pays=True,
        )

        bucket_member(
            'access-group-release-bucket-viewer',
            bucket=release_bucket.name,
            role=viewer_role_id,
            member=pulumi.Output.concat('group:', access_group.group_key.id),
        )

        release_access_group = create_group(
            group_mail(dataset, 'release-access'))

        bucket_member(
            'release-access-group-release-bucket-viewer',
            bucket=release_bucket.name,
            role=viewer_role_id,
            member=pulumi.Output.concat('group:',
                                        release_access_group.group_key.id),
        )

    bucket_member(
        'web-server-test-web-bucket-viewer',
        bucket=test_web_bucket.name,
        role=viewer_role_id,
        member=pulumi.Output.concat('serviceAccount:',
                                    WEB_SERVER_SERVICE_ACCOUNT),
    )

    bucket_member(
        'web-server-main-web-bucket-viewer',
        bucket=main_web_bucket.name,
        role=viewer_role_id,
        member=pulumi.Output.concat('serviceAccount:',
                                    WEB_SERVER_SERVICE_ACCOUNT),
    )

    # Allow the usage of requester-pays buckets.
    gcp.projects.IAMMember(
        'access-group-serviceusage-consumer',
        role='roles/serviceusage.serviceUsageConsumer',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    # Allow the access group cache to list memberships.
    gcp.cloudidentity.GroupMembership(
        'access-group-cache-membership',
        group=access_group.id,
        preferred_member_key=gcp.cloudidentity.
        GroupMembershipPreferredMemberKeyArgs(
            id=ACCESS_GROUP_CACHE_SERVICE_ACCOUNT),
        roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
        opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
    )

    gcp.cloudidentity.GroupMembership(
        'web-access-group-cache-membership',
        group=web_access_group.id,
        preferred_member_key=gcp.cloudidentity.
        GroupMembershipPreferredMemberKeyArgs(
            id=ACCESS_GROUP_CACHE_SERVICE_ACCOUNT),
        roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
        opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
    )

    # All members of the access group have web access automatically.
    gcp.cloudidentity.GroupMembership(
        'web-access-group-access-group-membership',
        group=web_access_group.id,
        preferred_member_key=access_group.group_key,
        roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
        opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
    )

    for access_level, group in access_level_groups.items():
        # Allow the service accounts to pull images. Note that the global project will
        # refer to the dataset, but the Docker images are stored in the "analysis-runner"
        # and "cpg-common" projects' Artifact Registry repositories.
        for project in [ANALYSIS_RUNNER_PROJECT, CPG_COMMON_PROJECT]:
            gcp.artifactregistry.RepositoryIamMember(
                f'{access_level}-images-reader-in-{project}',
                project=project,
                location=REGION,
                repository='images',
                role='roles/artifactregistry.reader',
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

        # Allow non-test service accounts to write images to the "cpg-common" Artifact
        # Registry repository.
        if access_level != 'test':
            gcp.artifactregistry.RepositoryIamMember(
                f'{access_level}-images-writer-in-cpg-common',
                project=CPG_COMMON_PROJECT,
                location=REGION,
                repository='images',
                role='roles/artifactregistry.writer',
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

        # Read access to reference data.
        bucket_member(
            f'{access_level}-reference-bucket-viewer',
            bucket=REFERENCE_BUCKET_NAME,
            role=viewer_role_id,
            member=pulumi.Output.concat('group:', group.group_key.id),
        )

        # Allow the usage of requester-pays buckets.
        gcp.projects.IAMMember(
            f'{access_level}-serviceusage-consumer',
            role='roles/serviceusage.serviceUsageConsumer',
            member=pulumi.Output.concat('group:', group.group_key.id),
        )

    # The bucket used for Hail Batch pipelines.
    hail_bucket = create_bucket(bucket_name('hail'),
                                lifecycle_rules=[undelete_rule])

    for access_level, service_account in service_accounts['hail']:
        # Full access to the Hail Batch bucket.
        bucket_member(
            f'hail-service-account-{access_level}-hail-bucket-admin',
            bucket=hail_bucket.name,
            role='roles/storage.admin',
            member=pulumi.Output.concat('serviceAccount:', service_account),
        )

    # Permissions increase by access level:
    # - test: view / create on any "test" bucket
    # - standard: view / create on any "test" or "main" bucket
    # - full: view / create / delete anywhere
    for access_level, group in access_level_groups.items():
        # test bucket
        bucket_member(
            f'{access_level}-test-bucket-admin',
            bucket=test_bucket.name,
            role='roles/storage.admin',
            member=pulumi.Output.concat('group:', group.group_key.id),
        )

        # test-upload bucket
        bucket_member(
            f'{access_level}-test-upload-bucket-admin',
            bucket=test_upload_bucket.name,
            role='roles/storage.admin',
            member=pulumi.Output.concat('group:', group.group_key.id),
        )

        # test-tmp bucket
        bucket_member(
            f'{access_level}-test-tmp-bucket-admin',
            bucket=test_tmp_bucket.name,
            role='roles/storage.admin',
            member=pulumi.Output.concat('group:', group.group_key.id),
        )

        # test-analysis bucket
        bucket_member(
            f'{access_level}-test-analysis-bucket-admin',
            bucket=test_analysis_bucket.name,
            role='roles/storage.admin',
            member=pulumi.Output.concat('group:', group.group_key.id),
        )

        # test-web bucket
        bucket_member(
            f'{access_level}-test-web-bucket-admin',
            bucket=test_web_bucket.name,
            role='roles/storage.admin',
            member=pulumi.Output.concat('group:', group.group_key.id),
        )

        if access_level == 'standard':
            # main bucket
            bucket_member(
                'standard-main-bucket-view-create',
                bucket=main_bucket.name,
                role=viewer_creator_role_id,
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # main-upload bucket
            for bname, upload_bucket in main_upload_buckets.items():
                bucket_member(
                    f'standard-{bname}-bucket-viewer',
                    bucket=upload_bucket.name,
                    role=viewer_role_id,
                    member=pulumi.Output.concat('group:', group.group_key.id),
                )

            # main-tmp bucket
            bucket_member(
                'standard-main-tmp-bucket-view-create',
                bucket=main_tmp_bucket.name,
                role=viewer_creator_role_id,
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # main-analysis bucket
            bucket_member(
                'standard-main-analysis-bucket-view-create',
                bucket=main_analysis_bucket.name,
                role=viewer_creator_role_id,
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # main-web bucket
            bucket_member(
                'standard-main-web-bucket-view-create',
                bucket=main_web_bucket.name,
                role=viewer_creator_role_id,
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

        if access_level == 'full':
            # main bucket
            bucket_member(
                'full-main-bucket-admin',
                bucket=main_bucket.name,
                role='roles/storage.admin',
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # main-upload bucket
            for bname, upload_bucket in main_upload_buckets.items():
                bucket_member(
                    f'full-{bname}-bucket-admin',
                    bucket=upload_bucket.name,
                    role='roles/storage.admin',
                    member=pulumi.Output.concat('group:', group.group_key.id),
                )

            # main-tmp bucket
            bucket_member(
                'full-main-tmp-bucket-admin',
                bucket=main_tmp_bucket.name,
                role='roles/storage.admin',
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # main-analysis bucket
            bucket_member(
                'full-main-analysis-bucket-admin',
                bucket=main_analysis_bucket.name,
                role='roles/storage.admin',
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # main-web bucket
            bucket_member(
                'full-main-web-bucket-admin',
                bucket=main_web_bucket.name,
                role='roles/storage.admin',
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # archive bucket
            bucket_member(
                'full-archive-bucket-admin',
                bucket=archive_bucket.name,
                role='roles/storage.admin',
                member=pulumi.Output.concat('group:', group.group_key.id),
            )

            # release bucket
            if enable_release:
                bucket_member(
                    'full-release-bucket-admin',
                    bucket=release_bucket.name,
                    role='roles/storage.admin',
                    member=pulumi.Output.concat('group:', group.group_key.id),
                )

    # Notebook permissions
    notebook_account = gcp.serviceaccount.Account(
        'notebook-account',
        project=NOTEBOOKS_PROJECT,
        account_id=f'notebook-{dataset}',
        display_name=f'Notebook service account for dataset {dataset}',
        opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
    )

    gcp.projects.IAMMember(
        'notebook-account-compute-admin',
        project=NOTEBOOKS_PROJECT,
        role='roles/compute.admin',
        member=pulumi.Output.concat('serviceAccount:', notebook_account.email),
    )

    gcp.serviceaccount.IAMMember(
        'notebook-account-users',
        service_account_id=notebook_account.name,
        role='roles/iam.serviceAccountUser',
        member=pulumi.Output.concat('group:', access_group.group_key.id),
    )

    # Grant the notebook account the same permissions as the access group members.
    gcp.cloudidentity.GroupMembership(
        'notebook-service-account-access-group-member',
        group=access_group.id,
        preferred_member_key=gcp.cloudidentity.
        GroupMembershipPreferredMemberKeyArgs(id=notebook_account.email),
        roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
        opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
    )

    def find_service_account(kind: str, access_level: str) -> Optional[str]:
        for local_access_level, service_account in service_accounts[kind]:
            if access_level == local_access_level:
                return service_account
        return None

    for access_level, service_account in service_accounts['dataproc']:
        # Hail Batch service accounts need to be able to act as Dataproc service
        # accounts to start Dataproc clusters.
        gcp.serviceaccount.IAMMember(
            f'hail-service-account-{access_level}-dataproc-service-account-user',
            service_account_id=pulumi.Output.concat('projects/', project_id,
                                                    '/serviceAccounts/',
                                                    service_account),
            role='roles/iam.serviceAccountUser',
            member=pulumi.Output.concat(
                'serviceAccount:', find_service_account('hail', access_level)),
        )

        gcp.projects.IAMMember(
            f'dataproc-service-account-{access_level}-dataproc-worker',
            role='roles/dataproc.worker',
            member=pulumi.Output.concat('serviceAccount:', service_account),
        )

    for access_level, service_account in service_accounts['hail']:
        # The Hail service account creates the cluster, specifying the Dataproc service
        # account as the worker.
        gcp.projects.IAMMember(
            f'hail-service-account-{access_level}-dataproc-admin',
            role='roles/dataproc.admin',
            member=pulumi.Output.concat('serviceAccount:', service_account),
        )

        # Worker permissions are necessary to submit jobs.
        gcp.projects.IAMMember(
            f'hail-service-account-{access_level}-dataproc-worker',
            role='roles/dataproc.worker',
            member=pulumi.Output.concat('serviceAccount:', service_account),
        )

        # Add Hail service accounts to Cromwell access group.
        gcp.cloudidentity.GroupMembership(
            f'hail-service-account-{access_level}-cromwell-access',
            group=CROMWELL_ACCESS_GROUP_ID,
            preferred_member_key=gcp.cloudidentity.
            GroupMembershipPreferredMemberKeyArgs(id=service_account, ),
            roles=[gcp.cloudidentity.GroupMembershipRoleArgs(name='MEMBER')],
            opts=pulumi.resource.ResourceOptions(depends_on=[cloudidentity]),
        )

    for access_level, service_account in service_accounts['cromwell']:
        # Allow the Cromwell server to run worker VMs using the Cromwell service
        # accounts.
        gcp.serviceaccount.IAMMember(
            f'cromwell-runner-{access_level}-service-account-user',
            service_account_id=pulumi.Output.concat('projects/', project_id,
                                                    '/serviceAccounts/',
                                                    service_account),
            role='roles/iam.serviceAccountUser',
            member=f'serviceAccount:{CROMWELL_RUNNER_ACCOUNT}',
        )

        # To use a service account for VMs, Cromwell accounts need to be allowed to act
        # on their own behalf ;).
        gcp.serviceaccount.IAMMember(
            f'cromwell-service-account-{access_level}-service-account-user',
            service_account_id=pulumi.Output.concat('projects/', project_id,
                                                    '/serviceAccounts/',
                                                    service_account),
            role='roles/iam.serviceAccountUser',
            member=pulumi.Output.concat('serviceAccount:', service_account),
        )

        # Allow the Cromwell service accounts to run workflows.
        gcp.projects.IAMMember(
            f'cromwell-service-account-{access_level}-workflows-runner',
            role='roles/lifesciences.workflowsRunner',
            member=pulumi.Output.concat('serviceAccount:', service_account),
        )

        # Store the service account key as a secret that's readable by the
        # analysis-runner.
        key = gcp.serviceaccount.Key(
            f'cromwell-service-account-{access_level}-key',
            service_account_id=service_account,
        )

        secret = create_secret(
            f'cromwell-service-account-{access_level}-secret',
            secret_id=f'{dataset}-cromwell-{access_level}-key',
            project=ANALYSIS_RUNNER_PROJECT,
        )

        gcp.secretmanager.SecretVersion(
            f'cromwell-service-account-{access_level}-secret-version',
            secret=secret.id,
            secret_data=key.private_key.apply(
                lambda s: base64.b64decode(s).decode('utf-8')),
        )

        gcp.secretmanager.SecretIamMember(
            f'cromwell-service-account-{access_level}-secret-accessor',
            project=ANALYSIS_RUNNER_PROJECT,
            secret_id=secret.id,
            role='roles/secretmanager.secretAccessor',
            member=f'serviceAccount:{ANALYSIS_RUNNER_SERVICE_ACCOUNT}',
        )

        # Allow the Hail service account to access its corresponding cromwell key
        hail_service_account = find_service_account('hail', access_level)
        gcp.secretmanager.SecretIamMember(
            f'cromwell-service-account-{access_level}-self-accessor',
            project=ANALYSIS_RUNNER_PROJECT,
            secret_id=secret.id,
            role='roles/secretmanager.secretAccessor',
            member=f'serviceAccount:{hail_service_account}',
        )

    for access_level, group in access_level_groups.items():
        # Give hail / dataproc / cromwell access to sample-metadata cloud run service
        gcp.cloudrun.IamMember(
            f'sample-metadata-service-account-{access_level}-invoker',
            location=REGION,
            project=SAMPLE_METADATA_PROJECT,
            service='sample-metadata-api',
            role='roles/run.invoker',
            member=pulumi.Output.concat('group:', group.group_key.id),
        )
Example #12
# Copyright 2020, Pulumi Corporation.  All rights reserved.

import pulumi

config = pulumi.Config()
org = config.require('org')
slug = f"{org}/{pulumi.get_project()}/{pulumi.get_stack()}"
a = pulumi.StackReference(slug)

old_val = a.get_output('val')

# get_output returns an Output; resolve it with apply before inspecting it.
def check_val(val):
    if len(val) != 2 or val[0] != 'a' or val[1] != 'b':
        raise Exception('Invalid result')

old_val.apply(check_val)

pulumi.export('val2', pulumi.Output.secret(['a', 'b']))
Example #13
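# Excerpt: assumes pulumi, pulumi_docker as docker, and pulumi_github as github
# are imported, and that my_image and env are defined in the elided code above.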
)

# pulumi.export('IMAGE', my_image)

# image_to_use = docker.RemoteImage("my-nginx",
#     name="pulumi-demo:0.0.1",
#     keep_locally=True
# )

container = docker.Container("my-nginx",
    name="my-nginx",
    image=my_image.base_image_name,
    ports=[{"internal":80, "external":80}]
)

# pulumi.export('CONTAINER', container)

repo = pulumi.StackReference('aldenso/github_example/'+env)
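# get_output returns an Output; the ["id"] indexing on it below relies on
# Pulumi's lifted key access on Outputs.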

with open("Dockerfile", 'r') as f:
    filedata = f.read()

dockerfile = github.RepositoryFile("Dockerfile",
    content=filedata,
    file="Dockerfile",
    repository=repo.get_output("repo")["id"],
    branch="master"
)

# pulumi.export('DOCKERFILE', dockerfile)
Example #14
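# Excerpt from a Grapl deployment entrypoint: assumes pulumi, pulumi_aws as aws,
# os, typing.Optional, and project-local helpers (config, ArtifactGetter,
# GraplStack, NomadJob, NomadVars, register_auto_tags,
# get_nomad_provider_address, path_from_root) are imported elsewhere.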
def main() -> None:
    ##### Preamble
    stack_name = config.STACK_NAME

    pulumi_config = pulumi.Config()
    artifacts = ArtifactGetter.from_config(pulumi_config)

    # These tags will be added to all provisioned infrastructure
    # objects.
    register_auto_tags(
        {"pulumi:project": pulumi.get_project(), "pulumi:stack": stack_name}
    )

    nomad_provider: Optional[pulumi.ProviderResource] = None
    if not config.LOCAL_GRAPL:
        nomad_server_stack = pulumi.StackReference(f"grapl/nomad/{stack_name}")
        nomad_provider = get_nomad_provider_address(nomad_server_stack)

    ##### Business Logic
    grapl_stack = GraplStack(stack_name)

    e2e_test_job_vars: NomadVars = {
        "analyzer_bucket": grapl_stack.analyzer_bucket,
        "aws_env_vars_for_local": grapl_stack.aws_env_vars_for_local,
        "aws_region": aws.get_region().name,
        "container_images": _e2e_container_images(artifacts),
        # Used by graplctl to determine if it should manual-event or not
        "stack_name": grapl_stack.upstream_stack_name,
        "kafka_bootstrap_servers": grapl_stack.kafka_bootstrap_servers,
        "kafka_sasl_username": grapl_stack.kafka_e2e_sasl_username,
        "kafka_sasl_password": grapl_stack.kafka_e2e_sasl_password,
        "kafka_consumer_group_name": grapl_stack.kafka_e2e_consumer_group_name,
        "schema_properties_table_name": grapl_stack.schema_properties_table_name,
        "sysmon_log_bucket": grapl_stack.sysmon_log_bucket,
        "schema_table_name": grapl_stack.schema_table_name,
        "sysmon_generator_queue": grapl_stack.sysmon_generator_queue,
        "test_user_name": grapl_stack.test_user_name,
        "test_user_password_secret_id": grapl_stack.test_user_password_secret_id,
    }

    e2e_tests = NomadJob(
        "e2e-tests",
        jobspec=path_from_root("nomad/e2e-tests.nomad").resolve(),
        vars=e2e_test_job_vars,
        opts=pulumi.ResourceOptions(provider=nomad_provider),
    )

    if config.LOCAL_GRAPL:
        # We don't do integration tests in AWS yet, mostly because the current
        # Python Pants integration test setup is funky and requires an on-disk
        # Grapl repo.

        integration_test_job_vars: NomadVars = {
            "aws_env_vars_for_local": grapl_stack.aws_env_vars_for_local,
            "aws_region": aws.get_region().name,
            "container_images": _integration_container_images(artifacts),
            "docker_user": os.environ["DOCKER_USER"],
            "grapl_root": os.environ["GRAPL_ROOT"],
            "kafka_bootstrap_servers": grapl_stack.kafka_bootstrap_servers,
            "kafka_sasl_username": grapl_stack.kafka_e2e_sasl_username,
            "kafka_sasl_password": grapl_stack.kafka_e2e_sasl_password,
            "redis_endpoint": grapl_stack.redis_endpoint,
            "schema_properties_table_name": grapl_stack.schema_properties_table_name,
            "test_user_name": grapl_stack.test_user_name,
            "test_user_password_secret_id": grapl_stack.test_user_password_secret_id,
            "plugin_work_queue_db_hostname": grapl_stack.plugin_work_queue_db_hostname,
            "plugin_work_queue_db_port": grapl_stack.plugin_work_queue_db_port,
            "plugin_work_queue_db_username": grapl_stack.plugin_work_queue_db_username,
            "plugin_work_queue_db_password": grapl_stack.plugin_work_queue_db_password,
            "organization_management_db_hostname": grapl_stack.organization_management_db_hostname,
            "organization_management_db_port": grapl_stack.organization_management_db_port,
            "organization_management_db_username": grapl_stack.organization_management_db_username,
            "organization_management_db_password": grapl_stack.organization_management_db_password,
        }

        integration_tests = NomadJob(
            "integration-tests",
            jobspec=path_from_root("nomad/local/integration-tests.nomad").resolve(),
            vars=integration_test_job_vars,
            opts=pulumi.ResourceOptions(provider=nomad_provider),
        )
Example #15
    def __init__(self, resource_name, props: Optional[dict] = None, opts=None):
        super(MyCustom, self).__init__("pkg:index:MyCustom", resource_name,
                                       props, opts)


def do_invoke():
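    # pulumi.runtime.invoke performs a synchronous provider-function call and
    # returns an InvokeResult; .value holds the raw result dict.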
    value = pulumi.runtime.invoke("test:index:MyFunction", props={
        "value": 41
    }).value
    return value["out_value"]


mycomponent = MyComponent("mycomponent", inprop="hello")
myinstance = Instance("instance",
                      name="myvm",
                      value=pulumi.Output.secret("secret_value"))
mycustom = MyCustom("mycustom", {"instance": myinstance})
invoke_result = do_invoke()

# Pass myinstance several more times to ensure deserialization of the resource reference
# works on other asyncio threads.
for x in range(5):
    MyCustom(f"mycustom{x}", {"instance": myinstance})

dns_ref = pulumi.StackReference("dns")

pulumi.export("hello", "world")
pulumi.export("outprop", mycomponent.outprop)
pulumi.export("public_ip", myinstance.public_ip)
Example #16
 def stack_reference(self, stack_name: str):
     stack = pulumi.StackReference(stack_name)
     return stack
Example #17
"""An AWS Python Pulumi program"""
# REF: https://github.com/pulumi/examples/blob/master/aws-py-fargate/__main__.py

import json
import pulumi
import pulumi_aws as aws

# get configs defined in our yaml files
config = pulumi.Config()
network_layer_stack = config.require("network-layer-stack")
pulumi_account = config.require("user-account")

# get settings from stack references
env = pulumi.get_stack()
network_layer = pulumi.StackReference(f"{pulumi_account}/network-layer/{env}")

# Read back the VPC and subnet IDs that were set up in the network-layer-{env} stack.
vpc_id = network_layer.require_output("vcp_id")
vpc_azs = network_layer.require_output("vpc_azs")
private_subnet_1_id = network_layer.require_output("private_subnet_1_id")
private_subnet_2_id = network_layer.require_output("private_subnet_2_id")
public_subnet_1_id = network_layer.require_output("public_subnet_1_id")
public_subnet_2_id = network_layer.require_output("public_subnet_2_id")

# # un-stringify the lists
# private_subnets = json.loads(private_subnets)
# public_subnets = json.loads(public_subnets)

# Create an ECS cluster to run a container-based service.
cluster = aws.ecs.Cluster("dask-ml-workflows")
Example #18
import pulumi
import pulumi_aws as aws
import pulumi_random as random

# read local config settings
config = pulumi.Config()
certificate_arn = config.require("certificate_arn")
web_type = config.require("web_type")
api_type = config.require("api_type")
web_port = config.require_int("web_port")
api_port = config.require_int("api_port")
alb_port = config.require_int("alb_port")
vpc_stack = config.require("vpc_stack")

# get stack reference from vpc
vpc = pulumi.StackReference(vpc_stack)

# get vpc/subnet
vpc_id = vpc.get_output("vpc_id")
private_subnets = vpc.get_output("private_subnets")
private_subnet_ids = vpc.get_output("private_subnet_ids")
public_subnets = vpc.get_output("public_subnets")
public_subnet_ids = vpc.get_output("public_subnet_ids")

# get user data script for web
with open('web_user_data.tpl', 'r') as tpl_file:
    web_user_data = tpl_file.read()

# get user data script for api
with open('api_user_data.tpl', 'r') as tpl_file:
    api_user_data = tpl_file.read()
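
# A possible continuation (not shown in the original excerpt): the two templates
# would typically be rendered and passed as user_data to the web and api
# launch configurations or instances.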