Example #1
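# Assumed imports for this snippet (not shown in the original):
#   import json, os
#   from pulumi import Config, get_project, get_stack
#   from pulumi.runtime import is_dry_run
# CONFIG, aws_config, and the resource argument `r` come from the surrounding test harness.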
def verify_data(r):
    t = r.resource_type
    if t != "pulumi-nodejs:dynamic:Resource":
        return

    # Verify is_dry_run()
    assert is_dry_run() == r.props["isDryRun"]

    # Verify get_project()
    assert "PULUMI_TEST_PROJECT" in os.environ
    assert get_project() == os.environ["PULUMI_TEST_PROJECT"]
    assert get_project() == r.props["getProject"]

    # Verify get_stack()
    assert "PULUMI_TEST_STACK" in os.environ
    assert get_stack() == os.environ["PULUMI_TEST_STACK"]
    assert get_stack() == r.props["getStack"]

    # Verify Config
    assert json.dumps(CONFIG,
                      sort_keys=True) == json.dumps(dict(r.props["allConfig"]),
                                                    sort_keys=True)
    config = Config()
    value = config.require("aConfigValue")
    assert value == "this value is a value"
    assert aws_config.region == "us-west-2"
Example #2
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        for eip_name, eip_configuration in resource_specs.items():

            # AWS Elastic IP Dynamic Variables
            resource_name = eip_name
            resource_tags = (eip_configuration["tags"]
                             if eip_configuration and "tags" in eip_configuration
                             else None)

            # Tag dictionary
            tags_list = {}

            # Getting list of tags from configuration file
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            eip = net.Eip(resource_name, tags=tags_list)

            eip_ids_dict.update({eip._name: eip.id})

            # Exporting each EIP
            pulumi.export(eip._name, eip.id)
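The tag-merging block above recurs throughout these examples; a more compact equivalent using dict unpacking (a sketch assuming the same surrounding names) would be:

tags_list = {
    **(resource_tags or {}),  # optional tags from the YAML spec
    "Name": resource_name,
    "Project/Stack": f"{pulumi.get_project()}/{pulumi.get_stack()}",
    **resource_mandatory_tags,  # globally mandated tags
}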
Example #3
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        #
        # ECR Repository
        #

        for ecr_repo_name, ecr_repo_configuration in resource_specs.items():

            # AWS ECR Dynamic Variables
            resource_repo_name = ecr_repo_name
            # resource_repo_version      = eks_repo_configuration["version"]

            resource_repo_tags = None
            resource_repo_tags = ecr_repo_configuration["tags"] if "tags" in ecr_repo_configuration else None

            # Getting list of tags from configuration file
            repo_tags_list = {}

            if resource_repo_tags is not None:
                for each_tag_name, each_tag_value in resource_repo_tags.items():
                    repo_tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            repo_tags_list.update({"Name": resource_repo_name})
            repo_tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            repo_tags_list.update(resource_mandatory_tags)
Example #4
File: subnet.py Project: ascential/pulpy
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()
        aws_vpc_id = VPCs.VPCId()

        for subnet_name, subnet_configuration in resource_specs.items():

            # AWS Subnet Dynamic Variables
            resource_name = subnet_name
            resource_az = subnet_configuration["az"]
            resource_cidr = subnet_configuration["cidr"]
            resource_assign_public_ipv4 = subnet_configuration["assign-public-ipv4"]
            resource_vpc = subnet_configuration["vpc"]

            resource_tags = None
            resource_tags = subnet_configuration["tags"] if "tags" in subnet_configuration else None

            this_vpc = aws_vpc_id[str(resource_vpc)]

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            subnet = net.Subnet(
                resource_name,
                vpc_id=this_vpc,
                cidr_block=resource_cidr,
                map_public_ip_on_launch=resource_assign_public_ipv4,
                availability_zone=resource_az,
                tags=tags_list

                # FIXME: This needs to be sorted
                # opts = pulumi.ResourceOptions(
                #     parent      = this_vpc,
                #     depends_on  = [this_vpc]
                # )
            )

            subnet_ids_dict.update({subnet._name: subnet.id})
            subnet_cidr_blocks_dict.update({subnet._name: subnet.cidr_block})
            subnet_azs_dict.update({subnet._name: subnet.availability_zone})

            # Exporting each subnet created for future reference
            pulumi.export(subnet._name, subnet.id)
Example #5
    def ParameterGroup(self):

        resource_specs  = ParseYAML(resource_type).getSpecs()

        for parametergroup_name, parametergroup_configuration in resource_specs["parameter-group"].items():

            # AWS DocumentDB Parameter Group Dynamic Variables
            resource_name           = parametergroup_name
            resource_description    = parametergroup_configuration["description"]
            resource_family         = parametergroup_configuration["family"]

            resource_tags           = None
            resource_tags           = parametergroup_configuration["tags"] if "tags" in parametergroup_configuration else None

            # Getting list of tags from configuration file
            tags_list               = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Add mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({"Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()})
            tags_list.update(resource_mandatory_tags)

            # Getting all parameters for each
            # individual DocumentDB Parameter Group
            resource_parameters = []

            for each_parameter_key, each_parameter_value in parametergroup_configuration["parameters"].items():

                resource_parameters.append(
                    {
                        "name": each_parameter_key,
                        "value": each_parameter_value["value"],
                        "applyMethod": each_parameter_value["apply"],
                    },
                )

            # Create resource
            parametergroup          = docdb.ClusterParameterGroup(

                resource_name,
                description         = resource_description,
                family              = resource_family,
                parameters          = resource_parameters,
                tags                = tags_list

            )

            # Update resource dictionary
            parametergroup_ids_dict.update({parametergroup._name: parametergroup.id})

            # Export parameter group
            pulumi.export(parametergroup._name, parametergroup.id)
Example #6
    def __init__(self, tags: dict = None):
        config = pulumi.Config()

        self.__tags = {
            'project': pulumi.get_project(),
            'stack': pulumi.get_stack(),
            'costCenter': config.require('cost-center')
        }

        if tags is not None:
            self.__tags.update(tags)
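A minimal usage sketch, assuming the class above is named BaseTags (the class name is hypothetical; the snippet does not show it) and that the stack config defines cost-center:

# Hypothetical usage; `BaseTags` stands in for the unnamed class above.
base = BaseTags(tags={'team': 'data'})
# The private __tags dict now merges project, stack, costCenter, and the extra 'team' tag.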
Example #7
def format_resource_name(name):
    """
    Defines a fully-formatted resource name
    """
    template = '{project}-{stack}-{name}'
    resource_name = template.format(
        name=name,
        project=pulumi.get_project(),
        stack=pulumi.get_stack(),
    )
    return resource_name
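For a project named myproj and a stack named dev (hypothetical names), the helper yields:

# format_resource_name('web')  ->  'myproj-dev-web'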
Example #8
File: route53.py Project: ascential/pulpy
    def PublicZone(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        for r53_public_zone_name, r53_public_zone_configuration in resource_specs[
                "public-zone"].items():

            # Route53 Public Dynamic Variables
            resource_name = r53_public_zone_name

            # Resetting all optional variables
            # with the default value None
            resource_comment    = \
            resource_tags       = None

            # Checking the document's content: if present,
            # we will assign their values to our variables,
            # otherwise we'll set them to None
            resource_comment = r53_public_zone_configuration[
                "comment"] if "comment" in r53_public_zone_configuration else None
            resource_tags = r53_public_zone_configuration[
                "tags"] if "tags" in r53_public_zone_configuration else None

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            # Create Route53 Public Zone
            route53_public_zone = route53.Zone(resource_name,
                                               name=resource_name,
                                               comment=resource_comment,
                                               tags=tags_list)

            pulumi.export(resource_name,
                          [{
                              "ID": route53_public_zone.id,
                              "Name servers": route53_public_zone.name_servers,
                              "Zone ID": route53_public_zone.zone_id
                          }])

            route53_public_zone_ids_dict.update(
                {route53_public_zone._name: route53_public_zone.id})
Example #9
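# Assumed imports (not shown in the snippet):
#   import logging
#   from dataclasses import asdict
#   from datetime import datetime
#   import pulumi
#   from cerberus import Validator  # assumption: the Validator/schema pairing matches cerberus
# ExecutionContext and EXECUTION_CONTEXT_SCHEMA are defined elsewhere in this project.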
def _get_execution_context(subscription_id) -> ExecutionContext:
    logging.basicConfig(level=logging.DEBUG)
    location = pulumi.config.get_config('azure:location')
    platform = pulumi.config.get_config('platform:name')
    project = pulumi.get_project()
    stack = pulumi.get_stack()
    context = ExecutionContext(location, platform, stack, project,
                               subscription_id, datetime.now())

    v = Validator(EXECUTION_CONTEXT_SCHEMA, require_all=True)
    if not v.validate(asdict(context)):
        raise Exception(v.errors)

    return context
Example #10
    def SubnetGroup(self):

        resource_specs  = ParseYAML(resource_type).getSpecs()
        aws_subnet_id   = Subnets.SubnetId()

        for subnetgroup_name, subnetgroup_configuration in resource_specs["subnet-group"].items():

            # AWS DocumentDB Subnet Group Dynamic Variables
            resource_name           = subnetgroup_name
            resource_description    = subnetgroup_configuration["description"]
            resource_subnet_ids     = subnetgroup_configuration["subnets"]

            resource_tags           = None
            resource_tags           = subnetgroup_configuration["tags"] if "tags" in subnetgroup_configuration else None

            # Getting list of tags from configuration file
            tags_list               = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({"Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()})
            tags_list.update(resource_mandatory_tags)

            resource_subnets_list   = []

            for each_subnet_found in resource_subnet_ids:
                resource_subnets_list.append(aws_subnet_id[str(each_subnet_found)])

            subnetgroup                 = docdb.SubnetGroup(

                resource_name,
                description             = resource_description,
                subnet_ids              = resource_subnets_list,
                tags                    = tags_list

            )

            # Update resource dictionaries
            subnetgroup_ids_dict.update({subnetgroup._name: subnetgroup.id})

            # Export
            pulumi.export(subnetgroup._name, subnetgroup.id)
Example #11
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()
        aws_subnet_id = Subnets.SubnetId()
        aws_eip_id = ElasticIPs.ElasticIPId()

        for natgw_name, natgw_configuration in resource_specs.items():

            # AWS NAT Gateway Variables
            resource_name = natgw_name
            resource_subnet = natgw_configuration["subnet"]
            resource_eip = natgw_configuration["elastic_ip"]

            resource_tags = None
            resource_tags = natgw_configuration[
                "tags"] if "tags" in natgw_configuration else None

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            this_subnet = aws_subnet_id[str(resource_subnet)]
            this_eip = aws_eip_id[str(resource_eip)]

            aws_natgw = net.NatGateway(resource_name,
                                       subnet_id=this_subnet,
                                       allocation_id=this_eip,
                                       tags=tags_list)

            # Update resource dictionaries
            natgw_ids_dict.update({aws_natgw._name: aws_natgw.id})

            # Export
            pulumi.export(aws_natgw._name, aws_natgw.id)
Example #12
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        for vpc_name, vpc_conf in resource_specs.items():

            # AWS VPC Dynamic Variables
            resource_name = vpc_name
            resource_cidr = vpc_conf['cidr']
            resource_dns_resolution = vpc_conf['dns-resolution']
            resource_dns_hostnames = vpc_conf['dns-hostnames']

            resource_tags = None
            resource_tags = vpc_conf["tags"] if "tags" in vpc_conf else None

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Add mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            # Create resource
            vpc = net.Vpc(resource_name,
                          cidr_block=resource_cidr,
                          enable_dns_support=resource_dns_resolution,
                          enable_dns_hostnames=resource_dns_hostnames,
                          tags=tags_list)

            # Update resource dictionary
            vpc_ids_dict.update({vpc._name: vpc.id})

            # Export the name of each VPC
            pulumi.export(vpc._name, vpc.id)
Example #13
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        for s3_bucket_name, s3_bucket_configuration in resource_specs.items():

            # AWS S3 Dynamic Variables
            resource_name = s3_bucket_name

            resource_tags = None
            resource_tags = s3_bucket_configuration[
                "tags"] if "tags" in s3_bucket_configuration else None

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            sse_config = s3_bucket_configuration.get(
                "serverSideEncryptionConfiguration")

            # Create S3s
            bucket = s3.Bucket(
                resource_name,
                acl=s3_bucket_configuration["acl"],
                force_destroy=s3_bucket_configuration["force-destroy"],
                tags=tags_list,
                server_side_encryption_configuration=sse_config)

            # Export
            pulumi.export(bucket._name, bucket.id)
Example #14
    def __init__(self):

        resource_specs  = ParseYAML(resource_type).getSpecs()
        aws_vpc_id      = VPCs.VPCId()

        for igw_name, igw_configuration in resource_specs.items():

            # AWS Internet Gateway Variables
            resource_name   = igw_name
            resource_vpc    = igw_configuration["vpc"]

            resource_tags   = None
            resource_tags   = igw_configuration["tags"] if "tags" in igw_configuration else None

            this_vpc        = aws_vpc_id[str(resource_vpc)]

            # Getting list of tags from configuration file
            tags_list       = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({"Project/Stack": pulumi.get_project() + "/" + pulumi.get_stack()})
            tags_list.update(resource_mandatory_tags)

            aws_igw     = igw.InternetGateway(

                resource_name,
                vpc_id  = this_vpc,
                tags    = tags_list

            )

            igw_ids_dict.update({aws_igw._name: aws_igw.id})

            # Export the name of each Internet Gateway
            pulumi.export(aws_igw._name, aws_igw.id)
Example #15
File: keypair.py Project: ascential/pulpy
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        for keypair_name, keypair_configuration in resource_specs.items():

            # AWS KeyPair Dynamic Variables
            resource_name = keypair_name
            resource_public_key = keypair_configuration['public_key']

            resource_tags = None
            resource_tags = keypair_configuration[
                "tags"] if "tags" in keypair_configuration else None

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            # Create resource
            keypair = am.KeyPair(resource_name,
                                 public_key=resource_public_key,
                                 tags=tags_list)

            # Update resource dictionary
            keypair_ids_dict.update({keypair._name: keypair.id})

            # Exporting each KeyPair ID created for future reference
            pulumi.export(keypair._name, keypair.id)
Example #16
File: lambda_.py Project: ascential/pulpy
    def __init__(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        for lambda_name, config in resource_specs.items():
            config = config if config else {}

            resource_name = lambda_name
            resource_tags = config.get("tags")
            resource_env = config.get("environment")

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Generating ENV vars
            env_list = {}
            if resource_env is not None:
                for each_env_name, each_env_value in resource_env.items():
                    env_list.update({each_env_name: each_env_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            lambda_function = lambda_.Function(
                lambda_name,
                environment=lambda_.FunctionEnvironmentArgs(
                    variables=env_list),
                handler=config.get("handler"),
                s3_bucket=config.get("s3_bucket"),
                s3_key=config.get("s3_key"),
                s3_object_version=config.get("s3_object_version"),
                memory_size=config.get("memory_size"),
                publish=config.get("publish"),
                reserved_concurrent_executions=config.get(
                    "reserved_concurrent_executions"),
                role=IAM.RoleARN()[config.get("role")],
                runtime=config.get("runtime"),
                timeout=config.get("timeout"),
                tags=tags_list)

            # Export
            pulumi.export(lambda_function._name, lambda_function.id)

            # Event source mappings
            for mapping_name, mapping_config in (config.get(
                    "event_source_mapping") or {}).items():

                event_source = mapping_config["event_source"]
                assert event_source.get(
                    "type"
                ) == "sqs", "Only sqs is currently supported as an event source mapping. You're welcome to implement more."

                source_arn = SQS.ByName()[event_source["name"]].arn

                mapping = lambda_.EventSourceMapping(
                    mapping_name,
                    event_source_arn=source_arn,
                    function_name=lambda_function.arn,
                    batch_size=mapping_config.get("batch_size"))
                pulumi.export(mapping_name, mapping.id)

            lambdas_by_name[lambda_name] = lambda_function
Example #17
  user:
    auth-provider:
      config:
        cmd-args: config config-helper --format=json
        cmd-path: gcloud
        expiry-key: '{{.credential.token_expiry}}'
        token-key: '{{.credential.access_token}}'
      name: gcp
""".format(info[2]['clusterCaCertificate'], info[1], '{0}_{1}_{2}'.format(
    project, zone, info[0])))

# Make a Kubernetes provider instance that uses our cluster from above.
k8s_provider = Provider('gke_k8s', kubeconfig=k8s_config)

# Create a canary deployment to test that this cluster works.
labels = {'app': 'canary-{0}-{1}'.format(get_project(), get_stack())}
canary = Deployment('canary',
                    spec={
                        'selector': {
                            'matchLabels': labels
                        },
                        'replicas': 1,
                        'template': {
                            'metadata': {
                                'labels': labels
                            },
                            'spec': {
                                'containers': [{
                                    'name': 'nginx',
                                    'image': 'nginx'
                                }]
Example #18
import pulumi
from pulumi_aws import iam, glue
from datalake import DatalakeInfra
from functions import glue_notification, scripts_archive
from distribution import EtlJobDist
from etljob import GlueEtlJob, etljob_policy
from jobworkflow import JobWorkflow, WorkflowDefinition
from typing import Dict, List
from datetime import datetime
import os, sys
sys.path.insert(0, "../../src")
import pkgutil, jobs as jobsdir
import json
import glob

project = pulumi.get_project()
env = pulumi.get_stack()
aws_region = pulumi.Config('aws').get('region')

# tag everything with pulumi stack + project
tags = {
    'hca:pulumi_stack': pulumi.get_stack(),
    'hca:pulumi_project': pulumi.get_project()
}

# not supported yet; the thought would be to revert to a prior version
#existing_dist = pulumi.Config().get('existing_dist')

# relative paths for folders used in distribution
dist_dir = os.path.abspath('../dist')
workflow_dir = os.path.abspath('../../src/metadata/workflows')
Example #19
from datetime import datetime

import pulumi_aws as aws
import pulumi

cfg = pulumi.Config()

bucket_name = cfg.require("s3-bucket-name")

creation_date = datetime.utcnow().strftime('%Y/%m/%d')

tags = {
    'Environment': cfg.require('environment'),
    'BillingProject': cfg.require('billing-project'),
    'CreatedBy': 'Pulumi',
    'CreatedOn': creation_date,
    'Owner': cfg.require('owner'),
    'PulumiProject': pulumi.get_project(),
    'PulumiStack': pulumi.get_stack(),
    'Customer': cfg.require_secret('customer')
}

opts = pulumi.ResourceOptions()

if cfg.get_bool("local-mode"):
    opts.provider = aws.Provider(resource_name="localstack",
                                 access_key="integration-testing",
                                 secret_key="integration-testing",
                                 region="us-east-1",
                                 endpoints=[{
                                     "s3": "http://localhost:4572"
                                 }],
                                 skip_credentials_validation=True,
Example #20
File: hub.py Project: sylver/examples
    def __init__(self, name: str, props: HubProps, opts: ResourceOptions=None):
        super().__init__('vdc:network:Hub', name, {}, opts)

        # retrieve configuration
        dmz_ar = props.config.require('dmz_ar')
        fwm_ar = props.config.get('fwm_ar')
        fws_ar = props.config.require('fws_ar')
        fwz_as = props.config.require('fwz_as')
        gws_ar = props.config.require('gws_ar')
        hbs_ar = props.config.get('hbs_ar')
        hub_ar = props.config.get('hub_ar')
        hub_as = props.config.require('hub_as')

        # set vdc defaults
        vdc.resource_group_name = props.resource_group.name
        vdc.location = props.resource_group.location
        vdc.tags = props.tags
        vdc.self = self

        # Azure Virtual Network to which spokes will be peered
        # separate address spaces to simplify custom routing
        hub = vdc.virtual_network(name, [fwz_as, hub_as])

        # DMZ subnet
        hub_dmz_sn = vdc.subnet_special( #ToDo add NSG
            stem = f'{name}-dmz',
            name = 'DMZ', # name not required but preferred
            virtual_network_name = hub.name,
            address_prefix = dmz_ar,
        )

        # AzureFirewallSubnet
        hub_fw_sn = vdc.subnet_special(
            stem = f'{name}-fw',
            name = 'AzureFirewallSubnet', # name required
            virtual_network_name = hub.name,
            address_prefix = fws_ar,
        )

        # GatewaySubnet
        hub_gw_sn = vdc.subnet_special(
            stem = f'{name}-gw',
            name = 'GatewaySubnet', # name required
            virtual_network_name = hub.name,
            address_prefix = gws_ar,
        )

        # provisioning of Gateways and Firewall depends_on subnets
        # to avoid contention in the Azure control plane

        # VPN Gateway
        hub_vpn_gw = vdc.vpn_gateway(
            stem = name,
            subnet_id = hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # ExpressRoute Gateway
        hub_er_gw = vdc.expressroute_gateway(
            stem = name,
            subnet_id = hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # Azure Firewall
        hub_fw = vdc.firewall(
            stem = name,
            subnet_id = hub_fw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # provisioning of optional subnets depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # AzureBastionSubnet (optional)
        if hbs_ar:
            hub_ab_sn = vdc.subnet_special( #ToDo add NSG if required
                stem = f'{name}-ab',
                name = 'AzureBastionSubnet', # name required
                virtual_network_name = hub.name,
                address_prefix = hbs_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # AzureFirewallManagementSubnet (optional)
        if fwm_ar:
            hub_fwm_sn = vdc.subnet_special(
                stem = f'{name}-fwm',
                name = 'AzureFirewallManagementSubnet', # name required
                virtual_network_name = hub.name,
                address_prefix = fwm_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # work around https://github.com/pulumi/pulumi/issues/4040
        hub_fw_ip = hub_fw.ip_configurations.apply(
            lambda ipc: ipc[0].get('private_ip_address')
        )

        # provisioning of Route Tables depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # Route Table only to be associated with the GatewaySubnet
        hub_gw_rt = vdc.route_table(
            stem = f'{name}-gw',
            disable_bgp_route_propagation = False,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate GatewaySubnet with Route Table
        hub_gw_sn_rta = vdc.subnet_route_table(
            stem = f'{name}-gw',
            route_table_id = hub_gw_rt.id,
            subnet_id = hub_gw_sn.id,
        )

        # Route Table only to be associated with DMZ subnet
        hub_dmz_rt = vdc.route_table(
            stem = f'{name}-dmz',
            disable_bgp_route_propagation = True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate DMZ subnet with Route Table
        hub_dmz_sn_rta = vdc.subnet_route_table(
            stem = f'{name}-dmz',
            route_table_id = hub_dmz_rt.id,
            subnet_id = hub_dmz_sn.id,
        )

        # Route Table only to be associated with ordinary subnets in hub
        hub_sn_rt = vdc.route_table(
            stem = f'{name}-sn',
            disable_bgp_route_propagation = True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # protect intra-GatewaySubnet traffic from being redirected
        vdc.route_to_virtual_network(
            stem = 'gw-gw',
            route_table_name = hub_gw_rt.name,
            address_prefix = gws_ar,
        )

        # partially or fully invalidate system routes to redirect traffic
        for route in [
            ('gw-dmz', hub_gw_rt.name, dmz_ar),
            ('gw-hub', hub_gw_rt.name, hub_as),
            ('dmz-dg', hub_dmz_rt.name, '0.0.0.0/0'),
            ('dmz-dmz', hub_dmz_rt.name, dmz_ar),
            ('dmz-hub', hub_dmz_rt.name, hub_as),
            ('sn-dg', hub_sn_rt.name, '0.0.0.0/0'),
            ('sn-dmz', hub_sn_rt.name, dmz_ar),
            ('sn-gw', hub_sn_rt.name, gws_ar),
        ]:
            vdc.route_to_virtual_appliance(
                stem = route[0],
                route_table_name = route[1],
                address_prefix = route[2],
                next_hop_in_ip_address = hub_fw_ip,
            )

        # VNet Peering between stacks using StackReference
        peer = props.config.get('peer')
        if peer:
            org = props.config.require('org')
            project = get_project()
            peer_stack = StackReference(f'{org}/{project}/{peer}')
            peer_hub_id = peer_stack.get_output('hub_id')
            peer_fw_ip = peer_stack.get_output('hub_fw_ip')
            peer_dmz_ar = peer_stack.get_output('dmz_ar') 
            peer_hub_as = peer_stack.get_output('hub_as')

            # VNet Peering (Global) in one direction from stack to peer
            hub_hub = vdc.vnet_peering(
                stem = props.stack,
                virtual_network_name = hub.name,
                peer = peer,
                remote_virtual_network_id = peer_hub_id,
                allow_forwarded_traffic = True,
                allow_gateway_transit = False, # as both hubs have gateways
            )

            # need to invalidate system routes created by Global VNet Peering
            for route in [
                (f'dmz-{peer}-dmz', hub_dmz_rt.name, peer_dmz_ar),
                (f'dmz-{peer}-hub', hub_dmz_rt.name, peer_hub_as),
                (f'gw-{peer}-dmz', hub_gw_rt.name, peer_dmz_ar),
                (f'gw-{peer}-hub', hub_gw_rt.name, peer_hub_as),
                (f'sn-{peer}-dmz', hub_sn_rt.name, peer_dmz_ar),
                (f'sn-{peer}-hub', hub_sn_rt.name, peer_hub_as),
            ]:
                vdc.route_to_virtual_appliance(
                    stem = route[0],
                    route_table_name = route[1],
                    address_prefix = route[2],
                    next_hop_in_ip_address = peer_fw_ip,
                )
        
        # provisioning of subnets depends_on Route Table (Gateways & Firewall)
        # to avoid contention in the Azure control plane

        # only one shared subnet is provisioned as an example, but many can be
        if hub_ar: #ToDo replace with loop
            hub_example_sn = vdc.subnet( #ToDo add NSG
                stem = f'{name}-example',
                virtual_network_name = hub.name,
                address_prefix = hub_ar,
                depends_on=[hub_sn_rt],
            )

            # associate all hub shared services subnets to Route Table        
            hub_example_sn_rta = vdc.subnet_route_table(
                stem = f'{name}-example',
                route_table_id = hub_sn_rt.id,
                subnet_id = hub_example_sn.id,
            )

        combined_output = Output.all(
            hub_dmz_rt.name,
            hub_er_gw,
            hub_fw,
            hub_fw_ip,
            hub_gw_rt.name,
            hub.id,
            hub.name,
            hub_sn_rt.name,
            hub.subnets,
            hub_vpn_gw,
        ).apply

        self.hub_dmz_rt_name = hub_dmz_rt.name # used to add routes to spokes
        self.hub_er_gw = hub_er_gw # needed prior to VNet Peering from spokes
        self.hub_fw = hub_fw # needed prior to VNet Peering from spokes
        self.hub_fw_ip = hub_fw_ip # used to construct routes
        self.hub_gw_rt_name = hub_gw_rt.name # used to add routes to spokes
        self.hub_id = hub.id # exported and used for peering
        self.hub_name = hub.name # exported and used for peering
        self.hub_sn_rt_name = hub_sn_rt.name # used to add routes to spokes
        self.hub_subnets = hub.subnets # exported as informational
        self.hub_vpn_gw = hub_vpn_gw # needed prior to VNet Peering from spokes
        self.register_outputs({})
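A minimal instantiation sketch; the HubProps fields below are inferred from the attribute accesses in __init__, and the exact shape of HubProps is an assumption:

# Hypothetical wiring for the Hub component above.
hub = Hub('hub1', HubProps(
    config=pulumi.Config(),
    resource_group=resource_group,  # an Azure resource group created elsewhere
    tags={'environment': pulumi.get_stack()},
    stack=pulumi.get_stack(),
))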
Example #21
def main() -> None:
    pulumi_config = pulumi.Config()
    artifacts = ArtifactGetter.from_config(pulumi_config)

    # These tags will be added to all provisioned infrastructure
    # objects.
    register_auto_tags({
        "pulumi:project": pulumi.get_project(),
        "pulumi:stack": config.STACK_NAME
    })

    upstream_stacks: Optional[UpstreamStacks] = None
    nomad_provider: Optional[pulumi.ProviderResource] = None
    consul_provider: Optional[pulumi.ProviderResource] = None
    if not config.LOCAL_GRAPL:
        upstream_stacks = UpstreamStacks()
        nomad_provider = get_nomad_provider_address(
            upstream_stacks.nomad_server)
        # Using get_output instead of require_output so that preview passes.
        # NOTE wimax Feb 2022: Not sure the above is still the case
        consul_master_token_secret_id = upstream_stacks.consul.get_output(
            "consul-master-token-secret-id")
        consul_provider = get_consul_provider_address(
            upstream_stacks.consul, {"token": consul_master_token_secret_id})

    pulumi.export("test-user-name", config.GRAPL_TEST_USER_NAME)
    test_user_password = TestUserPassword()
    pulumi.export("test-user-password-secret-id", test_user_password.secret_id)

    # TODO: temporarily disabled until we can reconnect the ApiGateway to the new
    # web UI.
    # jwt_secret = JWTSecret()

    dynamodb_tables = dynamodb.DynamoDB()

    # TODO: Create these emitters inside the service abstraction if nothing
    # else uses them (or perhaps even if something else *does* use them)
    sysmon_log_emitter = emitter.EventEmitter("sysmon-log")
    osquery_log_emitter = emitter.EventEmitter("osquery-log")
    unid_subgraphs_generated_emitter = emitter.EventEmitter(
        "unid-subgraphs-generated")
    subgraphs_generated_emitter = emitter.EventEmitter("subgraphs-generated")
    subgraphs_merged_emitter = emitter.EventEmitter("subgraphs-merged")
    dispatched_analyzer_emitter = emitter.EventEmitter("dispatched-analyzer")

    analyzer_matched_emitter = emitter.EventEmitter(
        "analyzer-matched-subgraphs")
    pulumi.export("analyzer-matched-subgraphs-bucket",
                  analyzer_matched_emitter.bucket_name)

    all_emitters = [
        sysmon_log_emitter,
        osquery_log_emitter,
        unid_subgraphs_generated_emitter,
        subgraphs_generated_emitter,
        subgraphs_merged_emitter,
        dispatched_analyzer_emitter,
        analyzer_matched_emitter,
    ]

    sysmon_generator_queue = ServiceQueue("sysmon-generator")
    sysmon_generator_queue.subscribe_to_emitter(sysmon_log_emitter)

    osquery_generator_queue = ServiceQueue("osquery-generator")
    osquery_generator_queue.subscribe_to_emitter(osquery_log_emitter)

    node_identifier_queue = ServiceQueue("node-identifier")
    node_identifier_queue.subscribe_to_emitter(
        unid_subgraphs_generated_emitter)

    graph_merger_queue = ServiceQueue("graph-merger")
    graph_merger_queue.subscribe_to_emitter(subgraphs_generated_emitter)

    analyzer_dispatcher_queue = ServiceQueue("analyzer-dispatcher")
    analyzer_dispatcher_queue.subscribe_to_emitter(subgraphs_merged_emitter)

    analyzer_executor_queue = ServiceQueue("analyzer-executor")
    analyzer_executor_queue.subscribe_to_emitter(dispatched_analyzer_emitter)

    engagement_creator_queue = ServiceQueue("engagement-creator")
    engagement_creator_queue.subscribe_to_emitter(analyzer_matched_emitter)

    analyzers_bucket = Bucket("analyzers-bucket", sse=True)
    pulumi.export("analyzers-bucket", analyzers_bucket.bucket)
    model_plugins_bucket = Bucket("model-plugins-bucket", sse=False)
    pulumi.export("model-plugins-bucket", model_plugins_bucket.bucket)

    plugins_bucket = Bucket("plugins-bucket", sse=True)
    pulumi.export("plugins-bucket", plugins_bucket.bucket)

    plugin_buckets = [
        analyzers_bucket,
        model_plugins_bucket,
    ]

    firecracker_s3objs = FirecrackerS3BucketObjects(
        "firecracker-s3-bucket-objects",
        plugins_bucket=plugins_bucket,
        firecracker_assets=FirecrackerAssets(
            "firecracker-assets",
            repository_name=config.cloudsmith_repository_name(),
            artifacts=artifacts,
        ),
    )

    # To learn more about this syntax, see
    # https://docs.rs/env_logger/0.9.0/env_logger/#enabling-logging
    rust_log_levels = ",".join([
        "DEBUG",
        "h2::codec=WARN",
        "hyper=WARN",
        "rusoto_core=WARN",
        "rustls=WARN",
        "serde_xml_rs=WARN",
    ])
    py_log_level = "DEBUG"

    aws_env_vars_for_local = _get_aws_env_vars_for_local()
    pulumi.export("aws-env-vars-for-local", aws_env_vars_for_local)

    # These are shared across both local and prod deployments.
    nomad_inputs: Final[NomadVars] = dict(
        analyzer_bucket=analyzers_bucket.bucket,
        analyzer_dispatched_bucket=dispatched_analyzer_emitter.bucket_name,
        analyzer_dispatcher_queue=analyzer_dispatcher_queue.main_queue_url,
        analyzer_executor_queue=analyzer_executor_queue.main_queue_url,
        analyzer_matched_subgraphs_bucket=analyzer_matched_emitter.bucket_name,
        analyzer_dispatcher_dead_letter_queue=analyzer_dispatcher_queue.
        dead_letter_queue_url,
        aws_env_vars_for_local=aws_env_vars_for_local,
        aws_region=aws.get_region().name,
        container_images=_container_images(artifacts),
        engagement_creator_queue=engagement_creator_queue.main_queue_url,
        graph_merger_queue=graph_merger_queue.main_queue_url,
        graph_merger_dead_letter_queue=graph_merger_queue.
        dead_letter_queue_url,
        model_plugins_bucket=model_plugins_bucket.bucket,
        node_identifier_queue=node_identifier_queue.main_queue_url,
        node_identifier_dead_letter_queue=node_identifier_queue.
        dead_letter_queue_url,
        node_identifier_retry_queue=node_identifier_queue.retry_queue_url,
        osquery_generator_queue=osquery_generator_queue.main_queue_url,
        osquery_generator_dead_letter_queue=osquery_generator_queue.
        dead_letter_queue_url,
        py_log_level=py_log_level,
        rust_log=rust_log_levels,
        schema_properties_table_name=dynamodb_tables.schema_properties_table.
        name,
        schema_table_name=dynamodb_tables.schema_table.name,
        session_table_name=dynamodb_tables.dynamic_session_table.name,
        subgraphs_merged_bucket=subgraphs_merged_emitter.bucket_name,
        subgraphs_generated_bucket=subgraphs_generated_emitter.bucket_name,
        sysmon_generator_queue=sysmon_generator_queue.main_queue_url,
        sysmon_generator_dead_letter_queue=sysmon_generator_queue.
        dead_letter_queue_url,
        test_user_name=config.GRAPL_TEST_USER_NAME,
        unid_subgraphs_generated_bucket=unid_subgraphs_generated_emitter.
        bucket_name,
        user_auth_table=dynamodb_tables.user_auth_table.name,
        user_session_table=dynamodb_tables.user_session_table.name,
        plugin_registry_kernel_artifact_url=firecracker_s3objs.
        kernel_s3obj_url,
        plugin_s3_bucket_aws_account_id=config.AWS_ACCOUNT_ID,
        plugin_s3_bucket_name=plugins_bucket.bucket,
    )

    provision_vars: Final[NomadVars] = {
        "test_user_password_secret_id":
        test_user_password.secret_id,
        **_get_subset(
            nomad_inputs,
            {
                "aws_env_vars_for_local",
                "aws_region",
                "container_images",
                "py_log_level",
                "schema_properties_table_name",
                "schema_table_name",
                "test_user_name",
                "user_auth_table",
            },
        ),
    }

    nomad_grapl_core_timeout = "5m"

    kafka = Kafka(
        "kafka",
        confluent_environment_name=pulumi_config.require(
            "confluent-environment-name"),
    )
    e2e_service_credentials = kafka.service_credentials(
        service_name="e2e-test-runner")

    pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
    pulumi.export("kafka-e2e-sasl-username",
                  e2e_service_credentials.apply(lambda c: c.api_key))
    pulumi.export("kafka-e2e-sasl-password",
                  e2e_service_credentials.apply(lambda c: c.api_secret))
    pulumi.export("kafka-e2e-consumer-group-name",
                  kafka.consumer_group("e2e-test-runner"))

    nomad_grapl_ingress = NomadJob(
        "grapl-ingress",
        jobspec=path_from_root("nomad/grapl-ingress.nomad").resolve(),
        vars={},
        opts=pulumi.ResourceOptions(provider=nomad_provider),
    )

    ConsulIntentions(
        "consul-intentions",
        # consul-intentions are stored in the nomad directory so that engineers remember to create/update intentions
        # when they update nomad configs
        intention_directory=path_from_root(
            "nomad/consul-intentions").resolve(),
        opts=pulumi.ResourceOptions(provider=consul_provider),
    )

    if config.LOCAL_GRAPL:
        ###################################
        # Local Grapl
        ###################################
        organization_management_db = LocalPostgresInstance(
            name="organization-management-db",
            port=5632,
        )

        plugin_registry_db = LocalPostgresInstance(
            name="plugin-registry-db",
            port=5432,
        )

        plugin_work_queue_db = LocalPostgresInstance(
            name="plugin-work-queue-db",
            port=5532,
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_db.hostname)
        pulumi.export("plugin-work-queue-db-port",
                      str(plugin_work_queue_db.port))
        pulumi.export("plugin-work-queue-db-username",
                      plugin_work_queue_db.username)
        pulumi.export("plugin-work-queue-db-password",
                      plugin_work_queue_db.password)

        # TODO: ADD EXPORTS FOR PLUGIN-REGISTRY

        pulumi.export("organization-management-db-hostname",
                      organization_management_db.hostname)
        pulumi.export("organization-management-db-port",
                      str(organization_management_db.port))
        pulumi.export("organization-management-db-username",
                      organization_management_db.username)
        pulumi.export("organization-management-db-password",
                      organization_management_db.password)

        redis_endpoint = f"redis://{config.HOST_IP_IN_NOMAD}:6379"

        pulumi.export("redis-endpoint", redis_endpoint)

        local_grapl_core_vars: Final[NomadVars] = dict(
            organization_management_db_hostname=organization_management_db.
            hostname,
            organization_management_db_port=str(
                organization_management_db.port),
            organization_management_db_username=organization_management_db.
            username,
            organization_management_db_password=organization_management_db.
            password,
            plugin_registry_db_hostname=plugin_registry_db.hostname,
            plugin_registry_db_port=str(plugin_registry_db.port),
            plugin_registry_db_username=plugin_registry_db.username,
            plugin_registry_db_password=plugin_registry_db.password,
            plugin_work_queue_db_hostname=plugin_work_queue_db.hostname,
            plugin_work_queue_db_port=str(plugin_work_queue_db.port),
            plugin_work_queue_db_username=plugin_work_queue_db.username,
            plugin_work_queue_db_password=plugin_work_queue_db.password,
            redis_endpoint=redis_endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=local_grapl_core_vars,
            opts=ResourceOptions(custom_timeouts=CustomTimeouts(
                create=nomad_grapl_core_timeout,
                update=nomad_grapl_core_timeout)),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_core.job]),
        )

    else:
        ###################################
        # AWS Grapl
        ###################################
        # We use stack outputs from internally developed projects
        # We assume that the stack names will match the grapl stack name
        assert upstream_stacks, "Upstream stacks previously initialized"

        vpc_id = upstream_stacks.networking.require_output("grapl-vpc")
        subnet_ids = upstream_stacks.networking.require_output(
            "grapl-private-subnet-ids")
        nomad_agent_security_group_id = upstream_stacks.nomad_agents.require_output(
            "security-group")
        nomad_agent_alb_security_group_id = upstream_stacks.nomad_agents.require_output(
            "alb-security-group")
        nomad_agent_alb_listener_arn = upstream_stacks.nomad_agents.require_output(
            "alb-listener-arn")
        nomad_agent_subnet_ids = upstream_stacks.networking.require_output(
            "nomad-agents-private-subnet-ids")
        nomad_agent_role = aws.iam.Role.get(
            "nomad-agent-role",
            id=upstream_stacks.nomad_agents.require_output("iam-role"),
            # NOTE: It's somewhat odd to set a StackReference as a parent
            opts=pulumi.ResourceOptions(parent=upstream_stacks.nomad_agents),
        )

        availability_zone: pulumi.Output[str] = pulumi.Output.from_input(
            subnet_ids).apply(subnets_to_single_az)

        for _bucket in plugin_buckets:
            _bucket.grant_put_permission_to(nomad_agent_role)
            # Analyzer Dispatcher needs to be able to ListObjects on Analyzers
            # Analyzer Executor needs to be able to ListObjects on Model Plugins
            _bucket.grant_get_and_list_to(nomad_agent_role)
        for _emitter in all_emitters:
            _emitter.grant_write_to(nomad_agent_role)
            _emitter.grant_read_to(nomad_agent_role)

        cache = Cache(
            "main-cache",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        organization_management_postgres = Postgres(
            name="organization-management",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_registry_postgres = Postgres(
            name="plugin-registry",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_work_queue_postgres = Postgres(
            name="plugin-work-queue",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        pulumi.export(
            "organization-management-db-hostname",
            organization_management_postgres.host(),
        )
        pulumi.export(
            "organization-management-db-port",
            organization_management_postgres.port().apply(str),
        )
        pulumi.export(
            "organization-management-db-username",
            organization_management_postgres.username(),
        )
        pulumi.export(
            "organization-management-db-password",
            organization_management_postgres.password(),
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_postgres.host())
        pulumi.export("plugin-work-queue-db-port",
                      plugin_work_queue_postgres.port().apply(str))
        pulumi.export(
            "plugin-work-queue-db-username",
            plugin_work_queue_postgres.username(),
        )
        pulumi.export(
            "plugin-work-queue-db-password",
            plugin_work_queue_postgres.password(),
        )

        pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
        pulumi.export("redis-endpoint", cache.endpoint)

        prod_grapl_core_vars: Final[NomadVars] = dict(
            # The vars with a leading underscore indicate that the hcl local version of the variable should be used
            # instead of the var version.
            organization_management_db_hostname=organization_management_postgres
            .host(),
            organization_management_db_port=organization_management_postgres.
            port().apply(str),
            organization_management_db_username=organization_management_postgres
            .username(),
            organization_management_db_password=organization_management_postgres
            .password(),
            plugin_registry_db_hostname=plugin_registry_postgres.host(),
            plugin_registry_db_port=plugin_registry_postgres.port().apply(str),
            plugin_registry_db_username=plugin_registry_postgres.username(),
            plugin_registry_db_password=plugin_registry_postgres.password(),
            plugin_work_queue_db_hostname=plugin_work_queue_postgres.host(),
            plugin_work_queue_db_port=plugin_work_queue_postgres.port().apply(
                str),
            plugin_work_queue_db_username=plugin_work_queue_postgres.username(
            ),
            plugin_work_queue_db_password=plugin_work_queue_postgres.password(
            ),
            redis_endpoint=cache.endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=prod_grapl_core_vars,
            opts=pulumi.ResourceOptions(
                provider=nomad_provider,
                custom_timeouts=CustomTimeouts(
                    create=nomad_grapl_core_timeout,
                    update=nomad_grapl_core_timeout),
            ),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(
                depends_on=[
                    nomad_grapl_core.job,
                ],
                provider=nomad_provider,
            ),
        )

        api_gateway = ApiGateway(
            "grapl-api-gateway",
            nomad_agents_alb_security_group=nomad_agent_alb_security_group_id,
            nomad_agents_alb_listener_arn=nomad_agent_alb_listener_arn,
            nomad_agents_private_subnet_ids=nomad_agent_subnet_ids,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_ingress.job]),
        )
        pulumi.export("stage-url", api_gateway.stage.invoke_url)

        # Describes resources that should be destroyed/updated between
        # E2E-in-AWS runs.
        pulumi.export(
            "stateful-resource-urns",
            [
                # grapl-core contains our dgraph instances
                nomad_grapl_core.urn,
                # We need to re-provision after we start a new dgraph
                nomad_grapl_provision.urn,
                dynamodb_tables.urn,
            ],
        )

    OpsAlarms(name="ops-alarms")
Example #22
    "redata-airflow-schedule-interval") or "0 * * * *"
redata_time_col_blacklist_regex = config.get(
    "redata-time-col-blacklist-regex") or ""

# Extra tags to apply to all taggable resources
tags = config.get_object('tags') or {}

# --- DERIVED / INTERNAL DEFINITIONS ---

airflow_base_log_folder = "/opt/airflow/logs"
base_url = f"https://{target_domain}"
grafana_db_folder = "/var/lib/grafana"

# Automatically inject tags.
register_auto_tags({
    'pulumi:project': pulumi.get_project(),
    'pulumi:stack': pulumi.get_stack(),
    **tags,
})

#
# CLUSTER INFRASTRUCTURE
#

# Create a cluster
cluster = aws.ecs.Cluster('redata-cluster')

# Create a log group with 7 days retention
lg = aws.cloudwatch.LogGroup(
    'redata-log-group',
    retention_in_days=7,
Example #23
File: route53.py Project: ascential/pulpy
    def PrivateZone(self):

        resource_specs = ParseYAML(resource_type).getSpecs()

        aws_vpc_id = VPCs.VPCId()

        for r53_private_zone_name, r53_private_zone_configuration in resource_specs[
                "private-zone"].items():

            # Route53 Private Zone Dynamic Variables
            resource_name = r53_private_zone_name

            # Resetting all optional variables
            # with the default value None
            resource_comment = resource_vpcs = resource_tags = None

            # Checking the document's content: if a key is present
            # we assign its value to our variable,
            # otherwise we set it to None
            resource_comment = r53_private_zone_configuration[
                "comment"] if "comment" in r53_private_zone_configuration else None
            resource_vpcs = r53_private_zone_configuration[
                "vpcs"] if "vpcs" in r53_private_zone_configuration else None
            resource_tags = r53_private_zone_configuration[
                "tags"] if "tags" in r53_private_zone_configuration else None

            # Getting list of tags from configuration file
            tags_list = {}
            if resource_tags is not None:
                for each_tag_name, each_tag_value in resource_tags.items():
                    tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            tags_list.update({"Name": resource_name})
            tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            tags_list.update(resource_mandatory_tags)

            # Get the list of VPCs
            resource_vpcs_list = []

            if resource_vpcs is not None:
                for each_private_zone_vpc in resource_vpcs:
                    resource_vpcs_list.append(
                        aws_vpc_id[str(each_private_zone_vpc)])

            # Create Route53 Private Zone, associating it with
            # its VPCs so the zone is actually private
            route53_private_zone = route53.Zone(
                resource_name,
                name=resource_name,
                comment=resource_comment,
                vpcs=[
                    route53.ZoneVpcArgs(vpc_id=each_vpc_id)
                    for each_vpc_id in resource_vpcs_list
                ],
                tags=tags_list)

            pulumi.export(resource_name, (
                route53_private_zone.id,
                route53_private_zone.name_servers,
                route53_private_zone.zone_id,
            ))

            route53_private_zone_ids_dict.update(
                {route53_private_zone._name: route53_private_zone.id})
Example #24
0
import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

from vpc import Vpc

project_name = pulumi.get_project()

# Create an EKS cluster with the default configuration.
cluster1 = eks.Cluster(f"{project_name}-1")

# Create an EKS cluster with a non-default configuration.
# TODO specify tags: { "Name": f"{project_name}-2" }
vpc = Vpc(f"{project_name}-2")

cluster2 = eks.Cluster(
    'eks-cluster',
    vpc_id=vpc.vpc_id,
    public_subnet_ids=vpc.public_subnet_ids,
    public_access_cidrs=['0.0.0.0/0'],
    desired_capacity=2,
    min_size=2,
    max_size=2,
    instance_type='t3.micro',
    # Set the storage class.
    storage_classes={"gp2": eks.StorageClassArgs(
        type='gp2', allow_volume_expansion=True, default=True, encrypted=True)},
    enabled_cluster_log_types=[
        "api",
        "audit",
        "authenticator",
    ],
)
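
A typical follow-up, not shown in the original snippet, is exporting each cluster's generated kubeconfig so kubectl can connect:

# Hypothetical addition: pulumi_eks clusters expose a kubeconfig output.
pulumi.export('kubeconfig1', cluster1.kubeconfig)
pulumi.export('kubeconfig2', cluster2.kubeconfig)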
Example #25
0
File: eks.py Project: ascential/pulpy
    def Cluster():

        resource_specs = ParseYAML(resource_type).getSpecs()
        aws_subnet_id = Subnets.SubnetId()
        aws_iam_role_arn = IAM.RoleARN()
        aws_sgs_ids = SecurityGroups.SecurityGroupId()

        #
        # EKS Cluster
        #

        for eks_cluster_name, eks_cluster_configuration in resource_specs[
                "cluster"].items():

            # AWS EKS Cluster Dynamic Variables
            resource_cluster_name = eks_cluster_name
            resource_cluster_version = eks_cluster_configuration[
                "version"] if "version" in eks_cluster_configuration else None
            resource_cluster_role_arn = eks_cluster_configuration[
                "role"] if "role" in eks_cluster_configuration else None
            resource_cluster_subnets = eks_cluster_configuration[
                "subnets"] if "subnets" in eks_cluster_configuration else None
            resource_cluster_security_groups = eks_cluster_configuration[
                "security_groups"] if "security_groups" in eks_cluster_configuration else None
            resource_cluster_endpoint_private_access = eks_cluster_configuration[
                "endpoint_private_access"] if "endpoint_private_access" in eks_cluster_configuration else None
            resource_cluster_endpoint_public_access = eks_cluster_configuration[
                "endpoint_public_access"] if "endpoint_public_access" in eks_cluster_configuration else None
            resource_cluster_public_access_cidrs = eks_cluster_configuration[
                "public_access_cidrs"] if "public_access_cidrs" in eks_cluster_configuration else None

            resource_cluster_tags = None
            resource_cluster_tags = eks_cluster_configuration[
                "tags"] if "tags" in eks_cluster_configuration else None

            # Getting list of tags from configuration file
            cluster_tags_list = {}
            if resource_cluster_tags is not None:
                for each_tag_name, each_tag_value in resource_cluster_tags.items(
                ):
                    cluster_tags_list.update({each_tag_name: each_tag_value})

            # Adding mandatory tags
            cluster_tags_list.update({"Name": resource_cluster_name})
            cluster_tags_list.update({
                "Project/Stack":
                pulumi.get_project() + "/" + pulumi.get_stack()
            })
            cluster_tags_list.update(resource_mandatory_tags)

            # Get EKS Cluster IAM Role
            this_cluster_iam_role = aws_iam_role_arn[str(
                resource_cluster_role_arn)]

            # Getting the list of subnets needed for EKS Cluster
            eks_cluster_subnets_list = []
            if resource_cluster_subnets is not None:
                for each_eks_cluster_subnet in resource_cluster_subnets:
                    eks_cluster_subnets_list.append(
                        aws_subnet_id[str(each_eks_cluster_subnet)])

            # Getting security groups for EKS Cluster
            eks_cluster_security_groups_list = []
            if resource_cluster_security_groups is not None:
                for each_eks_cluster_security_group in resource_cluster_security_groups:
                    eks_cluster_security_groups_list.append(
                        aws_sgs_ids[str(each_eks_cluster_security_group)])

            # Getting the list of public access CIDRs for EKS Cluster
            eks_cluster_public_access_cidrs_list = []
            if resource_cluster_public_access_cidrs is not None:
                for each_eks_cluster_public_access_cidr in resource_cluster_public_access_cidrs:
                    eks_cluster_public_access_cidrs_list.append(
                        str(each_eks_cluster_public_access_cidr))

            eks_cluster = eks.Cluster(
                resource_cluster_name,
                name=resource_cluster_name,
                version=resource_cluster_version,
                role_arn=this_cluster_iam_role,
                vpc_config={
                    'endpoint_private_access':
                    resource_cluster_endpoint_private_access,
                    'endpoint_public_access':
                    resource_cluster_endpoint_public_access,
                    'subnet_ids': eks_cluster_subnets_list,
                    'security_group_ids': eks_cluster_security_groups_list,
                    'public_access_cidrs': eks_cluster_public_access_cidrs_list,
                },
                tags=cluster_tags_list)

            pulumi.export(eks_cluster._name, [
                eks_cluster.id, eks_cluster.arn, eks_cluster.endpoint,
                eks_cluster.certificate_authority
            ])

            #
            # EKS Node Groups
            #

            for eks_nodegroup_name, eks_nodegroup_configuration in eks_cluster_configuration[
                    "nodegroup"].items():

                # AWS EKS Node Group Dynamic Variables
                resource_nodegroup_name = eks_nodegroup_name

                resource_nodegroup_role_arn = eks_nodegroup_configuration[
                    "role"] if "role" in eks_nodegroup_configuration else None
                resource_nodegroup_subnets = eks_nodegroup_configuration[
                    "subnets"] if "subnets" in eks_nodegroup_configuration else None
                resource_nodegroup_instance_type = eks_nodegroup_configuration[
                    "instance_type"] if "instance_type" in eks_nodegroup_configuration else None
                resource_nodegroup_instance_disk_size = eks_nodegroup_configuration[
                    "instance_disk_size"] if "instance_disk_size" in eks_nodegroup_configuration else 40
                resource_nodegroup_desired_size = eks_nodegroup_configuration[
                    "scaling"][
                        "desired_size"] if "desired_size" in eks_nodegroup_configuration[
                            "scaling"] else 3
                resource_nodegroup_max_size = eks_nodegroup_configuration[
                    "scaling"][
                        "max_size"] if "max_size" in eks_nodegroup_configuration[
                            "scaling"] else 3
                resource_nodegroup_min_size = eks_nodegroup_configuration[
                    "scaling"][
                        "min_size"] if "min_size" in eks_nodegroup_configuration[
                            "scaling"] else 3
                resource_nodegroup_ami_type = eks_nodegroup_configuration[
                    "ami_type"] if "ami_type" in eks_nodegroup_configuration else None
                resource_nodegroup_capacity_type = eks_nodegroup_configuration[
                    "capacity_type"] if "capacity_type" in eks_nodegroup_configuration else None

                resource_tags = None
                resource_tags = eks_nodegroup_configuration[
                    "tags"] if "tags" in eks_nodegroup_configuration else None

                # Getting list of tags from configuration file
                nodegroup_tags_list = {}
                if resource_tags is not None:
                    for each_tag_name, each_tag_value in resource_tags.items():
                        nodegroup_tags_list.update(
                            {each_tag_name: each_tag_value})

                # Adding mandatory tags
                nodegroup_tags_list.update({"Name": resource_nodegroup_name})
                nodegroup_tags_list.update({
                    "Project/Stack":
                    pulumi.get_project() + "/" + pulumi.get_stack()
                })
                nodegroup_tags_list.update(resource_mandatory_tags)

                # Getting the list of subnets needed for EKS Node Group
                eks_nodegroup_subnets_list = []
                if resource_nodegroup_subnets is not None:
                    for each_eks_nodegroup_subnet in resource_nodegroup_subnets:
                        eks_nodegroup_subnets_list.append(
                            aws_subnet_id[str(each_eks_nodegroup_subnet)])

                # Get EKS Node Group IAM Role
                this_nodegroup_iam_role = aws_iam_role_arn[str(
                    resource_nodegroup_role_arn)]

                eks_node_group = eks.NodeGroup(
                    resource_nodegroup_name,
                    cluster_name=eks_cluster.name,
                    node_group_name=resource_nodegroup_name,
                    version=resource_cluster_version,
                    node_role_arn=this_nodegroup_iam_role,
                    subnet_ids=eks_nodegroup_subnets_list,
                    instance_types=resource_nodegroup_instance_type,
                    capacity_type=resource_nodegroup_capacity_type,
                    disk_size=resource_nodegroup_instance_disk_size,
                    ami_type=resource_nodegroup_ami_type,
                    scaling_config=eks.NodeGroupScalingConfigArgs(
                        desired_size=resource_nodegroup_desired_size,
                        max_size=resource_nodegroup_max_size,
                        min_size=resource_nodegroup_min_size,
                    ),
                    tags=nodegroup_tags_list)

                # Export
                pulumi.export(eks_node_group._name, eks_node_group.id)
Example #26
0
File: config.py Project: tomas-mota/pulumi
        self.keys = keys
        self.message = message


# retrieve the stack configuration data
config = Config()

# retrieve optional separator choice and suffix
separator = config.get('separator') or '-'
separator = separator[0]
if separator == ' ':
    separator = ''
suffix = config.get('suffix') or ''

# retrieve project and stack (org not yet available)
project = get_project()
stack = get_stack()
# set default tags to be applied to all taggable resources
default_tags = {
    'manager': 'pulumi',
    'project': project,
    'stack': stack,
}
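
To illustrate how separator, suffix, and default_tags are meant to compose downstream (hypothetical resource naming, not from the original file):

# Hypothetical usage of the naming pieces and default tags defined above.
hub_name = f'{project}{separator}hub{suffix}'   # e.g. "myproj-hub" or "myprojhub"
hub_tags = {**default_tags, 'Name': hub_name}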

# Azure Bastion hosts in hub and spokes (until functional across peerings)
azure_bastion = config.get_bool('azure_bastion')

# Azure Firewall to route all Internet-bound traffic to designated next hop
forced_tunnel = config.get('forced_tunnel')
if forced_tunnel:
    ft_ip = ip_address(forced_tunnel)  # check IP address is valid
Example #27
0
    ami=ami_ubuntu,
    availability_zone=zone_names.apply(lambda names: names[0]),
    vpc_security_group_ids=[jumpbox_sg.id],
    subnet_id=subnet_infra.id,
    associate_public_ip_address=True,
    iam_instance_profile=jumpbox_iam_profile.id,
    source_dest_check=False,
    user_data=user_data,
    key_name=keypair.id,
    root_block_device={
        'volume_type': "standard",
        'volume_size': vol_size_ubuntu,
        'delete_on_termination': True
    },
    tags={
        'user:Project': pulumi.get_project(),
        'user:Stack': pulumi.get_stack(),
        'Name': 'jumpbox.pod.lab',
        'Owner': owner
    })

conn = provisioners.ConnectionArgs(host=jumpbox.public_ip,
                                   username='******',
                                   private_key=key.private_key_pem)

# ansible_runner.run_async(playbook="provisioning/provision_jumpbox.yml",
#                           ssh_key="aviKubernetes_pkey.pem",
#                           limit=jumpbox.public_ip,
#                           quiet=False)

pulumi.export('public_ip', jumpbox.public_ip)
Example #28
0
File: ec2.py Project: ascential/pulpy
    def LaunchTemplate():

        resource_specs = ParseYAML(resource_type).getSpecs()
        aws_sg_id = SecurityGroups.SecurityGroupId()
        aws_keypair_id = KeyPairs.KeyPairId()

        # Cheking if "auto-scaling-group:" is present in the configuration file
        launch_template = resource_specs["launch-template"].items(
        ) if "launch-template" in resource_specs else None

        # If "auto-scaling-group:" is present then we'll run all the code below
        if launch_template is not None:

            # Loop through all Launch Templates defined
            for launch_template_name, launch_template_configuration in launch_template:

                # If there's any configuration then we'll execute the
                # code below, else we'll pass the execution
                if launch_template_configuration is not None:

                    # AWS Launch Template Dynamic Variables

                    # Resource Name
                    resource_name = launch_template_name

                    # AWS Launch Template configuration and its Default values
                    resource_description = launch_template_configuration[
                        "description"] if "description" in launch_template_configuration else None
                    resource_instance_type = launch_template_configuration[
                        "instance-type"] if "instance-type" in launch_template_configuration else None
                    resource_ami = launch_template_configuration[
                        "ami"] if "ami" in launch_template_configuration else None
                    resource_key = launch_template_configuration[
                        "key"] if "key" in launch_template_configuration else None
                    resource_ebs_optimized = launch_template_configuration[
                        "ebs-optimized"] if "ebs-optimized" in launch_template_configuration else True
                    resource_termination_protection = launch_template_configuration[
                        "termination-protection"] if "termination-protection" in launch_template_configuration else False
                    resource_security_groups = launch_template_configuration[
                        "security-groups"] if "security-groups" in launch_template_configuration else None
                    resource_user_data = launch_template_configuration[
                        "user-data"] if "user-data" in launch_template_configuration else None
                    resource_update_default_version = launch_template_configuration[
                        "update-default-version"] if "update-default-version" in launch_template_configuration else True

                    # Resource Tags and its Default values
                    resource_tags = None
                    resource_tags = launch_template_configuration[
                        "tags"] if "tags" in launch_template_configuration else None

                    # Getting list of tags from configuration file
                    tags_list = {}
                    if resource_tags is not None:
                        for each_tag_name, each_tag_value in resource_tags.items(
                        ):
                            tags_list.update({each_tag_name: each_tag_value})

                    # Adding mandatory tags
                    tags_list.update({"Name": resource_name})
                    tags_list.update({
                        "Project/Stack":
                        pulumi.get_project() + "/" + pulumi.get_stack()
                    })
                    tags_list.update(resource_mandatory_tags)

                    # Check if the KeyPair is provided or not
                    if resource_key is None:
                        this_keypair = None
                    else:
                        this_keypair = aws_keypair_id[str(resource_key)]

                    # Getting the list of security groups found
                    security_groups_list = []
                    if resource_security_groups is not None:
                        for each_security_group_found in resource_security_groups:
                            this_security_group = aws_sg_id[str(
                                each_security_group_found)]
                            security_groups_list.append(this_security_group)

                    # Encode "user_data" to base64 format
                    # Ref: https://stackoverflow.com/a/42759842
                    user_data_base64 = b64encode(
                        resource_user_data.encode("ascii")).decode("ascii")

                    new_launch_template = ec2.LaunchTemplate(
                        resource_name,
                        description=resource_description,
                        instance_type=resource_instance_type,
                        image_id=resource_ami,
                        key_name=this_keypair,
                        ebs_optimized=resource_ebs_optimized,
                        disable_api_termination=resource_termination_protection,
                        vpc_security_group_ids=security_groups_list,
                        # It has to be base64 encoded
                        user_data=user_data_base64,
                        tags=tags_list,
                        update_default_version=resource_update_default_version)

                    # NOTE Launch Templates ID Dictionary
                    # Update resource dictionaries
                    lt_ids_dict.update(
                        {new_launch_template._name: new_launch_template.id})
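
As a sketch of where a launch template like this usually ends up, here is how one might reference it from an Auto Scaling group; the group name and subnet ID are placeholders, not values from this project:

from pulumi_aws import autoscaling

example_asg = autoscaling.Group(
    "example-asg",                                       # hypothetical name
    desired_capacity=2,
    min_size=1,
    max_size=3,
    vpc_zone_identifiers=["subnet-0123456789abcdef0"],   # placeholder subnet ID
    launch_template=autoscaling.GroupLaunchTemplateArgs(
        id=new_launch_template.id,
        version="$Latest",
    ),
)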
Example #29
0
        cmd-args: config config-helper --format=json
        cmd-path: gcloud
        expiry-key: '{{.credential.token_expiry}}'
        token-key: '{{.credential.access_token}}'
      name: gcp
""".format(
    info[2]["clusterCaCertificate"],
    info[1],
    "{0}_{1}_{2}".format(project, zone, info[0]),
))
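
The info value indexed above (info[0] is the cluster name, info[1] the endpoint, info[2] the master auth block) is presumably built earlier in the program from the cluster's outputs, along these lines:

from pulumi import Output

# Assumption: how info is likely constructed earlier in the program.
info = Output.all(cluster.name, cluster.endpoint, cluster.master_auth)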

# Make a Kubernetes provider instance that uses our cluster from above.
k8s_provider = Provider("gke_k8s", kubeconfig=k8s_config)

# Create a canary deployment to test that this cluster works.
labels = {"app": "canary-{0}-{1}".format(get_project(), get_stack())}
canary = Deployment(
    "canary",
    spec={
        "selector": {
            "matchLabels": labels
        },
        "replicas": 1,
        "template": {
            "metadata": {
                "labels": labels
            },
            "spec": {
                "containers": [{
                    "name": "nginx",
                    "image": "nginx"
Example #30
0
my_location = config.get("location")
#   resource group name
my_resource_group_name = config.get("resource_group_name")
#   name
my_name = config.get("name")
#   workspace name
my_Workspacename = config.get("workspacename")

# Databricks VPN peering name.
my_peering_name = "databricks_peering"

# Creating Tags
# stackname for tags
stackName = get_stack()
# projectname for tags
projectName = get_project()

# collection of tags
basetags = {
    "cost-center": projectName,
    "stack": stackName,
    "env": "databricks",
    "team": "engineering",
    "pulumi_cli": "yes",
    "cloud_location": my_location,
    "console_azure": "no"
}

#
# Azure resource creation starts here.
#
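
The snippet ends at this header; creation would presumably begin with a resource group carrying the tags defined above, for example (assuming pulumi_azure_native with its resources module):

from pulumi_azure_native import resources

# Hypothetical continuation: create the resource group with the base tags.
resource_group = resources.ResourceGroup(
    my_resource_group_name,
    location=my_location,
    tags=basetags,
)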