Example #1
import pulumi
import pulumi_aws as aws

current_region = aws.get_region()
current_caller_identity = aws.get_caller_identity()
example_container = aws.mediastore.Container("exampleContainer")
example_container_policy = aws.mediastore.ContainerPolicy("exampleContainerPolicy",
    container_name=example_container.name,
    policy=example_container.name.apply(lambda name: f"""{{
	"Version": "2012-10-17",
	"Statement": [{{
		"Sid": "MediaStoreFullAccess",
		"Action": [ "mediastore:*" ],
		"Principal": {{"AWS" : "arn:aws:iam::{current_caller_identity.account_id}:root"}},
		"Effect": "Allow",
		"Resource": "arn:aws:mediastore:{current_caller_identity.account_id}:{current_region.name}:container/{name}/*",
		"Condition": {{
			"Bool": {{ "aws:SecureTransport": "true" }}
		}}
	}}]
}}

"""))

Example #2
def main() -> None:
    pulumi_config = pulumi.Config()
    artifacts = ArtifactGetter.from_config(pulumi_config)

    # These tags will be added to all provisioned infrastructure
    # objects.
    register_auto_tags({
        "pulumi:project": pulumi.get_project(),
        "pulumi:stack": config.STACK_NAME
    })

    upstream_stacks: Optional[UpstreamStacks] = None
    nomad_provider: Optional[pulumi.ProviderResource] = None
    consul_provider: Optional[pulumi.ProviderResource] = None
    if not config.LOCAL_GRAPL:
        upstream_stacks = UpstreamStacks()
        nomad_provider = get_nomad_provider_address(
            upstream_stacks.nomad_server)
        # Using get_output instead of require_output so that preview passes.
        # NOTE wimax Feb 2022: Not sure the above is still the case
        consul_master_token_secret_id = upstream_stacks.consul.get_output(
            "consul-master-token-secret-id")
        consul_provider = get_consul_provider_address(
            upstream_stacks.consul, {"token": consul_master_token_secret_id})

    pulumi.export("test-user-name", config.GRAPL_TEST_USER_NAME)
    test_user_password = TestUserPassword()
    pulumi.export("test-user-password-secret-id", test_user_password.secret_id)

    # TODO: temporarily disabled until we can reconnect the ApiGateway to the new
    # web UI.
    # jwt_secret = JWTSecret()

    dynamodb_tables = dynamodb.DynamoDB()

    # TODO: Create these emitters inside the service abstraction if nothing
    # else uses them (or perhaps even if something else *does* use them)
    sysmon_log_emitter = emitter.EventEmitter("sysmon-log")
    osquery_log_emitter = emitter.EventEmitter("osquery-log")
    unid_subgraphs_generated_emitter = emitter.EventEmitter(
        "unid-subgraphs-generated")
    subgraphs_generated_emitter = emitter.EventEmitter("subgraphs-generated")
    subgraphs_merged_emitter = emitter.EventEmitter("subgraphs-merged")
    dispatched_analyzer_emitter = emitter.EventEmitter("dispatched-analyzer")

    analyzer_matched_emitter = emitter.EventEmitter(
        "analyzer-matched-subgraphs")
    pulumi.export("analyzer-matched-subgraphs-bucket",
                  analyzer_matched_emitter.bucket_name)

    all_emitters = [
        sysmon_log_emitter,
        osquery_log_emitter,
        unid_subgraphs_generated_emitter,
        subgraphs_generated_emitter,
        subgraphs_merged_emitter,
        dispatched_analyzer_emitter,
        analyzer_matched_emitter,
    ]

    sysmon_generator_queue = ServiceQueue("sysmon-generator")
    sysmon_generator_queue.subscribe_to_emitter(sysmon_log_emitter)

    osquery_generator_queue = ServiceQueue("osquery-generator")
    osquery_generator_queue.subscribe_to_emitter(osquery_log_emitter)

    node_identifier_queue = ServiceQueue("node-identifier")
    node_identifier_queue.subscribe_to_emitter(
        unid_subgraphs_generated_emitter)

    graph_merger_queue = ServiceQueue("graph-merger")
    graph_merger_queue.subscribe_to_emitter(subgraphs_generated_emitter)

    analyzer_dispatcher_queue = ServiceQueue("analyzer-dispatcher")
    analyzer_dispatcher_queue.subscribe_to_emitter(subgraphs_merged_emitter)

    analyzer_executor_queue = ServiceQueue("analyzer-executor")
    analyzer_executor_queue.subscribe_to_emitter(dispatched_analyzer_emitter)

    engagement_creator_queue = ServiceQueue("engagement-creator")
    engagement_creator_queue.subscribe_to_emitter(analyzer_matched_emitter)

    analyzers_bucket = Bucket("analyzers-bucket", sse=True)
    pulumi.export("analyzers-bucket", analyzers_bucket.bucket)
    model_plugins_bucket = Bucket("model-plugins-bucket", sse=False)
    pulumi.export("model-plugins-bucket", model_plugins_bucket.bucket)

    plugins_bucket = Bucket("plugins-bucket", sse=True)
    pulumi.export("plugins-bucket", plugins_bucket.bucket)

    plugin_buckets = [
        analyzers_bucket,
        model_plugins_bucket,
    ]

    firecracker_s3objs = FirecrackerS3BucketObjects(
        "firecracker-s3-bucket-objects",
        plugins_bucket=plugins_bucket,
        firecracker_assets=FirecrackerAssets(
            "firecracker-assets",
            repository_name=config.cloudsmith_repository_name(),
            artifacts=artifacts,
        ),
    )

    # To learn more about this syntax, see
    # https://docs.rs/env_logger/0.9.0/env_logger/#enabling-logging
    rust_log_levels = ",".join([
        "DEBUG",
        "h2::codec=WARN",
        "hyper=WARN",
        "rusoto_core=WARN",
        "rustls=WARN",
        "serde_xml_rs=WARN",
    ])
    py_log_level = "DEBUG"

    aws_env_vars_for_local = _get_aws_env_vars_for_local()
    pulumi.export("aws-env-vars-for-local", aws_env_vars_for_local)

    # These are shared across both local and prod deployments.
    nomad_inputs: Final[NomadVars] = dict(
        analyzer_bucket=analyzers_bucket.bucket,
        analyzer_dispatched_bucket=dispatched_analyzer_emitter.bucket_name,
        analyzer_dispatcher_queue=analyzer_dispatcher_queue.main_queue_url,
        analyzer_executor_queue=analyzer_executor_queue.main_queue_url,
        analyzer_matched_subgraphs_bucket=analyzer_matched_emitter.bucket_name,
        analyzer_dispatcher_dead_letter_queue=analyzer_dispatcher_queue.dead_letter_queue_url,
        aws_env_vars_for_local=aws_env_vars_for_local,
        aws_region=aws.get_region().name,
        container_images=_container_images(artifacts),
        engagement_creator_queue=engagement_creator_queue.main_queue_url,
        graph_merger_queue=graph_merger_queue.main_queue_url,
        graph_merger_dead_letter_queue=graph_merger_queue.dead_letter_queue_url,
        model_plugins_bucket=model_plugins_bucket.bucket,
        node_identifier_queue=node_identifier_queue.main_queue_url,
        node_identifier_dead_letter_queue=node_identifier_queue.dead_letter_queue_url,
        node_identifier_retry_queue=node_identifier_queue.retry_queue_url,
        osquery_generator_queue=osquery_generator_queue.main_queue_url,
        osquery_generator_dead_letter_queue=osquery_generator_queue.dead_letter_queue_url,
        py_log_level=py_log_level,
        rust_log=rust_log_levels,
        schema_properties_table_name=dynamodb_tables.schema_properties_table.name,
        schema_table_name=dynamodb_tables.schema_table.name,
        session_table_name=dynamodb_tables.dynamic_session_table.name,
        subgraphs_merged_bucket=subgraphs_merged_emitter.bucket_name,
        subgraphs_generated_bucket=subgraphs_generated_emitter.bucket_name,
        sysmon_generator_queue=sysmon_generator_queue.main_queue_url,
        sysmon_generator_dead_letter_queue=sysmon_generator_queue.dead_letter_queue_url,
        test_user_name=config.GRAPL_TEST_USER_NAME,
        unid_subgraphs_generated_bucket=unid_subgraphs_generated_emitter.bucket_name,
        user_auth_table=dynamodb_tables.user_auth_table.name,
        user_session_table=dynamodb_tables.user_session_table.name,
        plugin_registry_kernel_artifact_url=firecracker_s3objs.kernel_s3obj_url,
        plugin_s3_bucket_aws_account_id=config.AWS_ACCOUNT_ID,
        plugin_s3_bucket_name=plugins_bucket.bucket,
    )

    provision_vars: Final[NomadVars] = {
        "test_user_password_secret_id":
        test_user_password.secret_id,
        **_get_subset(
            nomad_inputs,
            {
                "aws_env_vars_for_local",
                "aws_region",
                "container_images",
                "py_log_level",
                "schema_properties_table_name",
                "schema_table_name",
                "test_user_name",
                "user_auth_table",
            },
        ),
    }

    nomad_grapl_core_timeout = "5m"

    kafka = Kafka(
        "kafka",
        confluent_environment_name=pulumi_config.require(
            "confluent-environment-name"),
    )
    e2e_service_credentials = kafka.service_credentials(
        service_name="e2e-test-runner")

    pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
    pulumi.export("kafka-e2e-sasl-username",
                  e2e_service_credentials.apply(lambda c: c.api_key))
    pulumi.export("kafka-e2e-sasl-password",
                  e2e_service_credentials.apply(lambda c: c.api_secret))
    pulumi.export("kafka-e2e-consumer-group-name",
                  kafka.consumer_group("e2e-test-runner"))

    nomad_grapl_ingress = NomadJob(
        "grapl-ingress",
        jobspec=path_from_root("nomad/grapl-ingress.nomad").resolve(),
        vars={},
        opts=pulumi.ResourceOptions(provider=nomad_provider),
    )

    ConsulIntentions(
        "consul-intentions",
        # consul-intentions are stored in the nomad directory so that engineers remember to create/update intentions
        # when they update nomad configs
        intention_directory=path_from_root(
            "nomad/consul-intentions").resolve(),
        opts=pulumi.ResourceOptions(provider=consul_provider),
    )

    if config.LOCAL_GRAPL:
        ###################################
        # Local Grapl
        ###################################
        organization_management_db = LocalPostgresInstance(
            name="organization-management-db",
            port=5632,
        )

        plugin_registry_db = LocalPostgresInstance(
            name="plugin-registry-db",
            port=5432,
        )

        plugin_work_queue_db = LocalPostgresInstance(
            name="plugin-work-queue-db",
            port=5532,
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_db.hostname)
        pulumi.export("plugin-work-queue-db-port",
                      str(plugin_work_queue_db.port))
        pulumi.export("plugin-work-queue-db-username",
                      plugin_work_queue_db.username)
        pulumi.export("plugin-work-queue-db-password",
                      plugin_work_queue_db.password)

        # TODO: ADD EXPORTS FOR PLUGIN-REGISTRY

        pulumi.export("organization-management-db-hostname",
                      organization_management_db.hostname)
        pulumi.export("organization-management-db-port",
                      str(organization_management_db.port))
        pulumi.export("organization-management-db-username",
                      organization_management_db.username)
        pulumi.export("organization-management-db-password",
                      organization_management_db.password)

        redis_endpoint = f"redis://{config.HOST_IP_IN_NOMAD}:6379"

        pulumi.export("redis-endpoint", redis_endpoint)

        local_grapl_core_vars: Final[NomadVars] = dict(
            organization_management_db_hostname=organization_management_db.hostname,
            organization_management_db_port=str(organization_management_db.port),
            organization_management_db_username=organization_management_db.username,
            organization_management_db_password=organization_management_db.password,
            plugin_registry_db_hostname=plugin_registry_db.hostname,
            plugin_registry_db_port=str(plugin_registry_db.port),
            plugin_registry_db_username=plugin_registry_db.username,
            plugin_registry_db_password=plugin_registry_db.password,
            plugin_work_queue_db_hostname=plugin_work_queue_db.hostname,
            plugin_work_queue_db_port=str(plugin_work_queue_db.port),
            plugin_work_queue_db_username=plugin_work_queue_db.username,
            plugin_work_queue_db_password=plugin_work_queue_db.password,
            redis_endpoint=redis_endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=local_grapl_core_vars,
            opts=pulumi.ResourceOptions(custom_timeouts=CustomTimeouts(
                create=nomad_grapl_core_timeout,
                update=nomad_grapl_core_timeout)),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_core.job]),
        )

    else:
        ###################################
        # AWS Grapl
        ###################################
        # We use stack outputs from internally developed projects
        # We assume that the stack names will match the grapl stack name
        assert upstream_stacks is not None, "Upstream stacks should have been initialized above"

        vpc_id = upstream_stacks.networking.require_output("grapl-vpc")
        subnet_ids = upstream_stacks.networking.require_output(
            "grapl-private-subnet-ids")
        nomad_agent_security_group_id = upstream_stacks.nomad_agents.require_output(
            "security-group")
        nomad_agent_alb_security_group_id = upstream_stacks.nomad_agents.require_output(
            "alb-security-group")
        nomad_agent_alb_listener_arn = upstream_stacks.nomad_agents.require_output(
            "alb-listener-arn")
        nomad_agent_subnet_ids = upstream_stacks.networking.require_output(
            "nomad-agents-private-subnet-ids")
        nomad_agent_role = aws.iam.Role.get(
            "nomad-agent-role",
            id=upstream_stacks.nomad_agents.require_output("iam-role"),
            # NOTE: It's somewhat odd to set a StackReference as a parent
            opts=pulumi.ResourceOptions(parent=upstream_stacks.nomad_agents),
        )

        availability_zone: pulumi.Output[str] = pulumi.Output.from_input(
            subnet_ids).apply(subnets_to_single_az)

        for _bucket in plugin_buckets:
            _bucket.grant_put_permission_to(nomad_agent_role)
            # Analyzer Dispatcher needs to be able to ListObjects on Analyzers
            # Analyzer Executor needs to be able to ListObjects on Model Plugins
            _bucket.grant_get_and_list_to(nomad_agent_role)
        for _emitter in all_emitters:
            _emitter.grant_write_to(nomad_agent_role)
            _emitter.grant_read_to(nomad_agent_role)

        cache = Cache(
            "main-cache",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        organization_management_postgres = Postgres(
            name="organization-management",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_registry_postgres = Postgres(
            name="plugin-registry",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_work_queue_postgres = Postgres(
            name="plugin-work-queue",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        pulumi.export(
            "organization-management-db-hostname",
            organization_management_postgres.host(),
        )
        pulumi.export(
            "organization-management-db-port",
            organization_management_postgres.port().apply(str),
        )
        pulumi.export(
            "organization-management-db-username",
            organization_management_postgres.username(),
        )
        pulumi.export(
            "organization-management-db-password",
            organization_management_postgres.password(),
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_postgres.host())
        pulumi.export("plugin-work-queue-db-port",
                      plugin_work_queue_postgres.port().apply(str))
        pulumi.export(
            "plugin-work-queue-db-username",
            plugin_work_queue_postgres.username(),
        )
        pulumi.export(
            "plugin-work-queue-db-password",
            plugin_work_queue_postgres.password(),
        )

        pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
        pulumi.export("redis-endpoint", cache.endpoint)

        prod_grapl_core_vars: Final[NomadVars] = dict(
            # The vars with a leading underscore indicate that the hcl local version of the variable should be used
            # instead of the var version.
            organization_management_db_hostname=organization_management_postgres.host(),
            organization_management_db_port=organization_management_postgres.port().apply(str),
            organization_management_db_username=organization_management_postgres.username(),
            organization_management_db_password=organization_management_postgres.password(),
            plugin_registry_db_hostname=plugin_registry_postgres.host(),
            plugin_registry_db_port=plugin_registry_postgres.port().apply(str),
            plugin_registry_db_username=plugin_registry_postgres.username(),
            plugin_registry_db_password=plugin_registry_postgres.password(),
            plugin_work_queue_db_hostname=plugin_work_queue_postgres.host(),
            plugin_work_queue_db_port=plugin_work_queue_postgres.port().apply(str),
            plugin_work_queue_db_username=plugin_work_queue_postgres.username(),
            plugin_work_queue_db_password=plugin_work_queue_postgres.password(),
            redis_endpoint=cache.endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=prod_grapl_core_vars,
            opts=pulumi.ResourceOptions(
                provider=nomad_provider,
                custom_timeouts=CustomTimeouts(
                    create=nomad_grapl_core_timeout,
                    update=nomad_grapl_core_timeout),
            ),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(
                depends_on=[
                    nomad_grapl_core.job,
                ],
                provider=nomad_provider,
            ),
        )

        api_gateway = ApiGateway(
            "grapl-api-gateway",
            nomad_agents_alb_security_group=nomad_agent_alb_security_group_id,
            nomad_agents_alb_listener_arn=nomad_agent_alb_listener_arn,
            nomad_agents_private_subnet_ids=nomad_agent_subnet_ids,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_ingress.job]),
        )
        pulumi.export("stage-url", api_gateway.stage.invoke_url)

        # Describes resources that should be destroyed/updated between
        # E2E-in-AWS runs.
        pulumi.export(
            "stateful-resource-urns",
            [
                # grapl-core contains our dgraph instances
                nomad_grapl_core.urn,
                # We need to re-provision after we start a new dgraph
                nomad_grapl_provision.urn,
                dynamodb_tables.urn,
            ],
        )

    OpsAlarms(name="ops-alarms")
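
For reference, the _get_subset helper used above when building provision_vars is not part of this snippet; it presumably just projects the named keys out of nomad_inputs. A minimal sketch of such a helper (an assumption, not the original implementation):

from typing import AbstractSet, Any, Mapping


def _get_subset(inputs: Mapping[str, Any], keys: AbstractSet[str]) -> dict:
    # Assumed behavior: keep only the requested keys; the real helper may
    # additionally assert that every requested key is present.
    return {k: v for k, v in inputs.items() if k in keys}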
Example #3
import infra.network as network
import infra.storage as storage
import infra.iam as iam
import infra.lambdas as lambdas
import infra.socketapi as socketapi
import os
import sys
from pulumi import Config
from pulumi import export
from pulumi_aws import get_region, get_caller_identity

region = get_region()
account = get_caller_identity().account_id
appcode_path = os.environ.get("CHATAPP_LIB", None)
stage_config = Config().require_object("stage_config")
if not appcode_path:
    appcode_path = os.path.curdir
vpc_details = network.setup_vpc()
security_groups = network.create_firewall_rules(vpc_details['vpc'])
storage_nodes = storage.create_storage_nodes(vpc_details['private_subnets'],
                                             security_groups)
lambda_roles = iam.create_lambda_execution_roles(region, account)
lambda_layer = lambdas.create_lambda_layers(appcode_path)
socket_api = socketapi.create_websocket_api()
lambda_functions = lambdas.create_functions(
    appcode_path=appcode_path,
    region=region,
    account=account,
    stage=stage_config['stage'],
    lambda_execution_role=lambda_roles['role'],
    lambda_layers=lambda_layer,
    subnets=vpc_details['private_subnets'],

Example #4
import pulumi, json
from pulumi import Output, ResourceOptions, export
from pulumi_aws import (s3, cloudtrail, cloudwatch, iam, get_caller_identity,
                        get_region)

# region and account ID
region = get_region().name
account_id = get_caller_identity().account_id

# Create s3 bucket for CloudTrail logging
bucket = s3.Bucket('cloudtrail-s3', force_destroy=True)


# Helper to build the CloudTrail bucket policy document.
def bucket_policy_cloudtrail(bucket_name):
    return json.dumps({
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "AWSCloudTrailAclCheck",
            "Effect": "Allow",
            "Principal": {
                "Service": "cloudtrail.amazonaws.com"
            },
            "Action": "s3:GetBucketAcl",
            "Resource": f"arn:aws:s3:::{bucket_name}"
        }, {
            "Sid": "AWSCloudTrailWrite",
            "Effect": "Allow",
            "Principal": {
                "Service": "cloudtrail.amazonaws.com"
Example #5
import os

import pulumi
import pulumi_aws as aws
import pulumi_eks as eks

project_name = pulumi.get_project()

# For CI testing only: an alternate AWS profile must be supplied via the ALT_AWS_PROFILE env var.
if not os.getenv("ALT_AWS_PROFILE"):
    raise Exception("ALT_AWS_PROFILE must be set")

# AWS named profile to use.
profile_name = os.getenv("ALT_AWS_PROFILE")

# Create an AWS provider instance using the named profile creds
# and current region.
aws_provider = aws.Provider("aws-provider",
                            profile=profile_name,
                            region=aws.get_region().name)

# Define the AWS provider credential opts to configure the cluster's
# kubeconfig auth.
kubeconfig_opts = eks.KubeconfigOptionsArgs(profile_name=profile_name)

# Create the cluster using the AWS provider and credential opts.
cluster = eks.Cluster(project_name,
                      provider_credential_opts=kubeconfig_opts,
                      opts=pulumi.ResourceOptions(provider=aws_provider))

# Export the cluster kubeconfig.
pulumi.export("kubeconfig", cluster.kubeconfig)
Example #6
def main() -> None:
    ##### Preamble
    stack_name = config.STACK_NAME

    pulumi_config = pulumi.Config()
    artifacts = ArtifactGetter.from_config(pulumi_config)

    # These tags will be added to all provisioned infrastructure
    # objects.
    register_auto_tags(
        {"pulumi:project": pulumi.get_project(), "pulumi:stack": stack_name}
    )

    nomad_provider: Optional[pulumi.ProviderResource] = None
    if not config.LOCAL_GRAPL:
        nomad_server_stack = pulumi.StackReference(f"grapl/nomad/{stack_name}")
        nomad_provider = get_nomad_provider_address(nomad_server_stack)

    ##### Business Logic
    grapl_stack = GraplStack(stack_name)

    e2e_test_job_vars: NomadVars = {
        "analyzer_bucket": grapl_stack.analyzer_bucket,
        "aws_env_vars_for_local": grapl_stack.aws_env_vars_for_local,
        "aws_region": aws.get_region().name,
        "container_images": _e2e_container_images(artifacts),
        # Used by graplctl to determine if it should manual-event or not
        "stack_name": grapl_stack.upstream_stack_name,
        "kafka_bootstrap_servers": grapl_stack.kafka_bootstrap_servers,
        "kafka_sasl_username": grapl_stack.kafka_e2e_sasl_username,
        "kafka_sasl_password": grapl_stack.kafka_e2e_sasl_password,
        "kafka_consumer_group_name": grapl_stack.kafka_e2e_consumer_group_name,
        "schema_properties_table_name": grapl_stack.schema_properties_table_name,
        "sysmon_log_bucket": grapl_stack.sysmon_log_bucket,
        "schema_table_name": grapl_stack.schema_table_name,
        "sysmon_generator_queue": grapl_stack.sysmon_generator_queue,
        "test_user_name": grapl_stack.test_user_name,
        "test_user_password_secret_id": grapl_stack.test_user_password_secret_id,
    }

    e2e_tests = NomadJob(
        "e2e-tests",
        jobspec=path_from_root("nomad/e2e-tests.nomad").resolve(),
        vars=e2e_test_job_vars,
        opts=pulumi.ResourceOptions(provider=nomad_provider),
    )

    if config.LOCAL_GRAPL:
        # We don't do integration tests in AWS yet, mostly because the current
        # Python Pants integration test setup is funky and requires an on-disk
        # Grapl repo.

        integration_test_job_vars: NomadVars = {
            "aws_env_vars_for_local": grapl_stack.aws_env_vars_for_local,
            "aws_region": aws.get_region().name,
            "container_images": _integration_container_images(artifacts),
            "docker_user": os.environ["DOCKER_USER"],
            "grapl_root": os.environ["GRAPL_ROOT"],
            "kafka_bootstrap_servers": grapl_stack.kafka_bootstrap_servers,
            "kafka_sasl_username": grapl_stack.kafka_e2e_sasl_username,
            "kafka_sasl_password": grapl_stack.kafka_e2e_sasl_password,
            "redis_endpoint": grapl_stack.redis_endpoint,
            "schema_properties_table_name": grapl_stack.schema_properties_table_name,
            "test_user_name": grapl_stack.test_user_name,
            "test_user_password_secret_id": grapl_stack.test_user_password_secret_id,
            "plugin_work_queue_db_hostname": grapl_stack.plugin_work_queue_db_hostname,
            "plugin_work_queue_db_port": grapl_stack.plugin_work_queue_db_port,
            "plugin_work_queue_db_username": grapl_stack.plugin_work_queue_db_username,
            "plugin_work_queue_db_password": grapl_stack.plugin_work_queue_db_password,
            "organization_management_db_hostname": grapl_stack.organization_management_db_hostname,
            "organization_management_db_port": grapl_stack.organization_management_db_port,
            "organization_management_db_username": grapl_stack.organization_management_db_username,
            "organization_management_db_password": grapl_stack.organization_management_db_password,
        }

        integration_tests = NomadJob(
            "integration-tests",
            jobspec=path_from_root("nomad/local/integration-tests.nomad").resolve(),
            vars=integration_test_job_vars,
            opts=pulumi.ResourceOptions(provider=nomad_provider),
        )
Example #7
import pulumi
import pulumi_aws as aws

# Providers for the requester ("local") and accepter ("peer") regions.
local = aws.Provider("local", region="us-east-1")
peer = aws.Provider("peer", region="us-west-2")

# Look up the peer region through the peer provider.
peer_region = aws.get_region(opts=pulumi.InvokeOptions(provider=peer))

local_transit_gateway = aws.ec2transitgateway.TransitGateway(
    "localTransitGateway",
    tags={"Name": "Local TGW"},
    opts=pulumi.ResourceOptions(provider=local))
peer_transit_gateway = aws.ec2transitgateway.TransitGateway(
    "peerTransitGateway",
    tags={"Name": "Peer TGW"},
    opts=pulumi.ResourceOptions(provider=peer))
example = aws.ec2transitgateway.PeeringAttachment(
    "example",
    peer_account_id=peer_transit_gateway.owner_id,
    peer_region=peer_region.name,
    peer_transit_gateway_id=peer_transit_gateway.id,
    transit_gateway_id=local_transit_gateway.id,
    tags={
        "Name": "TGW Peering Requestor",
    },
    opts=pulumi.ResourceOptions(provider=local))