Example #1
def expressroute_gateway(stem, subnet_id, depends_on=None):
    er_gw_pip = network.PublicIp(
        f'{stem}{s}er{s}gw{s}pip{s}{suffix}',
        resource_group_name=resource_group_name,
        location=location,
        allocation_method='Dynamic',
        tags=tags,
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
    er_gw = network.VirtualNetworkGateway(
        f'{stem}{s}er{s}gw{s}{suffix}',
        resource_group_name=resource_group_name,
        location=location,
        sku='Standard',
        type='ExpressRoute',
        vpn_type='RouteBased',
        ip_configurations=[
            network.VirtualNetworkGatewayIpConfigurationArgs(
                name=f'{stem}{s}er{s}gw{s}ipc',
                public_ip_address_id=er_gw_pip.id,
                subnet_id=subnet_id,
            )
        ],
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return er_gw
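
Note: the helper above is excerpted from inside a larger component, so names such as s, suffix, resource_group_name, location, tags, and the parent=self option refer to the enclosing ComponentResource and are not defined in the snippet itself (the same pattern recurs in later examples). A minimal sketch of the values it assumes, using the classic pulumi_azure provider whose resource names match this example; everything below is an illustrative assumption, not part of the original:

import pulumi_azure as azure
from pulumi import ResourceOptions
from pulumi.resource import CustomTimeouts

network = azure.network                # provider module referenced as `network`
s = '-'                                # separator used when composing names
suffix = 'dev'                         # per-stack suffix appended to names
resource_group_name = 'hub-rg'         # resource group that receives the gateway
location = 'westeurope'                # Azure region
tags = {'environment': 'dev'}          # tags applied to every resource
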
Example #2
def expressroute_gateway(stem, subnet_id, depends_on=None):
    er_gw_pip = network.PublicIp(
        f'{stem}-er-gw-pip-',
        resource_group_name=resource_group_name,
        allocation_method='Dynamic',
        tags=tags,
        opts=ResourceOptions(parent=self),
    )
    er_gw = network.VirtualNetworkGateway(
        f'{stem}-er-gw-',
        resource_group_name=resource_group_name,
        sku='Standard',
        type='ExpressRoute',
        vpn_type='RouteBased',
        ip_configurations=[{
            'name': f'{stem}-er-gw-ipconf',
            'publicIpAddressId': er_gw_pip.id,
            'subnet_id': subnet_id,
        }],
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return er_gw
Example #3
def vpn_gateway(stem, subnet_id, depends_on=None):
    vpn_gw_pip = network.PublicIp(
        f'{stem}-vpn-gw-pip-',
        resource_group_name=resource_group_name,
        location=location,
        allocation_method='Dynamic',
        tags=tags,
        opts=ResourceOptions(parent=self),
    )
    vpn_gw = network.VirtualNetworkGateway(
        f'{stem}-vpn-gw-',
        resource_group_name=resource_group_name,
        location=location,
        sku='VpnGw1',
        type='Vpn',
        vpn_type='RouteBased',
        ip_configurations=[{
            'name': f'{stem}-vpn-gw-ipconf',
            'subnet_id': subnet_id,
            'publicIpAddressId': vpn_gw_pip.id,
        }],
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(create='1h',
                                           update='1h',
                                           delete='1h'),
        ),
    )
    return vpn_gw
Example #4
def vpn_gateway(stem, subnet_id, depends_on=None):
    vpn_gw_pip = network.PublicIp(
        f'{stem}-vpn-gw-pip-',
        resource_group_name=resource_group_name,
        allocation_method='Dynamic',
        tags=tags,
        opts=ResourceOptions(parent=self),
    )
    vpn_gw = network.VirtualNetworkGateway(
        f'{stem}-vpn-gw-',
        resource_group_name=resource_group_name,
        sku='VpnGw1',
        type='Vpn',
        vpn_type='RouteBased',
        ip_configurations=[
            network.VirtualNetworkGatewayIpConfigurationArgs(
                name=f'{stem}-vpn-gw-ipconf',
                public_ip_address_id=vpn_gw_pip.id,
                subnet_id=subnet_id,
            )
        ],
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return vpn_gw
Example #5
def firewall(stem, subnet_id, depends_on=None):
    fw_pip = network.PublicIp(
        f'{stem}-fw-pip-',
        resource_group_name=resource_group_name,
        sku='Standard',
        allocation_method='Static',
        tags=tags,
        opts=ResourceOptions(parent=self),
    )
    fw = network.Firewall(
        f'{stem}-fw-',
        resource_group_name=resource_group_name,
        ip_configurations=[{
            'name': f'{stem}-fw-ipconf',
            'subnet_id': subnet_id,
            'publicIpAddressId': fw_pip.id,
        }],
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return fw
Example #6
def firewall(stem, fw_sn_id, fwm_sn_id, private_ranges, depends_on=None):
    fw_pip = network.PublicIPAddress(
        f'{stem}{s}fw{s}pip',
        public_ip_address_name=f'{stem}{s}fw{s}pip{s}{suffix}',
        resource_group_name=resource_group_name,
        location=location,
        sku=network.PublicIPAddressSkuArgs(name='Standard', ),
        public_ip_allocation_method='Static',
        tags=tags,
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
    fwm_pip = network.PublicIPAddress(
        f'{stem}{s}fwm{s}pip',
        public_ip_address_name=f'{stem}{s}fwm{s}pip{s}{suffix}',
        resource_group_name=resource_group_name,
        location=location,
        sku=network.PublicIPAddressSkuArgs(name='Standard', ),
        public_ip_allocation_method='Static',
        tags=tags,
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
    fw = network.AzureFirewall(
        f'{stem}{s}fw',
        azure_firewall_name=f'{stem}{s}fw{s}{suffix}',
        resource_group_name=resource_group_name,
        location=location,
        additional_properties={
            "Network.SNAT.PrivateRanges": private_ranges,
        },
        sku=network.AzureFirewallSkuArgs(
            name='AZFW_VNet',
            tier='Standard',
        ),
        ip_configurations=[
            network.AzureFirewallIPConfigurationArgs(
                name=f'{stem}{s}fw{s}ipconf{s}{suffix}',
                public_ip_address=network.PublicIPAddressArgs(id=fw_pip.id, ),
                subnet=network.SubnetArgs(id=fw_sn_id, ),
            )
        ],
        management_ip_configuration=network.AzureFirewallIPConfigurationArgs(
            name=f'{stem}{s}fwm{s}ipconf{s}{suffix}',
            public_ip_address=network.PublicIPAddressArgs(id=fwm_pip.id, ),
            subnet=network.SubnetArgs(id=fwm_sn_id, ),
        ),
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return fw
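
Examples #6 and #9 use the newer azure-native provider (explicit *_name arguments and typed *Args classes such as PublicIPAddressSkuArgs), so they presumably rely on an import along these lines; this is an assumption, since the snippets omit their imports:

import pulumi_azure_native.network as network
from pulumi import ResourceOptions
from pulumi.resource import CustomTimeouts
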
Example #7
def firewall(stem, fw_sn_id, fwm_sn_id, private_ranges, depends_on=None):
    fw_pip = network.PublicIp(
        f'{stem}-fw-pip-',
        resource_group_name=resource_group_name,
        sku='Standard',
        allocation_method='Static',
        tags=tags,
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
    fwm_pip = network.PublicIp(
        f'{stem}-fwm-pip-',
        resource_group_name=resource_group_name,
        sku='Standard',
        allocation_method='Static',
        tags=tags,
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
    fw = network.Firewall(
        f'{stem}-fw-',
        resource_group_name=resource_group_name,
        #        additional_properties = {
        #            "Network.SNAT.PrivateRanges": private_ranges,
        #        },
        #        sku = 'AZFW_VNet',
        ip_configurations=[
            network.FirewallIpConfigurationArgs(
                name=f'{stem}-fw-ipconf',
                public_ip_address_id=fw_pip.id,
                subnet_id=fw_sn_id,
            )
        ],
        management_ip_configuration=network.FirewallIpConfigurationArgs(
            name=f'{stem}-fwm-ipconf',
            public_ip_address_id=fwm_pip.id,
            subnet_id=fwm_sn_id,
        ),
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return fw
Example #8
def firewall(stem, fw_sn_id, fwm_sn_id, depends_on=None):
    fw_pip = network.PublicIp(
        f'{stem}-fw-pip-',
        resource_group_name=resource_group_name,
        sku='Standard',
        allocation_method='Static',
        tags=tags,
        opts=ResourceOptions(parent=self),
    )
    #    fwm_pip = network.PublicIp( # requires api 2019-11-01 or later
    #        f'{stem}-fwm-pip-',
    #        resource_group_name = resource_group_name,
    #        sku = 'Standard',
    #        allocation_method = 'Static',
    #        tags = tags,
    #        opts = ResourceOptions(parent=self),
    #    )
    fw = network.Firewall(
        f'{stem}-fw-',
        resource_group_name=resource_group_name,
        #        sku = 'AZFW_VNet', # not required but distinguishes from 'AZFW_Hub'
        ip_configurations=[
            network.FirewallIpConfigurationArgs(
                name=f'{stem}-fw-ipconf',
                public_ip_address_id=fw_pip.id,
                subnet_id=fw_sn_id,
            )
        ],
        #        management_ip_configuration = { # requires api 2019-11-01 or later
        #            'name': f'{stem}-fwm-ipconf',
        #            'publicIpAddressId': fwm_pip.id,
        #            'subnet_id': fwm_sn_id,
        #        },
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return fw
Example #9
def vpn_gateway(stem, subnet_id, depends_on=None):
    vpn_gw_pip = network.PublicIPAddress(
        f'{stem}{s}vpn{s}gw{s}pip',
        public_ip_address_name=f'{stem}{s}vpn{s}gw{s}pip{s}{suffix}',
        resource_group_name=resource_group_name,
        location=location,
        public_ip_allocation_method='Dynamic',
        tags=tags,
        opts=ResourceOptions(parent=self, depends_on=depends_on),
    )
    vpn_gw = network.VirtualNetworkGateway(
        f'{stem}{s}vpn{s}gw',
        virtual_network_gateway_name=f'{stem}{s}vpn{s}gw{s}{suffix}',
        resource_group_name=resource_group_name,
        location=location,
        sku=network.VirtualNetworkGatewaySkuArgs(
            name='VpnGw1',
            tier='VpnGw1',
        ),
        gateway_type='Vpn',
        vpn_type='RouteBased',
        enable_bgp=True,
        ip_configurations=[
            network.VirtualNetworkGatewayIPConfigurationArgs(
                name=f'{stem}{s}vpn{s}gw{s}ipconf{s}{suffix}',
                public_ip_address=network.PublicIPAddressArgs(
                    id=vpn_gw_pip.id, ),
                subnet=network.SubnetArgs(id=subnet_id, ),
            )
        ],
        tags=tags,
        opts=ResourceOptions(
            parent=self,
            depends_on=depends_on,
            custom_timeouts=CustomTimeouts(
                create='1h',
                update='1h',
                delete='1h',
            ),
        ),
    )
    return vpn_gw
Example #10
def main() -> None:
    pulumi_config = pulumi.Config()
    artifacts = ArtifactGetter.from_config(pulumi_config)

    # These tags will be added to all provisioned infrastructure
    # objects.
    register_auto_tags({
        "pulumi:project": pulumi.get_project(),
        "pulumi:stack": config.STACK_NAME
    })

    upstream_stacks: Optional[UpstreamStacks] = None
    nomad_provider: Optional[pulumi.ProviderResource] = None
    consul_provider: Optional[pulumi.ProviderResource] = None
    if not config.LOCAL_GRAPL:
        upstream_stacks = UpstreamStacks()
        nomad_provider = get_nomad_provider_address(
            upstream_stacks.nomad_server)
        # Using get_output instead of require_output so that preview passes.
        # NOTE wimax Feb 2022: Not sure the above is still the case
        consul_master_token_secret_id = upstream_stacks.consul.get_output(
            "consul-master-token-secret-id")
        consul_provider = get_consul_provider_address(
            upstream_stacks.consul, {"token": consul_master_token_secret_id})

    pulumi.export("test-user-name", config.GRAPL_TEST_USER_NAME)
    test_user_password = TestUserPassword()
    pulumi.export("test-user-password-secret-id", test_user_password.secret_id)

    # TODO: temporarily disabled until we can reconnect the ApiGateway to the new
    # web UI.
    # jwt_secret = JWTSecret()

    dynamodb_tables = dynamodb.DynamoDB()

    # TODO: Create these emitters inside the service abstraction if nothing
    # else uses them (or perhaps even if something else *does* use them)
    sysmon_log_emitter = emitter.EventEmitter("sysmon-log")
    osquery_log_emitter = emitter.EventEmitter("osquery-log")
    unid_subgraphs_generated_emitter = emitter.EventEmitter(
        "unid-subgraphs-generated")
    subgraphs_generated_emitter = emitter.EventEmitter("subgraphs-generated")
    subgraphs_merged_emitter = emitter.EventEmitter("subgraphs-merged")
    dispatched_analyzer_emitter = emitter.EventEmitter("dispatched-analyzer")

    analyzer_matched_emitter = emitter.EventEmitter(
        "analyzer-matched-subgraphs")
    pulumi.export("analyzer-matched-subgraphs-bucket",
                  analyzer_matched_emitter.bucket_name)

    all_emitters = [
        sysmon_log_emitter,
        osquery_log_emitter,
        unid_subgraphs_generated_emitter,
        subgraphs_generated_emitter,
        subgraphs_merged_emitter,
        dispatched_analyzer_emitter,
        analyzer_matched_emitter,
    ]

    sysmon_generator_queue = ServiceQueue("sysmon-generator")
    sysmon_generator_queue.subscribe_to_emitter(sysmon_log_emitter)

    osquery_generator_queue = ServiceQueue("osquery-generator")
    osquery_generator_queue.subscribe_to_emitter(osquery_log_emitter)

    node_identifier_queue = ServiceQueue("node-identifier")
    node_identifier_queue.subscribe_to_emitter(
        unid_subgraphs_generated_emitter)

    graph_merger_queue = ServiceQueue("graph-merger")
    graph_merger_queue.subscribe_to_emitter(subgraphs_generated_emitter)

    analyzer_dispatcher_queue = ServiceQueue("analyzer-dispatcher")
    analyzer_dispatcher_queue.subscribe_to_emitter(subgraphs_merged_emitter)

    analyzer_executor_queue = ServiceQueue("analyzer-executor")
    analyzer_executor_queue.subscribe_to_emitter(dispatched_analyzer_emitter)

    engagement_creator_queue = ServiceQueue("engagement-creator")
    engagement_creator_queue.subscribe_to_emitter(analyzer_matched_emitter)

    analyzers_bucket = Bucket("analyzers-bucket", sse=True)
    pulumi.export("analyzers-bucket", analyzers_bucket.bucket)
    model_plugins_bucket = Bucket("model-plugins-bucket", sse=False)
    pulumi.export("model-plugins-bucket", model_plugins_bucket.bucket)

    plugins_bucket = Bucket("plugins-bucket", sse=True)
    pulumi.export("plugins-bucket", plugins_bucket.bucket)

    plugin_buckets = [
        analyzers_bucket,
        model_plugins_bucket,
    ]

    firecracker_s3objs = FirecrackerS3BucketObjects(
        "firecracker-s3-bucket-objects",
        plugins_bucket=plugins_bucket,
        firecracker_assets=FirecrackerAssets(
            "firecracker-assets",
            repository_name=config.cloudsmith_repository_name(),
            artifacts=artifacts,
        ),
    )

    # To learn more about this syntax, see
    # https://docs.rs/env_logger/0.9.0/env_logger/#enabling-logging
    rust_log_levels = ",".join([
        "DEBUG",
        "h2::codec=WARN",
        "hyper=WARN",
        "rusoto_core=WARN",
        "rustls=WARN",
        "serde_xml_rs=WARN",
    ])
    py_log_level = "DEBUG"

    aws_env_vars_for_local = _get_aws_env_vars_for_local()
    pulumi.export("aws-env-vars-for-local", aws_env_vars_for_local)

    # These are shared across both local and prod deployments.
    nomad_inputs: Final[NomadVars] = dict(
        analyzer_bucket=analyzers_bucket.bucket,
        analyzer_dispatched_bucket=dispatched_analyzer_emitter.bucket_name,
        analyzer_dispatcher_queue=analyzer_dispatcher_queue.main_queue_url,
        analyzer_executor_queue=analyzer_executor_queue.main_queue_url,
        analyzer_matched_subgraphs_bucket=analyzer_matched_emitter.bucket_name,
        analyzer_dispatcher_dead_letter_queue=analyzer_dispatcher_queue.
        dead_letter_queue_url,
        aws_env_vars_for_local=aws_env_vars_for_local,
        aws_region=aws.get_region().name,
        container_images=_container_images(artifacts),
        engagement_creator_queue=engagement_creator_queue.main_queue_url,
        graph_merger_queue=graph_merger_queue.main_queue_url,
        graph_merger_dead_letter_queue=graph_merger_queue.
        dead_letter_queue_url,
        model_plugins_bucket=model_plugins_bucket.bucket,
        node_identifier_queue=node_identifier_queue.main_queue_url,
        node_identifier_dead_letter_queue=node_identifier_queue.
        dead_letter_queue_url,
        node_identifier_retry_queue=node_identifier_queue.retry_queue_url,
        osquery_generator_queue=osquery_generator_queue.main_queue_url,
        osquery_generator_dead_letter_queue=osquery_generator_queue.
        dead_letter_queue_url,
        py_log_level=py_log_level,
        rust_log=rust_log_levels,
        schema_properties_table_name=dynamodb_tables.schema_properties_table.
        name,
        schema_table_name=dynamodb_tables.schema_table.name,
        session_table_name=dynamodb_tables.dynamic_session_table.name,
        subgraphs_merged_bucket=subgraphs_merged_emitter.bucket_name,
        subgraphs_generated_bucket=subgraphs_generated_emitter.bucket_name,
        sysmon_generator_queue=sysmon_generator_queue.main_queue_url,
        sysmon_generator_dead_letter_queue=sysmon_generator_queue.
        dead_letter_queue_url,
        test_user_name=config.GRAPL_TEST_USER_NAME,
        unid_subgraphs_generated_bucket=unid_subgraphs_generated_emitter.
        bucket_name,
        user_auth_table=dynamodb_tables.user_auth_table.name,
        user_session_table=dynamodb_tables.user_session_table.name,
        plugin_registry_kernel_artifact_url=firecracker_s3objs.
        kernel_s3obj_url,
        plugin_s3_bucket_aws_account_id=config.AWS_ACCOUNT_ID,
        plugin_s3_bucket_name=plugins_bucket.bucket,
    )

    provision_vars: Final[NomadVars] = {
        "test_user_password_secret_id":
        test_user_password.secret_id,
        **_get_subset(
            nomad_inputs,
            {
                "aws_env_vars_for_local",
                "aws_region",
                "container_images",
                "py_log_level",
                "schema_properties_table_name",
                "schema_table_name",
                "test_user_name",
                "user_auth_table",
            },
        ),
    }

    nomad_grapl_core_timeout = "5m"

    kafka = Kafka(
        "kafka",
        confluent_environment_name=pulumi_config.require(
            "confluent-environment-name"),
    )
    e2e_service_credentials = kafka.service_credentials(
        service_name="e2e-test-runner")

    pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
    pulumi.export("kafka-e2e-sasl-username",
                  e2e_service_credentials.apply(lambda c: c.api_key))
    pulumi.export("kafka-e2e-sasl-password",
                  e2e_service_credentials.apply(lambda c: c.api_secret))
    pulumi.export("kafka-e2e-consumer-group-name",
                  kafka.consumer_group("e2e-test-runner"))

    nomad_grapl_ingress = NomadJob(
        "grapl-ingress",
        jobspec=path_from_root("nomad/grapl-ingress.nomad").resolve(),
        vars={},
        opts=pulumi.ResourceOptions(provider=nomad_provider),
    )

    ConsulIntentions(
        "consul-intentions",
        # consul-intentions are stored in the nomad directory so that engineers remember to create/update intentions
        # when they update nomad configs
        intention_directory=path_from_root(
            "nomad/consul-intentions").resolve(),
        opts=pulumi.ResourceOptions(provider=consul_provider),
    )

    if config.LOCAL_GRAPL:
        ###################################
        # Local Grapl
        ###################################
        organization_management_db = LocalPostgresInstance(
            name="organization-management-db",
            port=5632,
        )

        plugin_registry_db = LocalPostgresInstance(
            name="plugin-registry-db",
            port=5432,
        )

        plugin_work_queue_db = LocalPostgresInstance(
            name="plugin-work-queue-db",
            port=5532,
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_db.hostname)
        pulumi.export("plugin-work-queue-db-port",
                      str(plugin_work_queue_db.port))
        pulumi.export("plugin-work-queue-db-username",
                      plugin_work_queue_db.username)
        pulumi.export("plugin-work-queue-db-password",
                      plugin_work_queue_db.password)

        # TODO: ADD EXPORTS FOR PLUGIN-REGISTRY

        pulumi.export("organization-management-db-hostname",
                      organization_management_db.hostname)
        pulumi.export("organization-management-db-port",
                      str(organization_management_db.port))
        pulumi.export("organization-management-db-username",
                      organization_management_db.username)
        pulumi.export("organization-management-db-password",
                      organization_management_db.password)

        redis_endpoint = f"redis://{config.HOST_IP_IN_NOMAD}:6379"

        pulumi.export("redis-endpoint", redis_endpoint)

        local_grapl_core_vars: Final[NomadVars] = dict(
            organization_management_db_hostname=organization_management_db.
            hostname,
            organization_management_db_port=str(
                organization_management_db.port),
            organization_management_db_username=organization_management_db.
            username,
            organization_management_db_password=organization_management_db.
            password,
            plugin_registry_db_hostname=plugin_registry_db.hostname,
            plugin_registry_db_port=str(plugin_registry_db.port),
            plugin_registry_db_username=plugin_registry_db.username,
            plugin_registry_db_password=plugin_registry_db.password,
            plugin_work_queue_db_hostname=plugin_work_queue_db.hostname,
            plugin_work_queue_db_port=str(plugin_work_queue_db.port),
            plugin_work_queue_db_username=plugin_work_queue_db.username,
            plugin_work_queue_db_password=plugin_work_queue_db.password,
            redis_endpoint=redis_endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=local_grapl_core_vars,
            opts=ResourceOptions(custom_timeouts=CustomTimeouts(
                create=nomad_grapl_core_timeout,
                update=nomad_grapl_core_timeout)),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_core.job]),
        )

    else:
        ###################################
        # AWS Grapl
        ###################################
        # We use stack outputs from internally developed projects
        # We assume that the stack names will match the grapl stack name
        assert upstream_stacks, "Upstream stacks previously initialized"

        vpc_id = upstream_stacks.networking.require_output("grapl-vpc")
        subnet_ids = upstream_stacks.networking.require_output(
            "grapl-private-subnet-ids")
        nomad_agent_security_group_id = upstream_stacks.nomad_agents.require_output(
            "security-group")
        nomad_agent_alb_security_group_id = upstream_stacks.nomad_agents.require_output(
            "alb-security-group")
        nomad_agent_alb_listener_arn = upstream_stacks.nomad_agents.require_output(
            "alb-listener-arn")
        nomad_agent_subnet_ids = upstream_stacks.networking.require_output(
            "nomad-agents-private-subnet-ids")
        nomad_agent_role = aws.iam.Role.get(
            "nomad-agent-role",
            id=upstream_stacks.nomad_agents.require_output("iam-role"),
            # NOTE: It's somewhat odd to set a StackReference as a parent
            opts=pulumi.ResourceOptions(parent=upstream_stacks.nomad_agents),
        )

        availability_zone: pulumi.Output[str] = pulumi.Output.from_input(
            subnet_ids).apply(subnets_to_single_az)

        for _bucket in plugin_buckets:
            _bucket.grant_put_permission_to(nomad_agent_role)
            # Analyzer Dispatcher needs to be able to ListObjects on Analyzers
            # Analyzer Executor needs to be able to ListObjects on Model Plugins
            _bucket.grant_get_and_list_to(nomad_agent_role)
        for _emitter in all_emitters:
            _emitter.grant_write_to(nomad_agent_role)
            _emitter.grant_read_to(nomad_agent_role)

        cache = Cache(
            "main-cache",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        organization_management_postgres = Postgres(
            name="organization-management",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_registry_postgres = Postgres(
            name="plugin-registry",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_work_queue_postgres = Postgres(
            name="plugin-work-queue",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        pulumi.export(
            "organization-management-db-hostname",
            organization_management_postgres.host(),
        )
        pulumi.export(
            "organization-management-db-port",
            organization_management_postgres.port().apply(str),
        )
        pulumi.export(
            "organization-management-db-username",
            organization_management_postgres.username(),
        )
        pulumi.export(
            "organization-management-db-password",
            organization_management_postgres.password(),
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_postgres.host())
        pulumi.export("plugin-work-queue-db-port",
                      plugin_work_queue_postgres.port().apply(str))
        pulumi.export(
            "plugin-work-queue-db-username",
            plugin_work_queue_postgres.username(),
        )
        pulumi.export(
            "plugin-work-queue-db-password",
            plugin_work_queue_postgres.password(),
        )

        pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
        pulumi.export("redis-endpoint", cache.endpoint)

        prod_grapl_core_vars: Final[NomadVars] = dict(
            # The vars with a leading underscore indicate that the hcl local version of the variable should be used
            # instead of the var version.
            organization_management_db_hostname=organization_management_postgres
            .host(),
            organization_management_db_port=organization_management_postgres.
            port().apply(str),
            organization_management_db_username=organization_management_postgres
            .username(),
            organization_management_db_password=organization_management_postgres
            .password(),
            plugin_registry_db_hostname=plugin_registry_postgres.host(),
            plugin_registry_db_port=plugin_registry_postgres.port().apply(str),
            plugin_registry_db_username=plugin_registry_postgres.username(),
            plugin_registry_db_password=plugin_registry_postgres.password(),
            plugin_work_queue_db_hostname=plugin_work_queue_postgres.host(),
            plugin_work_queue_db_port=plugin_work_queue_postgres.port().apply(
                str),
            plugin_work_queue_db_username=plugin_work_queue_postgres.username(
            ),
            plugin_work_queue_db_password=plugin_work_queue_postgres.password(
            ),
            redis_endpoint=cache.endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=prod_grapl_core_vars,
            opts=pulumi.ResourceOptions(
                provider=nomad_provider,
                custom_timeouts=CustomTimeouts(
                    create=nomad_grapl_core_timeout,
                    update=nomad_grapl_core_timeout),
            ),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(
                depends_on=[
                    nomad_grapl_core.job,
                ],
                provider=nomad_provider,
            ),
        )

        api_gateway = ApiGateway(
            "grapl-api-gateway",
            nomad_agents_alb_security_group=nomad_agent_alb_security_group_id,
            nomad_agents_alb_listener_arn=nomad_agent_alb_listener_arn,
            nomad_agents_private_subnet_ids=nomad_agent_subnet_ids,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_ingress.job
                                                    ], ),
        )
        pulumi.export("stage-url", api_gateway.stage.invoke_url)

        # Describes resources that should be destroyed/updated between
        # E2E-in-AWS runs.
        pulumi.export(
            "stateful-resource-urns",
            [
                # grapl-core contains our dgraph instances
                nomad_grapl_core.urn,
                # We need to re-provision after we start a new dgraph
                nomad_grapl_provision.urn,
                dynamodb_tables.urn,
            ],
        )

    OpsAlarms(name="ops-alarms")
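
In a Pulumi Python program, a function like main() above is normally invoked from the project's __main__.py; a minimal sketch, where the module name is a hypothetical placeholder rather than part of the original:

# __main__.py -- assumed entry point that runs the deployment defined above
from deployment import main  # hypothetical module holding main()

main()
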
Example #11
# Copyright 2016-2018, Pulumi Corporation.  All rights reserved.

from pulumi import ComponentResource, Resource, ResourceOptions
from pulumi.resource import CustomTimeouts


class Resource1(ComponentResource):
    def __init__(self, name, opts=None):
        super().__init__("my:module:Resource", name, None, opts)


# Attempt to create a resource with a CustomTimeout
res1 = Resource1(
    "res1",
    opts=ResourceOptions(
        custom_timeouts=CustomTimeouts(create='30m', delete='15m')))
Example #12
# Copyright 2016-2018, Pulumi Corporation.  All rights reserved.

from pulumi import ComponentResource, Resource, ResourceOptions
from pulumi.resource import CustomTimeouts


class Resource1(ComponentResource):
    def __init__(self, name, opts=None):
        super().__init__("my:module:Resource", name, None, opts)


# Attempt to create a resource with a CustomTimeout
res1 = Resource1(
    "res1", opts=ResourceOptions(custom_timeouts=CustomTimeouts(create='30m')))
Example #13
resource_group = core.ResourceGroup(
    stack + '-vdc-rg-',
    tags=default_tags,
)

# Hub virtual network with gateway, firewall, DMZ and shared services subnets
hub1 = Hub(
    config.require('hub_stem'),
    HubProps(
        config=config,
        resource_group=resource_group,
        tags=default_tags,
        stack=stack,
    ),
    opts=ResourceOptions(
        custom_timeouts=CustomTimeouts(create='1h', update='1h', delete='1h')),
)

# Spoke virtual network for application environments
spoke1 = Spoke(
    config.require('spoke_stem'),
    SpokeProps(
        config=config,
        resource_group=resource_group,
        tags=default_tags,
        hub=hub1,
    ),
    opts=ResourceOptions(
        depends_on=[hub1.hub_vpn_gw, hub1.hub_er_gw, hub1.hub_fw],
        custom_timeouts=CustomTimeouts(create='1h'),
    ),
)
Example #14
# Copyright 2016-2018, Pulumi Corporation.  All rights reserved.

from pulumi import ComponentResource, Resource, ResourceOptions
from pulumi.resource import CustomTimeouts


class Resource1(ComponentResource):
    def __init__(self, name, opts=None):
        super().__init__("my:module:Resource", name, None, opts)


# Attempt to create a resource with a CustomTimeout
res3 = Resource1(
    "res3", opts=ResourceOptions(custom_timeouts=CustomTimeouts(delete='30m')))
Example #15
# Copyright 2016-2018, Pulumi Corporation.  All rights reserved.

from pulumi import ComponentResource, Resource, ResourceOptions
from pulumi.resource import CustomTimeouts


class Resource1(ComponentResource):
    def __init__(self, name, opts=None):
        super().__init__("my:module:Resource", name, None, opts)


# Attempt to create a resource with a CustomTimeout
res1 = Resource1(
    "res1", opts=ResourceOptions(custom_timeouts=CustomTimeouts(create='30m')))

# Also use the previous workaround method, which we should not regress upon
res2 = Resource1("res2",
                 opts=ResourceOptions(custom_timeouts={
                     'create': '15m',
                     'delete': '15m'
                 }))

res3 = Resource1(
    "res3", opts=ResourceOptions(custom_timeouts=CustomTimeouts(update='30m')))

res4 = Resource1(
    "res4", opts=ResourceOptions(custom_timeouts=CustomTimeouts(delete='30m')))
Example #16
# Copyright 2016-2018, Pulumi Corporation.  All rights reserved.

from pulumi import ComponentResource, Resource, ResourceOptions
from pulumi.resource import CustomTimeouts


class Resource1(ComponentResource):
    def __init__(self, name, opts=None):
        super().__init__("my:module:Resource", name, None, opts)


# Attempt to create a resource with a CustomTimeout
res2 = Resource1(
    "res2", opts=ResourceOptions(custom_timeouts=CustomTimeouts(update='30m')))