Example 1
from typing import Any, Mapping

import pulumi


def get_hashicorp_provider_address(
    pulumi_class: Any,
    provider_type: str,
    stack: pulumi.StackReference,
    additional_configs: Mapping[str, Any] = {},
) -> Any:
    """
    This supports getting a Provider object with an explicit address set.
    This will take the address from the pulumi config file if it is set or fall back to the address in the stack refererence. This allows using SSM port tunneling when run from a workstation.
    :param pulumi_class: Should be one of pulumi_consul or pulumi_nomad
    :param provider_type: One of "consul", "nomad"
    :param stack: The corresponding stack reference
    :return: pulumi.providerReference. Should be used in opts
    """
    override_address = pulumi.Config(provider_type).get("address")
    address = override_address or stack.require_output("address")
    return pulumi_class.Provider(provider_type, address=address, **additional_configs)
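
A minimal usage sketch (the organization, project, and stack names below are illustrative assumptions, not taken from the example above):

import pulumi
import pulumi_consul

# assumed stack that exports an "address" output for the Consul cluster
consul_stack = pulumi.StackReference("myorg/consul-cluster/production")
consul_provider = get_hashicorp_provider_address(pulumi_consul, "consul", consul_stack)
# the provider is then passed explicitly when declaring Consul resources, e.g.:
# pulumi_consul.Keys(..., opts=pulumi.ResourceOptions(provider=consul_provider))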
Example 2
    ft_ip = ip_address(forced_tunnel)  # check IP address is valid

# another stack may be peered in the same project, even across organizations
peer = config.get('peer')
porg = config.get('org')
proj = config.get('project')
if porg and not proj:  # assume the same project in other organization
    proj = project
if not porg:  # assume the same organization
    porg = ''
if not proj:  # assume the same project
    proj = ''
if not peer:
    reference = None
else:
    reference = StackReference(f'{porg}/{proj}/{peer}')
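    # a fully qualified stack reference name has the form '<organization>/<project>/<stack>',
    # e.g. 'acme/networking/prod' (illustrative values, not taken from this config)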

# validate firewall_address_space and hub_address_space
firewall_address_space = config.require('firewall_address_space')
fwz_nw = ip_network(firewall_address_space)
if not fwz_nw.is_private:
    raise ConfigError(['firewall_address_space'], 'must be private')
if fwz_nw.prefixlen > 24:
    raise ConfigError(['firewall_address_space'], 'must be /24 or larger')
hub_address_space = config.require('hub_address_space')
hub_nw = ip_network(hub_address_space)
if not hub_nw.is_private:
    raise ConfigError(['hub_address_space'], 'must be private')
if hub_nw.prefixlen > 24:
    raise ConfigError(['hub_address_space'], 'must be /24 or larger')
if fwz_nw.overlaps(hub_nw):
Example 3
    ft_ip = ip_address(forced_tunnel)  # check IP address is valid

# another stack may be peered in the same project, even across organizations
org = config.get('org')
peer = config.get('peer')
project = config.get('project')
if org and not project:
    project = get_project()
if not org:
    org = ''
if not project:
    project = ''
if not peer:
    reference = None
else:
    reference = StackReference(f'{org}/{project}/{peer}')

# validate firewall_address_space and hub_address_space
firewall_address_space = config.require('firewall_address_space')
fwz_nw = ip_network(firewall_address_space)
if not fwz_nw.is_private:
    raise ConfigError(['firewall_address_space'], 'must be private')
if fwz_nw.prefixlen > 24:
    raise ConfigError(['firewall_address_space'], 'must be /24 or larger')
hub_address_space = config.require('hub_address_space')
hub_nw = ip_network(hub_address_space)
if not hub_nw.is_private:
    raise ConfigError(['hub_address_space'], 'must be private')
if hub_nw.prefixlen > 24:
    raise ConfigError(['hub_address_space'], 'must be /24 or larger')
if fwz_nw.overlaps(hub_nw):
Example 4
    def __init__(self,
                 name: str,
                 props: HubProps,
                 opts: ResourceOptions = None):
        super().__init__('vdc:network:Hub', name, {}, opts)

        # set required vdc variables before calling functions
        vdc.resource_group_name = props.resource_group_name
        vdc.tags = props.tags
        vdc.self = self

        # calculate the subnets in the firewall_address_space
        fwz_nw = ip_network(props.firewall_address_space)
        fwz_sn = fwz_nw.subnets(new_prefix=25)  # two /26 subnets required
        fwx_nw = next(fwz_sn)  # for Azure Firewall and Management subnets
        fwz_sn = fwz_nw.address_exclude(fwx_nw)  # consolidate remainder
        dmz_nw = next(fwz_sn)  # largest remaining subnet for DMZ
        fwx_sn = fwx_nw.subnets(new_prefix=26)  # split the /25 into two /26
        fws_nw = next(fwx_sn)  # AzureFirewallSubnet
        fwm_nw = next(fwx_sn)  # AzureFirewallManagementSubnet

        # calculate the subnets in the hub_address_space
        hub_nw = ip_network(props.hub_address_space)
        if hub_nw.prefixlen < 20:  # split evenly between subnets and hosts
            sub_diff = int((hub_nw.max_prefixlen - hub_nw.prefixlen) / 2)
        else:
            sub_diff = 25 - hub_nw.prefixlen  # minimum /25 subnet
        subnets = hub_nw.subnets(prefixlen_diff=sub_diff)
        next_sn = next(subnets)  # first subnet reserved for special uses
        first_sn = next_sn.subnets(new_prefix=26)  # split it into /26 subnets
        gws_nw = next(first_sn)  # GatewaySubnet /26
        rem_nw = next(first_sn)  # at least one more /26 subnet, perhaps more
        rem_sn = rem_nw.subnets(new_prefix=27)  # only need a /27; save the rest
        abs_nw = next(rem_sn)  # AzureBastionSubnet /27 or greater
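        # illustrative results, assuming firewall_address_space='192.168.100.0/24'
        # and hub_address_space='10.100.0.0/16' (assumed inputs, not from this snippet):
        #   fws_nw=192.168.100.0/26, fwm_nw=192.168.100.64/26, dmz_nw=192.168.100.128/25
        #   gws_nw=10.100.0.0/26, rem_nw=10.100.0.64/26, abs_nw=10.100.0.64/27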

        # cast repeatedly referenced networks to strings
        dmz_ar = str(dmz_nw)
        gws_ar = str(gws_nw)

        # Azure Virtual Network to which spokes will be peered
        # separate address spaces to simplify custom routing
        hub = vdc.virtual_network(
            name,
            [
                props.firewall_address_space,
                props.hub_address_space,
            ],
        )

        # Azure will deploy gateways into this subnet
        hub_gw_sn = vdc.subnet_special(
            stem=f'{name}-gw',
            name='GatewaySubnet',  # name required
            virtual_network_name=hub.name,
            address_prefix=gws_ar,
        )

        # A perimeter network for Internet-facing services
        hub_dmz_sn = vdc.subnet_special(  #ToDo add NSG
            stem=f'{name}-dmz',
            name='DMZ',  # name not required but preferred
            virtual_network_name=hub.name,
            address_prefix=dmz_ar,
        )

        # Azure will deploy the firewall into this subnet
        hub_fw_sn = vdc.subnet_special(
            stem=f'{name}-fw',
            name='AzureFirewallSubnet',  # name required
            virtual_network_name=hub.name,
            address_prefix=str(fws_nw),
        )

        # Azure requires this subnet in case of forced_tunnel
        hub_fwm_sn = vdc.subnet_special(
            stem=f'{name}-fwm',
            name='AzureFirewallManagementSubnet',  # name required
            virtual_network_name=hub.name,
            address_prefix=str(fwm_nw),
        )

        # Gateways and Firewall depends_on special subnets
        # to avoid contention in the Azure control plane

        # Azure Firewall
        hub_fw = vdc.firewall(
            stem=name,
            fw_sn_id=hub_fw_sn.id,
            fwm_sn_id=hub_fwm_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_fwm_sn, hub_gw_sn],
        )

        # VPN Gateway
        hub_vpn_gw = vdc.vpn_gateway(
            stem=name,
            subnet_id=hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_fwm_sn, hub_gw_sn],
        )

        # ExpressRoute Gateway
        hub_er_gw = vdc.expressroute_gateway(
            stem=name,
            subnet_id=hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_fwm_sn, hub_gw_sn],
        )

        # Azure Bastion subnet and host (optional)
        if props.azure_bastion:
            hub_ab_sn = vdc.subnet_special(  #ToDo add NSG if required
                stem=f'{name}-ab',
                name='AzureBastionSubnet',  # name required
                virtual_network_name=hub.name,
                address_prefix=str(abs_nw),
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],  # avoid contention
            )
            hub_ab = vdc.bastion_host(
                stem=name,
                subnet_id=hub_ab_sn.id,
            )

        #ToDo requires Azure API version 2019-11-01 or later
        #if props.forced_tunnel:
        # https://docs.microsoft.com/en-us/azure/firewall/forced-tunneling

        # work around https://github.com/pulumi/pulumi/issues/4040
        hub_fw_ip = hub_fw.ip_configurations.apply(
            lambda ipc: ipc[0].get('private_ip_address'))

        # Route Table only to be associated with GatewaySubnet
        hub_gw_rt = vdc.route_table(
            stem=f'{name}-gw',
            disable_bgp_route_propagation=False,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],  # avoid contention
        )
        hub_gw_sn_rta = vdc.subnet_route_table(
            stem=f'{name}-gw',
            route_table_id=hub_gw_rt.id,
            subnet_id=hub_gw_sn.id,
        )

        # Route Table only to be associated with DMZ subnet
        hub_dmz_rt = vdc.route_table(
            stem=f'{name}-dmz',
            disable_bgp_route_propagation=True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],  # avoid contention
        )
        hub_dmz_sn_rta = vdc.subnet_route_table(
            stem=f'{name}-dmz',
            route_table_id=hub_dmz_rt.id,
            subnet_id=hub_dmz_sn.id,
        )

        # Route Table only to be associated with hub shared services subnets
        hub_ss_rt = vdc.route_table(
            stem=f'{name}-ss',
            disable_bgp_route_propagation=True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],  # avoid contention
        )

        # protect intra-GatewaySubnet traffic from being redirected
        vdc.route_to_virtual_network(
            stem=f'gw-gw',
            route_table_name=hub_gw_rt.name,
            address_prefix=gws_ar,
        )

        # it is very important to ensure that there is never a route with an
        # address_prefix which covers the AzureFirewallSubnet.

        # partially or fully invalidate system routes to redirect traffic
        for route in [
            (f'gw-dmz', hub_gw_rt.name, dmz_ar),
            (f'gw-hub', hub_gw_rt.name, props.hub_address_space),
            (f'dmz-dg', hub_dmz_rt.name, '0.0.0.0/0'),
            (f'dmz-dmz', hub_dmz_rt.name, dmz_ar),
            (f'dmz-hub', hub_dmz_rt.name, props.hub_address_space),
            (f'ss-dg', hub_ss_rt.name, '0.0.0.0/0'),
            (f'ss-dmz', hub_ss_rt.name, dmz_ar),
            (f'ss-gw', hub_ss_rt.name, gws_ar),
        ]:
            vdc.route_to_virtual_appliance(
                stem=route[0],
                route_table_name=route[1],
                address_prefix=route[2],
                next_hop_in_ip_address=hub_fw_ip,
            )

        # VNet Peering between stacks using StackReference
        if props.peer:
            peer_stack = StackReference(props.reference)
            peer_hub_id = peer_stack.get_output('hub_id')

            # VNet Peering (Global) in one direction from stack to peer
            hub_hub = vdc.vnet_peering(
                stem=props.stack,
                virtual_network_name=hub.name,
                peer=props.peer,
                remote_virtual_network_id=peer_hub_id,
                allow_forwarded_traffic=True,
                allow_gateway_transit=False,  # as both hubs have gateways
            )

            # need to invalidate system routes created by Global VNet Peering
            peer_dmz_ar = peer_stack.get_output('dmz_ar')
            peer_fw_ip = peer_stack.get_output('fw_ip')
            peer_hub_as = peer_stack.get_output('hub_as')

            for route in [
                (f'dmz-{props.peer}-dmz', hub_dmz_rt.name, peer_dmz_ar),
                (f'dmz-{props.peer}-hub', hub_dmz_rt.name, peer_hub_as),
                (f'gw-{props.peer}-dmz', hub_gw_rt.name, peer_dmz_ar),
                (f'gw-{props.peer}-hub', hub_gw_rt.name, peer_hub_as),
                (f'ss-{props.peer}-dmz', hub_ss_rt.name, peer_dmz_ar),
                (f'ss-{props.peer}-hub', hub_ss_rt.name, peer_hub_as),
            ]:
                vdc.route_to_virtual_appliance(
                    stem=route[0],
                    route_table_name=route[1],
                    address_prefix=route[2],
                    next_hop_in_ip_address=peer_fw_ip,
                )

        # shared services subnets starting with the second subnet
        for subnet in props.subnets:
            next_sn = next(subnets)
            hub_sn = vdc.subnet(  #ToDo add NSG
                stem=f'{name}-{subnet[0]}',
                virtual_network_name=hub.name,
                address_prefix=str(next_sn),
                depends_on=[hub_ss_rt],  # avoid contention
            )
            hub_sn_rta = vdc.subnet_route_table(
                stem=f'{name}-{subnet[0]}',
                route_table_id=hub_ss_rt.id,
                subnet_id=hub_sn.id,
            )

        # assign properties to hub including from child resources
        self.address_spaces = hub.address_spaces  # informational
        self.dmz_ar = dmz_ar  # used for routes to the hub
        self.dmz_rt_name = hub_dmz_rt.name  # used to add routes to spokes
        self.er_gw = hub_er_gw  # needed prior to VNet Peering from spokes
        self.fw = hub_fw  # needed prior to VNet Peering from spokes
        self.fw_ip = hub_fw_ip  # used for routes to the hub
        self.gw_rt_name = hub_gw_rt.name  # used to add routes to spokes
        self.hub_as = props.hub_address_space  # used for routes to the hub
        self.id = hub.id  # exported and used for stack and spoke peering
        self.location = hub.location  # informational
        self.name = hub.name  # exported and used for spoke peering
        self.peer = props.peer  # informational
        self.resource_group_name = props.resource_group_name  # informational
        self.subnets = hub.subnets  # exported as informational
        self.stack = props.stack  # informational
        self.stem = name  # used for VNet Peering from spokes
        self.ss_rt_name = hub_ss_rt.name  # used to add routes to spokes
        self.tags = props.tags  # informational
        self.vpn_gw = hub_vpn_gw  # needed prior to VNet Peering from spokes
        self.register_outputs({})
Example 5
"""An Azure RM Python Pulumi program"""
import pulumi
import pulumi_azure_nextgen.resources.latest as resources
import pulumi_azure_nextgen.databricks.latest as databricks

from pulumi import Output, export, Config, StackReference, get_stack, get_project

config = Config()
# reading in StackReference Path from local config
mystackpath = config.require("stackreference")
# setting the StackReference
my_network_stackreference = StackReference(mystackpath)
my_secondvirtualnetwork_output = my_network_stackreference.get_output(
    "virtual_network_name")
my_remote_resourcegroup_output = my_network_stackreference.get_output(
    "resource_group_name")
#my_secondvirtualnetwork = my_secondvirtualnetwork_output.apply(lambda my_secondvirtualnetwork_output: f"{my_secondvirtualnetwork_output}")
#my_remote_resourcegroup = my_remote_resourcegroup_output.apply(lambda my_remote_resourcegroup_output: f"{my_remote_resourcegroup_output}")

# The values for my_secondvirtualnetwork & my_remote_resourcegroup are from the virtualnetwork that has
# already been created using another pulumi stack.  This has to exist before this code can run.
my_secondvirtualnetwork = "shaht-vnet-peering-to-databricks"  # 2nd virtual network, needed for the VNet peering block FROM databricks.
my_remote_resourcegroup = "shaht-rg-peering-to-databricks"

# local variables from config file
#   my subscription id
mysubid = config.get("mysubid")
#   azure location
my_location = config.get("location")
#   resource group name
my_resource_group_name = config.get("resource_group_name")
Example 6
import pulumi
from pulumi import export, ResourceOptions, Config, StackReference, get_stack, get_project
import json
import pulumi_aws as aws

# https://www.pulumi.com/docs/intro/concepts/organizing-stacks-projects/#inter-stack-dependencies

# read local config settings - network
config = Config()

# reading in vpc StackReference Path from local config
mystackpath = config.require("mystackpath")

# setting the StackReference
mycrosswalkvpc = StackReference(mystackpath)

# Get all network values from previously created vpc #
pulumi_vpc = mycrosswalkvpc.require_output("pulumi_vpc_id")
pulumi_vpc_name = mycrosswalkvpc.require_output("pulumi_vpc_name")
pulumi_vpc_cidr = mycrosswalkvpc.require_output("pulumi_vpc_cidr")
pulumi_vpc_id = mycrosswalkvpc.require_output("pulumi_vpc_id")
pulumi_private_subnets = mycrosswalkvpc.require_output(
    "pulumi_vpc_private_subnet_ids")
pulumi_public_subnets = mycrosswalkvpc.require_output(
    "pulumi_vpc_public_subnet_ids")
pulumi_az_amount = mycrosswalkvpc.require_output("pulumi_vpc_az_zones")
env_stack = get_stack()
env_project = get_project()

# common tags
Example 7
    def __init__(self, name: str, props: HubProps, opts: ResourceOptions=None):
        super().__init__('vdc:network:Hub', name, {}, opts)

        # retrieve configuration
        dmz_ar = props.config.require('dmz_ar')
        fwm_ar = props.config.get('fwm_ar')
        fws_ar = props.config.require('fws_ar')
        fwz_as = props.config.require('fwz_as')
        gws_ar = props.config.require('gws_ar')
        hbs_ar = props.config.get('hbs_ar')
        hub_ar = props.config.get('hub_ar')
        hub_as = props.config.require('hub_as')

        # set vdc defaults
        vdc.resource_group_name = props.resource_group.name
        vdc.location = props.resource_group.location
        vdc.tags = props.tags
        vdc.self = self

        # Azure Virtual Network to which spokes will be peered
        # separate address spaces to simplify custom routing
        hub = vdc.virtual_network(name, [fwz_as, hub_as])

        # DMZ subnet
        hub_dmz_sn = vdc.subnet_special( #ToDo add NSG
            stem = f'{name}-dmz',
            name = 'DMZ', # name not required but preferred
            virtual_network_name = hub.name,
            address_prefix = dmz_ar,
        )

        # AzureFirewallSubnet
        hub_fw_sn = vdc.subnet_special(
            stem = f'{name}-fw',
            name = 'AzureFirewallSubnet', # name required
            virtual_network_name = hub.name,
            address_prefix = fws_ar,
        )

        # GatewaySubnet
        hub_gw_sn = vdc.subnet_special(
            stem = f'{name}-gw',
            name = 'GatewaySubnet', # name required
            virtual_network_name = hub.name,
            address_prefix = gws_ar,
        )

        # provisioning of Gateways and Firewall depends_on subnets
        # to avoid contention in the Azure control plane

        # VPN Gateway
        hub_vpn_gw = vdc.vpn_gateway(
            stem = name,
            subnet_id = hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # ExpressRoute Gateway
        hub_er_gw = vdc.expressroute_gateway(
            stem = name,
            subnet_id = hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # Azure Firewall
        hub_fw = vdc.firewall(
            stem = name,
            subnet_id = hub_fw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # provisioning of optional subnets depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # AzureBastionSubnet (optional)
        if hbs_ar:
            hub_ab_sn = vdc.subnet_special( #ToDo add NSG if required
                stem = f'{name}-ab',
                name = 'AzureBastionSubnet', # name required
                virtual_network_name = hub.name,
                address_prefix = hbs_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # AzureFirewallManagementSubnet (optional)
        if fwm_ar:
            hub_fwm_sn = vdc.subnet_special(
                stem = f'{name}-fwm',
                name = 'AzureFirewallManagementSubnet', # name required
                virtual_network_name = hub.name,
                address_prefix = fwm_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # work around https://github.com/pulumi/pulumi/issues/4040
        hub_fw_ip = hub_fw.ip_configurations.apply(
            lambda ipc: ipc[0].get('private_ip_address')
        )

        # provisioning of Route Tables depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # Route Table only to be associated with the GatewaySubnet
        hub_gw_rt = vdc.route_table(
            stem = f'{name}-gw',
            disable_bgp_route_propagation = False,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate GatewaySubnet with Route Table
        hub_gw_sn_rta = vdc.subnet_route_table(
            stem = f'{name}-gw',
            route_table_id = hub_gw_rt.id,
            subnet_id = hub_gw_sn.id,
        )

        # Route Table only to be associated with DMZ subnet
        hub_dmz_rt = vdc.route_table(
            stem = f'{name}-dmz',
            disable_bgp_route_propagation = True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate DMZ subnet with Route Table
        hub_dmz_sn_rta = vdc.subnet_route_table(
            stem = f'{name}-dmz',
            route_table_id = hub_dmz_rt.id,
            subnet_id = hub_dmz_sn.id,
        )

        # Route Table only to be associated with ordinary subnets in hub
        hub_sn_rt = vdc.route_table(
            stem = f'{name}-sn',
            disable_bgp_route_propagation = True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # protect intra-GatewaySubnet traffic from being redirected
        vdc.route_to_virtual_network(
            stem = f'gw-gw',
            route_table_name = hub_gw_rt.name,
            address_prefix = gws_ar,
        )

        # partially or fully invalidate system routes to redirect traffic
        for route in [
            (f'gw-dmz', hub_gw_rt.name, dmz_ar),
            (f'gw-hub', hub_gw_rt.name, hub_as),
            (f'dmz-dg', hub_dmz_rt.name, '0.0.0.0/0'),
            (f'dmz-dmz', hub_dmz_rt.name, dmz_ar),
            (f'dmz-hub', hub_dmz_rt.name, hub_as),
            (f'sn-dg', hub_sn_rt.name, '0.0.0.0/0'),
            (f'sn-dmz', hub_sn_rt.name, dmz_ar),
            (f'sn-gw', hub_sn_rt.name, gws_ar),
        ]:
            vdc.route_to_virtual_appliance(
                stem = route[0],
                route_table_name = route[1],
                address_prefix = route[2],
                next_hop_in_ip_address = hub_fw_ip,
            )

        # VNet Peering between stacks using StackReference
        peer = props.config.get('peer')
        if peer:
            org = props.config.require('org')
            project = get_project()
            peer_stack = StackReference(f'{org}/{project}/{peer}')
            peer_hub_id = peer_stack.get_output('hub_id')
            peer_fw_ip = peer_stack.get_output('hub_fw_ip')
            peer_dmz_ar = peer_stack.get_output('dmz_ar') 
            peer_hub_as = peer_stack.get_output('hub_as')

            # VNet Peering (Global) in one direction from stack to peer
            hub_hub = vdc.vnet_peering(
                stem = props.stack,
                virtual_network_name = hub.name,
                peer = peer,
                remote_virtual_network_id = peer_hub_id,
                allow_forwarded_traffic = True,
                allow_gateway_transit = False, # as both hubs have gateways
            )

            # need to invalidate system routes created by Global VNet Peering
            for route in [
                (f'dmz-{peer}-dmz', hub_dmz_rt.name, peer_dmz_ar),
                (f'dmz-{peer}-hub', hub_dmz_rt.name, peer_hub_as),
                (f'gw-{peer}-dmz', hub_gw_rt.name, peer_dmz_ar),
                (f'gw-{peer}-hub', hub_gw_rt.name, peer_hub_as),
                (f'sn-{peer}-dmz', hub_sn_rt.name, peer_dmz_ar),
                (f'sn-{peer}-hub', hub_sn_rt.name, peer_hub_as),
            ]:
                vdc.route_to_virtual_appliance(
                    stem = route[0],
                    route_table_name = route[1],
                    address_prefix = route[2],
                    next_hop_in_ip_address = peer_fw_ip,
                )
        
        # provisioning of subnets depends_on Route Table (Gateways & Firewall)
        # to avoid contention in the Azure control plane

        # only one shared subnet is provisioned as an example, but many can be
        if hub_ar: #ToDo replace with loop
            hub_example_sn = vdc.subnet( #ToDo add NSG
                stem = f'{name}-example',
                virtual_network_name = hub.name,
                address_prefix = hub_ar,
                depends_on=[hub_sn_rt],
            )

            # associate all hub shared services subnets to Route Table        
            hub_example_sn_rta = vdc.subnet_route_table(
                stem = f'{name}-example',
                route_table_id = hub_sn_rt.id,
                subnet_id = hub_example_sn.id,
            )

        combined_output = Output.all(
            hub_dmz_rt.name,
            hub_er_gw,
            hub_fw,
            hub_fw_ip,
            hub_gw_rt.name,
            hub.id,
            hub.name,
            hub_sn_rt.name,
            hub.subnets,
            hub_vpn_gw,
        )

        self.hub_dmz_rt_name = hub_dmz_rt.name # used to add routes to spokes
        self.hub_er_gw = hub_er_gw # needed prior to VNet Peering from spokes
        self.hub_fw = hub_fw # needed prior to VNet Peering from spokes
        self.hub_fw_ip = hub_fw_ip # used to construct routes
        self.hub_gw_rt_name = hub_gw_rt.name # used to add routes to spokes
        self.hub_id = hub.id # exported and used for peering
        self.hub_name = hub.name # exported and used for peering
        self.hub_sn_rt_name = hub_sn_rt.name # used to add routes to spokes
        self.hub_subnets = hub.subnets # exported as informational
        self.hub_vpn_gw = hub_vpn_gw # needed prior to VNet Peering from spokes
        self.register_outputs({})
Example 8
from pulumi import StackReference, Config, export
from pulumi_aws import get_ami, ec2

config = Config()
company_stack = StackReference(config.require("companyStack"))
department_stack = StackReference(config.require("departmentStack"))

combines_tags = {
    "department": department_stack.get_output("departmentName"),
    "company": company_stack.get_output("companyName"),
    "team": config.require("teamName"),
    "Managed By": "Pulumi",
}

ami_id = get_ami(
    most_recent=True,
    owners=["099720109477"],
    filters=[{
        "name": "name",
        "values": ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"],
    }],
).id

instance = ec2.Instance("tagged",
                        instance_type="t2.medium",
                        ami=ami_id,
                        tags=combines_tags)

export("instance_id", instance.id)
export("instance_tags", instance.tags)
Example 9
"""An Azure RM Python Pulumi program"""

import pulumi
import pulumi_azure_nextgen.network.latest as network
from pulumi import Config, StackReference

# read local config settings - network
config = Config()

# reading in StackReference Path from local config
mystackpath = config.require("stackreference")
# setting the StackReference
mynetworkstackreference = StackReference(mystackpath)
# get azure subscription id
my_subid = config.get("subid")

# getting the resource group where the current virtual network (the one that doesn't have databricks) is.
my_resource_group = mynetworkstackreference.get_output("resource_group_name")

# getting the name of the current virtual network (the one that doesn't have databricks).
my_virtual_network_name = mynetworkstackreference.get_output("virtual_network_name")

# Databricks resource group
my_remote_resource_group = "myWorkspace"
# Databricks virtual network
my_remote_virtual_network = "workers-vnet"
# vnet peering name FROM the virtualnetwork TO the databricks virtualnetwork
my_virtual_network_peering_name = "shaht-vnet-peering-back-to-databricks"

# vnet peering resource
v_net_peering = network.VirtualNetworkPeering("virtualNetworkPeering",
Example 10
from pulumi import export, StackReference, Output, ResourceOptions
from pulumi_kubernetes import Provider
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import Service, Namespace
import pulumi

# Create StackReference to the Kubernetes cluster stack
config = pulumi.Config()
stackRef = config.require("clusterStackRef")
infra = StackReference(f"{stackRef}")

# Declare a provider using the KubeConfig we created
# This will be used to interact with the EKS cluster
k8s_provider = Provider("k8s-provider",
                        kubeconfig=infra.get_output("kubeconfig"))

# Create a Namespace object https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
ns = Namespace("app-ns",
               metadata={
                   "name": "joe-duffy",
               },
               opts=ResourceOptions(provider=k8s_provider))

app_labels = {"app": "iac-workshop"}
app_deployment = Deployment(
    "app-dep",
    metadata={"namespace": ns.metadata["name"]},
    spec={
        "selector": {
            "match_labels": app_labels,
        },
Example 11
import pulumi
from pulumi import ResourceOptions, StackReference
from pulumi_azure import core, storage, mssql
from pulumi_azure.core import ResourceGroup
from pulumi_azure.authorization import Assignment
from pulumi_azure.containerservice import KubernetesCluster, Registry
from pulumi_azure.network import VirtualNetwork, Subnet
from pulumi_kubernetes import Provider
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import Service, Namespace

config = pulumi.Config()
SA_PASSWORD = config.require('sa_password')

infra = StackReference(f"kzhou57/pulumi-azure-quickstart/dev")

# TODO read from output
ACR_NAME = 'kzhouacr'

rg = ResourceGroup.get('rg', id=infra.get_output('resource_group_id'))

custom_provider = Provider("k8s", kubeconfig=infra.get_output('kubeconfig'))

# K8s SQL server csharpexamplesql
name = 'csharpexamplesql'
sql_namespace = Namespace(name,
                          metadata={},
                          __opts__=ResourceOptions(provider=custom_provider))

appLabels = {"appClass": name}
Example 12
#pulumi imports
import pulumi
from pulumi.output import Output
import pulumi_aws as aws
from pulumi import export, Config, StackReference

# need this so that later files can read the subnets
config = pulumi.Config()

# reading in vpc StackReference Path from local config
mystackpath = config.get("mystackpath")

# setting the StackReference
mycloudformationvpc = StackReference(f"{mystackpath}")
myvpccidrblock = mycloudformationvpc.require_output(
    "pulumi-cloudformation-vpc-cidr")
myvpcid = mycloudformationvpc.require_output("pulumi-cloudformation-vpc-id")
pulumi_cloudformation_vpc_with_arn = mycloudformationvpc.require_output(
    "pulumi-cloudformation-arn")

# Retrieve public subnet cidrs blocks from local config
public_subnet_cidrs = config.require_object("public_subnets")
# Retrieve private subnet cidrs blocks from local config
private_subnet_cidrs = config.require_object("private_subnets")


def get_aws_az():
    zones = aws.get_availability_zones()
    return zones.names[:3]  # returns the first three availability zones (0, 1, 2)
Example 13
from pulumi import Config, StackReference
import pulumi_aws as aws
import json

# Read local config settings
config = Config()

# ---------------

# Reading state for app.pulumi.com backend
# env = pulumi.get_stack()
# infra = pulumi.StackReference(f"hello-world/pulumi-infra-az/{env}")

# ---------------

# Reading S3 state
infra = StackReference(f"pulumi-infra-az_dev")

# --------------

# Read back the default VPC and public subnets, which we will use.
pulumi_vpc = infra.get_output("pulumi-vpc-id")
pulumi_private_subnets = infra.get_output("pulumi-private-subnet-ids")
pulumi_public_subnets = infra.get_output("pulumi-public-subnet-ids")
pulumi_az_amount = infra.get_output("pulumi-az-amount")

# Create an ECS cluster to run a container-based service.
cluster = aws.ecs.Cluster("pulumi-app-cluster")

# Create a SecurityGroup that permits HTTP ingress and unrestricted egress.
sgroup = aws.ec2.SecurityGroup(
    "pulumi-app-sg",
Example 14
from pulumi import StackReference, get_stack

S3_STACK_REFERENCE = StackReference(f's3/{get_stack()}')

CERTS_BUCKET_NAME = S3_STACK_REFERENCE.get_output('certs_bucket_name')
LOGGING_BUCKET_NAME = S3_STACK_REFERENCE.get_output('logging_bucket_name')
PROMETHEUS_BUCKET_NAME = S3_STACK_REFERENCE.get_output(
    'prometheus_bucket_name')
Example 15
    def __init__(self,
                 name: str,
                 props: HubProps,
                 opts: ResourceOptions = None):
        super().__init__('vdc:network:Hub', name, {}, opts)

        # set vdc defaults
        vdc.resource_group_name = props.resource_group_name
        vdc.tags = props.tags
        vdc.self = self

        # Azure Virtual Network to which spokes will be peered
        # separate address spaces to simplify custom routing
        hub = vdc.virtual_network(name, [props.fwz_as, props.hub_as])

        # DMZ subnet
        hub_dmz_sn = vdc.subnet_special(  #ToDo add NSG
            stem=f'{name}-dmz',
            name='DMZ',  # name not required but preferred
            virtual_network_name=hub.name,
            address_prefix=props.dmz_ar,
        )

        # AzureFirewallSubnet
        hub_fw_sn = vdc.subnet_special(
            stem=f'{name}-fw',
            name='AzureFirewallSubnet',  # name required
            virtual_network_name=hub.name,
            address_prefix=props.fws_ar,
        )

        # GatewaySubnet
        hub_gw_sn = vdc.subnet_special(
            stem=f'{name}-gw',
            name='GatewaySubnet',  # name required
            virtual_network_name=hub.name,
            address_prefix=props.gws_ar,
        )

        # provisioning of Gateways and Firewall depends_on subnets
        # to avoid contention in the Azure control plane

        # VPN Gateway
        hub_vpn_gw = vdc.vpn_gateway(
            stem=name,
            subnet_id=hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # ExpressRoute Gateway
        hub_er_gw = vdc.expressroute_gateway(
            stem=name,
            subnet_id=hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # Azure Firewall
        hub_fw = vdc.firewall(
            stem=name,
            subnet_id=hub_fw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # provisioning of optional subnets depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # AzureBastionSubnet (optional)
        if props.hbs_ar:
            hub_ab_sn = vdc.subnet_special(  #ToDo add NSG if required
                stem=f'{name}-ab',
                name='AzureBastionSubnet',  # name required
                virtual_network_name=hub.name,
                address_prefix=props.hbs_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # AzureFirewallManagementSubnet (optional)
        if props.fwm_ar:
            hub_fwm_sn = vdc.subnet_special(
                stem=f'{name}-fwm',
                name='AzureFirewallManagementSubnet',  # name required
                virtual_network_name=hub.name,
                address_prefix=props.fwm_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # work around https://github.com/pulumi/pulumi/issues/4040
        hub_fw_ip = hub_fw.ip_configurations.apply(
            lambda ipc: ipc[0].get('private_ip_address'))

        # provisioning of Route Tables depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # Route Table only to be associated with the GatewaySubnet
        hub_gw_rt = vdc.route_table(
            stem=f'{name}-gw',
            disable_bgp_route_propagation=False,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate GatewaySubnet with Route Table
        hub_gw_sn_rta = vdc.subnet_route_table(
            stem=f'{name}-gw',
            route_table_id=hub_gw_rt.id,
            subnet_id=hub_gw_sn.id,
        )

        # Route Table only to be associated with DMZ subnet
        hub_dmz_rt = vdc.route_table(
            stem=f'{name}-dmz',
            disable_bgp_route_propagation=True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate DMZ subnet with Route Table
        hub_dmz_sn_rta = vdc.subnet_route_table(
            stem=f'{name}-dmz',
            route_table_id=hub_dmz_rt.id,
            subnet_id=hub_dmz_sn.id,
        )

        # Route Table only to be associated with shared services subnets in hub
        hub_ss_rt = vdc.route_table(
            stem=f'{name}-ss',
            disable_bgp_route_propagation=True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # protect intra-GatewaySubnet traffic from being redirected
        vdc.route_to_virtual_network(
            stem=f'gw-gw',
            route_table_name=hub_gw_rt.name,
            address_prefix=props.gws_ar,
        )

        # partially or fully invalidate system routes to redirect traffic
        for route in [
            (f'gw-dmz', hub_gw_rt.name, props.dmz_ar),
            (f'gw-hub', hub_gw_rt.name, props.hub_as),
            (f'dmz-dg', hub_dmz_rt.name, '0.0.0.0/0'),
            (f'dmz-dmz', hub_dmz_rt.name, props.dmz_ar),
            (f'dmz-hub', hub_dmz_rt.name, props.hub_as),
            (f'ss-dg', hub_ss_rt.name, '0.0.0.0/0'),
            (f'ss-dmz', hub_ss_rt.name, props.dmz_ar),
            (f'ss-gw', hub_ss_rt.name, props.gws_ar),
        ]:
            vdc.route_to_virtual_appliance(
                stem=route[0],
                route_table_name=route[1],
                address_prefix=route[2],
                next_hop_in_ip_address=hub_fw_ip,
            )

        # VNet Peering between stacks using StackReference
        if props.peer:
            peer_stack = StackReference(props.ref)
            peer_hub_id = peer_stack.get_output('hub_id')

            # VNet Peering (Global) in one direction from stack to peer
            hub_hub = vdc.vnet_peering(
                stem=props.stack,
                virtual_network_name=hub.name,
                peer=props.peer,
                remote_virtual_network_id=peer_hub_id,
                allow_forwarded_traffic=True,
                allow_gateway_transit=False,  # as both hubs have gateways
            )

            # need to invalidate system routes created by Global VNet Peering
            peer_dmz_ar = peer_stack.get_output('dmz_ar')
            peer_fw_ip = peer_stack.get_output('fw_ip')
            peer_hub_as = peer_stack.get_output('hub_as')

            for route in [
                (f'dmz-{props.peer}-dmz', hub_dmz_rt.name, peer_dmz_ar),
                (f'dmz-{props.peer}-hub', hub_dmz_rt.name, peer_hub_as),
                (f'gw-{props.peer}-dmz', hub_gw_rt.name, peer_dmz_ar),
                (f'gw-{props.peer}-hub', hub_gw_rt.name, peer_hub_as),
                (f'ss-{props.peer}-dmz', hub_ss_rt.name, peer_dmz_ar),
                (f'ss-{props.peer}-hub', hub_ss_rt.name, peer_hub_as),
            ]:
                vdc.route_to_virtual_appliance(
                    stem=route[0],
                    route_table_name=route[1],
                    address_prefix=route[2],
                    next_hop_in_ip_address=peer_fw_ip,
                )

        # provisioning of subnets depends_on Route Table (Gateways & Firewall)
        # to avoid contention in the Azure control plane

        # shared services subnets
        subnet_range = props.hub_ar
        for subnet in props.subnets:
            hub_sn = vdc.subnet(  #ToDo add NSG
                stem=f'{name}-{subnet[0]}',
                virtual_network_name=hub.name,
                address_prefix=subnet_range,
                depends_on=[hub_ss_rt],
            )
            # associate all hub shared services subnets to Route Table
            hub_sn_rta = vdc.subnet_route_table(
                stem=f'{name}-{subnet[0]}',
                route_table_id=hub_ss_rt.id,
                subnet_id=hub_sn.id,
            )
            subnet_range = vdc.subnet_next(props.hub_as, subnet_range)

        # assign properties to hub including from child resources
        self.address_spaces = hub.address_spaces  # informational
        self.dmz_ar = props.dmz_ar  # used to construct routes to the hub
        self.dmz_rt_name = hub_dmz_rt.name  # used to add routes to spokes
        self.er_gw = hub_er_gw  # needed prior to VNet Peering from spokes
        self.fw = hub_fw  # needed prior to VNet Peering from spokes
        self.fw_ip = hub_fw_ip  # used to construct routes to the hub
        self.gw_rt_name = hub_gw_rt.name  # used to add routes to spokes
        self.hub_as = props.hub_as  # used to construct routes to the hub
        self.id = hub.id  # exported and used for stack and spoke peering
        self.location = hub.location  # informational
        self.name = hub.name  # exported and used for spoke peering
        self.peer = props.peer  # informational
        self.resource_group_name = props.resource_group_name  # informational
        self.subnets = hub.subnets  # exported as informational
        self.stack = props.stack  # informational
        self.stem = name  # used for VNet Peering from spokes
        self.ss_rt_name = hub_ss_rt.name  # used to add routes to spokes
        self.tags = props.tags  # informational
        self.vpn_gw = hub_vpn_gw  # needed prior to VNet Peering from spokes
        self.register_outputs({})