Example #1
pwd = config.require("sqlPassword")

resource_group = core.ResourceGroup(appname + '-' + env + '-rg')

sql_server = sql.SqlServer(resource_name=appname + '-' + env + '-sql',
                           resource_group_name=resource_group.name,
                           administrator_login=username,
                           administrator_login_password=pwd,
                           version="12.0")

database = sql.Database(appname + '-' + env + '-db',
                        resource_group_name=resource_group.name,
                        server_name=sql_server.name,
                        requested_service_objective_name="S0")

connection_string = Output.all(sql_server.name, database.name, username, pwd) \
    .apply(lambda args: f"Server=tcp:{args[0]}.database.windows.net;initial catalog={args[1]};user ID={args[2]};password={args[3]};Min Pool Size=0;Max Pool Size=30;Persist Security Info=true;")

app_service_plan = appservice.Plan(appname + '-' + env + '-asp',
                                   resource_group_name=resource_group.name,
                                   kind="App",
                                   sku={
                                       "tier": "Basic",
                                       "size": "B1",
                                   })

app_insights = appinsights.Insights(
    name=appname + '-' + env + '-ai',  # explicit name to bypass auto naming
    resource_name=appname + '-' + env + '-ai',
    resource_group_name=resource_group.name,
    location=resource_group.location,
    application_type="web",
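
A minimal, self-contained sketch of the core pattern in Example #1: none of the values is known until deployment, so Output.all combines them into a single Output whose apply callback receives plain strings. The Output.from_input stand-ins below replace real resource outputs.

import pulumi
from pulumi import Output

# Stand-ins for values that would normally come from resources.
server = Output.from_input("my-server")
db = Output.from_input("my-db")

# The lambda runs once both values have resolved; args is a plain list.
conn = Output.all(server, db).apply(
    lambda args: f"Server=tcp:{args[0]}.database.windows.net;initial catalog={args[1]};")

pulumi.export("connection_string", conn)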
Example #2
        "service": "false",
        "container": "false",
        "object": "true"
    },
    permissions={
        "read": "true",
        "write": "false",
        "delete": "false",
        "add": "false",
        "list": "false",
        "create": "false",
        "update": "false",
        "process": "false"
    },
)
httpdotnet_signed_blob_url = Output.all(httpdotnet_storage_account.name, httpdotnet_container.name, httpdotnet_zib_blob.name, account_sas.sas) \
    .apply(lambda args: f"https://{args[0]}.blob.core.windows.net/{args[1]}/{args[2]}{args[3]}")

httpdotnet_plan = appservice.Plan("http-dotnet",
                                  resource_group_name=resource_group.name,
                                  kind="FunctionApp",
                                  sku={
                                      "tier": "Dynamic",
                                      "size": "Y1"
                                  })

httpdotnet_function_app = appservice.FunctionApp(
    "http-dotnet",
    resource_group_name=resource_group.name,
    app_service_plan_id=httpdotnet_plan.id,
    storage_connection_string=httpdotnet_storage_account.primary_connection_string,
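
The positional args[0] … args[3] indexing above is easy to get out of sync with the Output.all argument list. One hedged alternative, shown here with Output.from_input stand-ins, is to unpack into a helper with named parameters:

from pulumi import Output

def build_url(account, container, blob, sas):
    return f"https://{account}.blob.core.windows.net/{container}/{blob}{sas}"

# Stand-ins; in Example #2 these are storage account, container, blob and SAS outputs.
signed_url = Output.all(Output.from_input("acct"),
                        Output.from_input("zips"),
                        Output.from_input("app.zip"),
                        Output.from_input("?sv=...")).apply(lambda args: build_url(*args))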
Example #3
pulumi.export('storageaccount', account.name)

# Export the Consumption Plan
pulumi.export('consumptionplan', plan.name)

# List of storage account keys
storageAccountKeys = pulumi.Output.all(
    resource_group.name,
    account.name).apply(lambda args: storage.list_storage_account_keys(
        resource_group_name=args[0], account_name=args[1]))
# Primary storage account key
primaryStorageKey = storageAccountKeys.apply(
    lambda accountKeys: accountKeys.keys[0].value)
# Build a storage connection string out of it:
storageConnectionString = Output.concat(
    "DefaultEndpointsProtocol=https;AccountName=", account.name,
    ";AccountKey=", primaryStorageKey)

# Export the storageacountkey as a secret
pulumi.export("storageaccountkeys", pulumi.Output.secret(storageAccountKeys))
# Export the primarystoragekey as a secret
pulumi.export('primarystoragekey', pulumi.Output.secret(primaryStorageKey))
# Export the storageconnectionstring  as a secret
pulumi.export('storageconnectionstring',
              pulumi.Output.secret(storageConnectionString))

# Create the functionapp
app = web.WebApp(
    "functionapp",
    resource_group_name=resource_group.name,
    location=resource_group.location,
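
Example #3 wraps each export in pulumi.Output.secret explicitly. Worth knowing: secretness also propagates on its own, so once a value is secret, anything derived from it via apply, all or concat stays secret. A small sketch:

from pulumi import Output

key = Output.secret("account-key")        # marked secret at the source
conn = Output.concat("AccountKey=", key)  # conn is secret automatically
plain = Output.from_input("name")         # not secret
pair = Output.all(plain, key)             # secret, because one input is secret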
Example #4
# Copyright 2016-2020, Pulumi Corporation.  All rights reserved.

from pulumi import export, Input, Output, ResourceOptions
from pulumi.dynamic import Resource, ResourceProvider, CreateResult

class Provider(ResourceProvider):
    def create(self, props):
        return CreateResult("1", {"prefix": props["prefix"]})

class R(Resource):
    prefix: Output[str]
    def __init__(self, name, prefix: Input[str], opts: ResourceOptions = None):
        super().__init__(Provider(), name, {"prefix": prefix}, opts)

without_secret = R("without_secret", prefix=Output.from_input("it's a secret to everybody"))
with_secret = R("with_secret", prefix=Output.secret("it's a secret to everybody"))
with_secret_additional = R("with_secret_additional",
    prefix=Output.from_input("it's a secret to everybody"),
    opts=ResourceOptions(additional_secret_outputs=["prefix"]))

export("withoutSecret", without_secret)
export("withSecret", with_secret)
export("withSecretAdditional", with_secret_additional)
            "Sid": "AWSCloudTrailCreateLogStream",
            "Effect": "Allow",
            "Action": ["logs:CreateLogStream"],
            "Resource": f"{resource}"
        }, {
            "Sid": "AWSCloudTrailPutLogEvents",
            "Effect": "Allow",
            "Action": ["logs:PutLogEvents"],
            "Resource": f"{resource}"
        }]
    })


# build resource string for iam_role_generate function
resource = Output.concat("arn:aws:logs:", region, ":", account_id,
                         ":log-group:", cloudwatch_log.name, ":log-stream:",
                         log_stream.name, "*")

# Apply policy to role
role_policy = iam.RolePolicy("cloudwatch_log_stream",
                             role=role.id,
                             policy=resource.apply(iam_role_generate))

# Create cloudtrail for s3 and write to s3 bucket
trail_s3 = cloudtrail.Trail('CloudTrail_logging_for_s3',
                            opts=ResourceOptions(depends_on=[bucket_policy]),
                            cloud_watch_logs_group_arn=cloudwatch_log.arn,
                            cloud_watch_logs_role_arn=role.arn,
                            enable_logging=True,
                            enable_log_file_validation=True,
                            name="Cloudtrail_s3",
Example #6
# Imports for this truncated test snippet; UNKNOWN and is_dry_run come from
# the SDK's runtime internals.
from pulumi import Output
from pulumi.dynamic import CreateResult, Resource, ResourceProvider
from pulumi.runtime import is_dry_run
from pulumi.runtime.rpc import UNKNOWN


class MyProvider(ResourceProvider):
    def create(self, props):
        return CreateResult("0", props)


class MyResource(Resource):
    foo: Output
    bar: Output
    baz: Output

    def __init__(self, name, props, opts=None):
        super().__init__(MyProvider(), name, props, opts)


unknown = Output.from_input(UNKNOWN if is_dry_run() else "foo")

a = MyResource(
    "a", {
        "foo": "foo",
        "bar": {
            "value": "foo",
            "unknown": unknown
        },
        "baz": ["foo", unknown],
    })


async def check_knowns():
    assert await a.foo.is_known()
    assert await a.bar["value"].is_known()
Example #7
database = pulumi_gcp.sql.Database(
    "database", instance=cloud_sql_instance.name, name=config.require("db-name")
)

users = pulumi_gcp.sql.User(
    "users",
    name=config.require("db-name"),
    instance=cloud_sql_instance.name,
    password=config.require_secret("db-password"),
)

sql_instance_url = Output.concat(
    "postgres://",
    config.require("db-name"),
    ":",
    config.require_secret("db-password"),
    "@/",
    config.require("db-name"),
    "?host=/cloudsql/",
    cloud_sql_instance.connection_name,
)

cloud_run = pulumi_gcp.cloudrun.Service(
    "default-service",
    location=Config("gcp").require("region"),
    template=pulumi_gcp.cloudrun.ServiceTemplateArgs(
        metadata=ServiceTemplateMetadataArgs(
            annotations={
                "run.googleapis.com/cloudsql-instances": cloud_sql_instance.connection_name
            }
        ),
        spec=pulumi_gcp.cloudrun.ServiceTemplateSpecArgs(
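
Example #7 splices config.require_secret("db-password") into Output.concat; because one segment is secret, the whole sql_instance_url comes out secret. A minimal sketch of the same idea (the config key is hypothetical and would be set with `pulumi config set --secret db-password ...`):

from pulumi import Config, Output

config = Config()
# url is itself a secret Output because one of its inputs is secret.
url = Output.concat("postgres://app:", config.require_secret("db-password"), "@/appdb")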
Example #8
    def __init__(self, name: str, opts: ResourceOptions = None):

        super().__init__('custom:resource:LibvirtHost', name, {}, opts)

        basename = f"{name}-kvm"
        username = "******"
        computername = "kvmhost"

        # Resource group, etc for the KVM host
        resource_group = resources.ResourceGroup(
            f"{basename}-rg", opts=ResourceOptions(parent=self))

        net = network.VirtualNetwork(f"{basename}-net",
                                     resource_group_name=resource_group.name,
                                     address_space=network.AddressSpaceArgs(
                                         address_prefixes=["10.0.0.0/16"], ),
                                     subnets=[
                                         network.SubnetArgs(
                                             name="default",
                                             address_prefix="10.0.1.0/24",
                                         )
                                     ],
                                     opts=ResourceOptions(parent=self))

        public_ip = network.PublicIPAddress(
            f"{basename}-ip",
            resource_group_name=resource_group.name,
            public_ip_allocation_method=network.IPAllocationMethod.DYNAMIC,
            opts=ResourceOptions(parent=self))

        network_iface = network.NetworkInterface(
            f"{basename}-nic",
            resource_group_name=resource_group.name,
            ip_configurations=[
                network.NetworkInterfaceIPConfigurationArgs(
                    name="serveripcfg",
                    subnet=network.SubnetArgs(id=net.subnets[0].id),
                    private_ip_allocation_method=network.IPAllocationMethod.DYNAMIC,
                    public_ip_address=network.PublicIPAddressArgs(
                        id=public_ip.id),
                )
            ],
            opts=ResourceOptions(parent=self))

        # SSH key for accessing the Azure VM that is going to be the KVM host.
        ssh_key = tls.PrivateKey(f"{basename}-sshkey",
                                 algorithm="RSA",
                                 rsa_bits=4096,
                                 opts=ResourceOptions(parent=self))

        # Script to configure the kvm service on the kvm host
        init_script = f"""#!/bin/bash

        # Install KVM
        sudo apt update
        sudo apt-get -y install qemu-kvm libvirt-bin
        # hack to account for this bug: https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1677398
        # Work around: https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1677398/comments/42
        sudo sed -i '$ a security_driver = "none"' /etc/libvirt/qemu.conf
        sudo systemctl restart libvirt-bin

        """

        vm = compute.VirtualMachine(
            f"{basename}-vm",
            resource_group_name=resource_group.name,
            network_profile=compute.NetworkProfileArgs(network_interfaces=[
                compute.NetworkInterfaceReferenceArgs(id=network_iface.id),
            ], ),
            hardware_profile=compute.HardwareProfileArgs(
                vm_size=compute.VirtualMachineSizeTypes.STANDARD_D4S_V3),
            os_profile=compute.OSProfileArgs(
                computer_name=computername,
                admin_username=username,
                custom_data=base64.b64encode(
                    init_script.encode("ascii")).decode("ascii"),
                linux_configuration=compute.LinuxConfigurationArgs(
                    ssh=compute.SshConfigurationArgs(public_keys=[
                        compute.SshPublicKeyArgs(
                            key_data=ssh_key.public_key_openssh,
                            path=f"/home/{username}/.ssh/authorized_keys")
                    ]))),
            storage_profile=compute.StorageProfileArgs(
                os_disk=compute.OSDiskArgs(
                    create_option=compute.DiskCreateOptionTypes.FROM_IMAGE, ),
                image_reference=compute.ImageReferenceArgs(
                    publisher="canonical",
                    offer="UbuntuServer",
                    sku="18.04-LTS",
                    version="latest",
                ),
            ),
            opts=ResourceOptions(parent=self))

        # There's some delay between when Azure says the VM is ready and
        # when the KVM/qemu service can start accepting connections.
        # So, wait a bit to allow the KVM server to become fully ready.
        # But only do the wait if the VM has been provisioned (i.e. not during a preview).
        vm.provisioning_state.apply(lambda state: time.sleep(90))

        public_ip_addr = vm.id.apply(
            lambda _: network.get_public_ip_address_output(
                public_ip_address_name=public_ip.name,
                resource_group_name=resource_group.name))

        # Create/update the private key file for the SSH remote connection URI.
        def write_key_file(priv_key, key_file):
            # Make an existing key file writable, rewrite it, then lock it down.
            if os.path.exists(key_file):
                os.chmod(key_file, 0o666)
            with open(key_file, "w") as f:
                f.write(priv_key)
            os.chmod(key_file, 0o400)

        key_file = f"{basename}_server.priv"
        ssh_key.private_key_pem.apply(
            lambda priv_key: write_key_file(priv_key, key_file))

        # Build the connection URI that is returned for use by the libvirt provider.
        # See https://libvirt.org/uri.html#URI_remote for details on the remote URI options
        self.libvirt_remote_uri = Output.concat(
            "qemu+ssh://", username, "@", public_ip_addr.ip_address,
            "/system?keyfile=./", key_file,
            "&socket=/var/run/libvirt/libvirt-sock&no_verify=1")

        # Return where the VM pool should be created.
        # In this case, the "vm pool" is simply placed under the KVM host user's home folder
        self.vm_pool_dir = f"/home/{username}/vms"

        # Other values for convenience to output useful information
        self.ip = public_ip_addr.ip_address
        self.username = username
        self.ssh_priv_key_file = key_file

        self.register_outputs({})
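
Example #8 leans on apply for side effects (sleeping, writing the key file). Such callbacks only run when the input value is known; resource outputs are typically unknown during a preview, which is exactly the behavior the comments above rely on. A sketch of the general rule:

from pulumi import Output

def side_effect(value):
    # apply callbacks run only when the input is known; resource outputs are
    # usually unknown during `pulumi preview`, so callbacks like this one are
    # skipped then and run during the actual update.
    print("resolved:", value)
    return value

# Output.from_input values are always known, so this particular callback would
# run even during a preview.
result = Output.from_input("ready").apply(side_effect)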
Example #9
policy = Output.all(bucket.arn, sns_topic.arn, weapons_topic.arn,
                    dynamodb_table.arn,
                    chat_stream.arn).apply(lambda args: json.dumps({
                        "Version":
                        "2012-10-17",
                        "Statement": [
                            CREATE_CW_LOGS_POLICY,
                            {
                                "Effect": "Allow",
                                "Action": ["s3:Get*"],
                                "Resource": args[0]
                            },
                            {
                                "Effect": "Allow",
                                "Action": ["sns:Publish"],
                                "Resource": [args[1], args[2]],
                            },
                            {
                                "Effect":
                                "Allow",
                                "Action": [
                                    "dynamodb:scan",
                                    "dynamodb:GetItem",
                                    "dynamodb:PutItem",
                                    "dynamodb:UpdateItem",
                                ],
                                "Resource":
                                args[3],
                            },
                            {
                                "Effect": "Allow",
                                "Action": ["kinesis:PutRecord"],
                                "Resource": args[4],
                            },
                        ],
                    }))
Example #10
    delete_data_disks_on_termination=True,
    delete_os_disk_on_termination=True,
    os_profile={
        "computer_name": name, # set name
        "admin_username": "******",
        "admin_password": "******",
        "custom_data": userdata,
    },
    os_profile_linux_config={
        "disable_password_authentication": False,
    },
    storage_os_disk={
        "create_option": "FromImage",
        "name": "os-disk-{}".format(name),
    },
    storage_image_reference={
        "publisher": "canonical",
        "offer": "UbuntuServer",
        "sku": "18.04-LTS",
        "version": "latest",
    })

  # The public IP is only allocated once the VM is running, so gather the
  # values needed to look it up after the VM resource exists.
  combined_output = Output.all(vm.id, public_ip.name, public_ip.resource_group_name)

  # Look up the allocated address once the VM exists.
  public_ip_addr = combined_output.apply(
    lambda lst: network.get_public_ip(name=lst[1], resource_group_name=lst[2]))

pulumi.export('public_ip', public_ip_addr.apply(lambda r: r.ip_address))
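
As above, Output.all collects its arguments into a single Output of a list, and apply receives plain, already-resolved values; a two-line sketch:

from pulumi import Output

pair = Output.all(Output.from_input("a"), Output.from_input("b"))
joined = pair.apply(lambda lst: "-".join(lst))  # lst is a plain list: ["a", "b"]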
Example #11
    wait_for_deployment=False)


def create_alias_record(target_domain, distribution):
    """
    Create a Route 53 Alias A record from the target domain name to the CloudFront distribution.
    """
    subdomain, parent_domain = get_domain_and_subdomain(target_domain)
    hzid = pulumi_aws.route53.get_zone(name=parent_domain).id
    return pulumi_aws.route53.Record(
        target_domain,
        name=subdomain,
        zone_id=hzid,
        type='A',
        aliases=[
            pulumi_aws.route53.RecordAliasArgs(
                name=distribution.domain_name,
                zone_id=distribution.hosted_zone_id,
                evaluate_target_health=True,
            )
        ])


alias_a_record = create_alias_record(target_domain, cdn)

# Export the bucket URL, bucket website endpoint, and the CloudFront distribution information.
export('content_bucket_url', Output.concat('s3://', content_bucket.bucket))
export('content_bucket_website_endpoint', content_bucket.website_endpoint)
export('cloudfront_domain', cdn.domain_name)
export('target_domain_endpoint', f'https://{target_domain}/')
Example #12
    def __init__(self,
                 name: str,
                 args: WebServerArgs,
                 opts: ResourceOptions = None):
        super().__init__("custom:app:WebServer", name, {}, opts)

        child_opts = ResourceOptions(parent=self)

        public_ip = network.PublicIp(
            "server-ip",
            resource_group_name=args.resource_group.name,
            location=args.resource_group.location,
            allocation_method="Dynamic",
            opts=child_opts,
        )

        network_iface = network.NetworkInterface(
            "server-nic",
            resource_group_name=args.resource_group.name,
            location=args.resource_group.location,
            ip_configurations=[{
                "name": "webserveripcfg",
                "subnet_id": args.subnet.id,
                "private_ip_address_allocation": "Dynamic",
                "public_ip_address_id": public_ip.id,
            }],
            opts=child_opts,
        )

        userdata = """#!/bin/bash
        echo "Hello, World!" > index.html
        nohup python -m SimpleHTTPServer 80 &"""

        vm = compute.VirtualMachine(
            "server-vm",
            resource_group_name=args.resource_group.name,
            location=args.resource_group.location,
            network_interface_ids=[network_iface.id],
            vm_size="Standard_A0",
            delete_data_disks_on_termination=True,
            delete_os_disk_on_termination=True,
            os_profile={
                "computer_name": "hostname",
                "admin_username": args.username,
                "admin_password": args.password,
                "custom_data": userdata,
            },
            os_profile_linux_config={"disable_password_authentication": False},
            storage_os_disk={
                "create_option": "FromImage",
                "name": "myosdisk1"
            },
            storage_image_reference={
                "publisher": "canonical",
                "offer": "UbuntuServer",
                "sku": "16.04-LTS",
                "version": "latest",
            },
            opts=child_opts,
        )

        # The public IP address is not allocated until the VM is running, so we wait
        # for that resource to create, and then lookup the IP address again to report
        # its public IP.
        combined_output = Output.all(vm.id, public_ip.name,
                                     public_ip.resource_group_name)
        self.public_ip_addr = combined_output.apply(
            lambda lst: network.get_public_ip(
                name=lst[1], resource_group_name=lst[2]).ip_address)
        self.register_outputs({})
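
Example #12 is the canonical ComponentResource shape: children are parented to the component, derived values are stored on self, and register_outputs closes the component out. The bare skeleton, with an illustrative type token and name:

from pulumi import ComponentResource, ResourceOptions

class MyComponent(ComponentResource):
    def __init__(self, name: str, opts: ResourceOptions = None):
        super().__init__("custom:app:MyComponent", name, {}, opts)
        child_opts = ResourceOptions(parent=self)
        # ... create child resources here with opts=child_opts ...
        self.register_outputs({})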
Example #13
    resource_group_name=resource_group.name,
    sku="basic",
    __opts__=ResourceOptions(parent=resource_group),  # legacy alias; newer SDKs spell this opts=
)


def docker_login_and_push(args):
    dockerclient.login(registry=args[0], username=args[1], password=args[2])
    for line in dockerclient.images.push(repository=DOCKER_REPO_URI,
                                         stream=True,
                                         decode=True):
        print(line)


# Push docker image to ACR
Output.all(acr.login_server, acr.admin_username,
           acr.admin_password).apply(docker_login_and_push)

# create Azure AD Application for AKS
app = Application("aks-app", name=PREFIX + "aks-app")

# create service principal for the application so AKS can act on behalf of the application
sp = ServicePrincipal(
    "aks-app-sp",
    application_id=app.application_id,
    __opts__=ResourceOptions(parent=app),
)

# create service principal password
sppwd = ServicePrincipalPassword(
    "aks-app-sp-pwd",
    service_principal_id=sp.id,
Example #14
policy = Output.all(bucket.arn, xl_upgrades_queue.arn, chat_stream.arn,
                    sns_topic.arn).apply(lambda args: json.dumps({
                        "Version":
                        "2012-10-17",
                        "Id":
                        f"{MODULE_NAME}-policy",
                        "Statement": [
                            CREATE_CW_LOGS_POLICY,
                            {
                                "Effect": "Allow",
                                "Action": ["s3:*"],
                                "Resource": args[0]
                            },
                            {
                                "Effect": "Allow",
                                "Action": ["s3:*"],
                                "Resource": f"{args[0]}/*"
                            },
                            {
                                "Effect": "Allow",
                                "Action": ["sqs:*"],
                                "Resource": args[1]
                            },
                            {
                                "Effect": "Allow",
                                "Action": ["kinesis:PutRecord"],
                                "Resource": args[2],
                            },
                            {
                                "Effect": "Allow",
                                "Action": ["sns:*"],
                                "Resource": args[3]
                            },
                        ],
                    }))
Example #15
vpc_1_route_table_assoc = ec2.RouteTableAssociation(
    'vpc-1-route-table-assoc',
    route_table_id=eks_route_table.id,
    subnet_id=vpc_1_subnet.id)

kfp_cluster = eks.Cluster('pulumi-kubeflow-ml',
                          name='pulumi-kubeflow-ml',
                          role_arn=eks_role.arn,
                          vpc_config={
                              'subnet_ids': [vpc_0_subnet.id, vpc_1_subnet.id],
                              'security_group_ids': [eks_cluster_sg.id]
                          },
                          enabled_cluster_log_types=['api'])

k8s_info = Output.all(kfp_cluster.certificate_authority, kfp_cluster.endpoint,
                      kfp_cluster.name, kfp_cluster.id, kfp_cluster.role_arn)

k8s_config = k8s_info.apply(lambda info: """apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {0}
    server: {1}
  name: {2} 
contexts:
- context:
    cluster: {2}
    user: aws
  name: {2}
current-context: {2}
kind: Config
preferences: {{}}
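
The snippet above is cut off mid-template; in this pattern the triple-quoted kubeconfig is normally closed and rendered with str.format over the resolved values. A hedged reconstruction of how such an apply typically ends, using stand-in values:

from pulumi import Output

info = Output.all(Output.from_input("CA_DATA"),       # stand-in certificate data
                  Output.from_input("https://1.2.3.4"),
                  Output.from_input("my-cluster"))
kubeconfig = info.apply(lambda i: """apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {0}
    server: {1}
  name: {2}
current-context: {2}
kind: Config
""".format(i[0], i[1], i[2]))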
Example #16
                                "test:index:MyResource",
                                name,
                                props={
                                    **args,
                                    "outprop": None,
                                },
                                opts=opts)


resA = MyResource("resA", {})
resB = MyResource("resB", {}, ResourceOptions(depends_on=[resA]))
resC = MyResource("resC", {
    "propA": resA.outprop,
    "propB": resB.outprop,
    "propC": "foo",
})
resD = MyResource(
    "resD", {
        "propA": Output.all([resA.outprop, resB.outprop
                             ]).apply(lambda l: f"{l}"),
        "propB": resC.outprop,
        "propC": "bar",
    })
resE = MyResource(
    "resE", {
        "propA": resC.outprop,
        "propB": Output.all([resA.outprop, resB.outprop
                             ]).apply(lambda l: f"{l}"),
        "propC": "baz",
    }, ResourceOptions(depends_on=[resD]))
Example #17
    def __init__(self,
                 name: str,
                 props: SpokeProps,
                 opts: ResourceOptions = None):
        super().__init__('vdc:network:Spoke', name, {}, opts)

        # retrieve configuration
        dmz_ar = props.config.require('dmz_ar')
        hub_as = props.config.require('hub_as')
        hub_stem = props.config.require('hub_stem')
        sbs_ar = props.config.get('sbs_ar')
        spoke_ar = props.config.get('spoke_ar')
        spoke_as = props.config.require('spoke_as')

        # set vdc defaults
        vdc.resource_group_name = props.resource_group.name
        vdc.location = props.resource_group.location
        vdc.tags = props.tags
        vdc.self = self

        # Azure Virtual Network to be peered to the hub
        spoke = vdc.virtual_network(name, [spoke_as])

        # VNet Peering from the hub to spoke
        hub_spoke = vdc.vnet_peering(
            stem=hub_stem,
            virtual_network_name=props.hub.hub_name,
            peer=name,
            remote_virtual_network_id=spoke.id,
            allow_gateway_transit=True,
        )

        # VNet Peering from spoke to the hub
        spoke_hub = vdc.vnet_peering(
            stem=name,
            virtual_network_name=spoke.name,
            peer=hub_stem,
            remote_virtual_network_id=props.hub.hub_id,
            allow_forwarded_traffic=True,
            use_remote_gateways=True,
        )

        # provisioning of optional subnet and routes depends_on VNet Peerings
        # to avoid contention in the Azure control plane

        # AzureBastionSubnet (optional)
        if sbs_ar:
            spoke_sbs_sn = vdc.subnet_special(
                stem=f'{name}-ab',
                name='AzureBastionSubnet',
                virtual_network_name=spoke.name,
                address_prefix=sbs_ar,
                depends_on=[hub_spoke, spoke_hub],
            )

        # Route Table only to be associated with ordinary spoke subnets
        spoke_sn_rt = vdc.route_table(
            stem=f'{name}-sn',
            disable_bgp_route_propagation=True,
            depends_on=[hub_spoke, spoke_hub],
        )

        # provisioning of subnets depends_on VNet Peerings and Route Table
        # to avoid contention in the Azure control plane

        # only one spoke subnet is provisioned as an example, but many can be
        if spoke_ar:  # replace with a loop
            spoke_example_sn = vdc.subnet(
                stem=f'{name}-example',
                virtual_network_name=spoke.name,
                address_prefix=spoke_ar,
                depends_on=[spoke_sn_rt],
            )
            # associate all ordinary spoke subnets to Route Table
            spoke_example_sn_rta = vdc.subnet_route_table(
                stem=f'{name}-example',
                route_table_id=spoke_sn_rt.id,
                subnet_id=spoke_example_sn.id,
            )

        # as VNet Peering may not be specified as next_hop_type, a separate
        # address space in the hub from the firewall allows routes from the
        # spoke to remain unchanged when subnets are added in the hub

        # it is very important to ensure that there is never a route with an
        # address_prefix which covers the AzureFirewallSubnet.
        #ToDo check AzureFirewallManagementSubnet requirements

        # partially or fully invalidate system routes to redirect traffic
        for route in [
            (f'dmz-{name}', props.hub.hub_dmz_rt_name, spoke_as),
            (f'gw-{name}', props.hub.hub_gw_rt_name, spoke_as),
            (f'sn-{name}', props.hub.hub_sn_rt_name, spoke_as),
            (f'{name}-dg', spoke_sn_rt.name, '0.0.0.0/0'),
            (f'{name}-dmz', spoke_sn_rt.name, dmz_ar),
            (f'{name}-hub', spoke_sn_rt.name, hub_as),
        ]:
            vdc.route_to_virtual_appliance(
                stem=route[0],
                route_table_name=route[1],
                address_prefix=route[2],
                next_hop_in_ip_address=props.hub.hub_fw_ip,
            )

        combined_output = Output.all(spoke.name, spoke.id, spoke.subnets).apply  # note: apply is never called, so combined_output is unused

        self.spoke_id = spoke.id  # exported as informational
        self.spoke_name = spoke.name  # exported as informational
        self.spoke_subnets = spoke.subnets  # exported as informational
        self.register_outputs({})
Example #18
    },
    node_config={
        'machine_type':
        NODE_MACHINE_TYPE,
        'oauth_scopes': [
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/devstorage.read_only',
            'https://www.googleapis.com/auth/logging.write',
            'https://www.googleapis.com/auth/monitoring'
        ],
    },
)

# Manufacture a GKE-style Kubeconfig. Note that this is slightly "different" because of the way GKE requires
# gcloud to be in the picture for cluster authentication (rather than using the client cert/key directly).
k8s_info = Output.all(k8s_cluster.name, k8s_cluster.endpoint,
                      k8s_cluster.master_auth)
k8s_config = k8s_info.apply(lambda info: """apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {0}
    server: https://{1}
  name: {2}
contexts:
- context:
    cluster: {2}
    user: {2}
  name: {2}
current-context: {2}
kind: Config
preferences: {{}}
users:
Example #19
 container_definitions=Output.all(
     repository.repository_url,
     activation.id,
     activation.activation_code,
     log_group.id,
 ).apply(lambda args: json.dumps([{
     "name":
     project_name,
     "image":
     args[0] + ":latest",
     "environment": [
         {
             "name": "ACTIVATION_ID",
             "value": args[1],
         },
         {
             "name": "ACTIVATION_CODE",
             "value": args[2],
         },
         {
             "name": "REGION",
             "value": region,
         },
     ],
     "logConfiguration": {
         "logDriver": "awslogs",
         "options": {
             "awslogs-group": args[3],
             "awslogs-region": region,
             "awslogs-stream-prefix": "fargate",
         },
     },
 }])),
Example #20
policy = Output.all(bucket.arn, chat_stream.arn, kms_key.arn,
                    errors_queue.arn).apply(lambda args: json.dumps({
                        "Version":
                        "2012-10-17",
                        "Statement": [
                            CREATE_CW_LOGS_POLICY,
                            {
                                "Sid": "AllowS3",
                                "Effect": "Allow",
                                "Action": "s3:*",
                                "Resource": f"{args[0]}/*",
                            },
                            {
                                "Sid": "AllowKinesis",
                                "Effect": "Allow",
                                "Action": "kinesis:*",
                                "Resource": f"{args[1]}",
                            },
                            {
                                "Sid": "AllowKms",
                                "Effect": "Allow",
                                "Action": "kms:Decrypt",
                                "Resource": f"{args[2]}",
                            },
                            {
                                "Sid": "AllowSQS",
                                "Effect": "Allow",
                                "Action": "sqs:*",
                                "Resource": f"{args[3]}",
                            },
                        ],
                    }))
Example #21
        start="2020-01-01",
        expiry="2030-01-01",
        container_name=args[2],
        permissions={
            "read": "true",
            "write": "false",
            "delete": "false",
            "list": "false",
            "add": "false",
            "create": "false"
        })
    return f"https://{args[0]}.blob.core.windows.net/{args[2]}/{args[3]}{blob_sas.sas}"


signed_blob_url = Output.all(storage_account.name,
                             storage_account.primary_connection_string,
                             storage_account.name, blob.name).apply(get_sas)

app_insights = appinsights.Insights("appservice-ai",
                                    resource_group_name=resource_group.name,
                                    location=resource_group.location,
                                    application_type="web")

sql_server = sql.SqlServer("appservice-sql",
                           resource_group_name=resource_group.name,
                           administrator_login=username,
                           administrator_login_password=pwd,
                           version="12.0")

database = sql.Database("appservice-db",
                        resource_group_name=resource_group.name,
Example #22
        start="2020-01-01",
        expiry="2030-01-01",
        container_name=args[2],
        permissions=storage.GetAccountBlobContainerSASPermissionsArgs(
            read=True,
            write=False,
            delete=False,
            list=False,
            add=False,
            create=False,
        ))
    return f"https://{args[0]}.blob.core.windows.net/{args[2]}/{args[3]}{blob_sas.sas}"


http_signed_blob_url = Output.all(
    http_storage_account.name, http_storage_account.primary_connection_string,
    http_container.name, http_zib_blob.name).apply(get_sas)

http_plan = appservice.Plan(
    "http",
    resource_group_name=resource_group.name,
    kind="Linux",
    sku=appservice.PlanSkuArgs(tier="Dynamic", size="Y1"),
    reserved=True,
)

http_function_app = appservice.FunctionApp(
    "http",
    resource_group_name=resource_group.name,
    app_service_plan_id=http_plan.id,
    storage_account_name=http_storage_account.name,
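
Example #21 and Example #22 are the same program in two styles: plain dicts with string "true"/"false" values versus typed Args classes with real booleans. Current providers accept both, and the typed form surfaces typos at construction time; a side-by-side sketch:

from pulumi_azure import appservice

# Equivalent sku arguments in the two styles:
sku_dict = {"tier": "Dynamic", "size": "Y1"}
sku_args = appservice.PlanSkuArgs(tier="Dynamic", size="Y1")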
Example #23
                                          InvalidationBatch={
                                              'Paths': {
                                                  'Quantity': 1,
                                                  'Items': ['/*'],
                                              },
                                              'CallerReference':
                                              str(time()).replace(".", "")
                                          })
    return response


logs_bucket = s3.Bucket(LOGS_BUCKET_NAME,
                        bucket=LOGS_BUCKET_NAME,
                        acl="private")

wwwroot_bucket = create_s3website_bucket(WWWROOT_BUCKET_NAME)
ssl_certificate = acm.get_certificate(domain=WEBSITE_DOMAIN_NAME,
                                      statuses=["ISSUED"])
s3_distribution = create_cloudfront_distribution_for_s3website(
    wwwroot_bucket, logs_bucket, ssl_certificate)
create_alias_record(WEBSITE_DOMAIN_NAME, s3_distribution)

# Added a cache invalidation instead of decreasing default_ttl of a distribution cache.
# However, invalidations will inflict additional costs after 1000 paths
s3_distribution.id.apply(invalidate_distribution_cache)

export("s3_bucket_url", Output.concat("s3://", wwwroot_bucket.bucket))
export("s3_bucket_website_endpoint", wwwroot_bucket.website_endpoint)
export("cloudfront_domain_name", s3_distribution.domain_name)
export("route53_endpoint", f"https://{WEBSITE_DOMAIN_NAME}/")
Example #24
vpc = ec2.Vpc('test',
              cidr_block="10.11.0.0/16",
              enable_dns_hostnames=True,
              enable_dns_support=True)

internet_gateway = ec2.InternetGateway('test', vpc_id=vpc.id)

route_table = ec2.RouteTable('test',
                             vpc_id=vpc.id,
                             routes=[{
                                 "cidrBlock": "0.0.0.0/0",
                                 "gatewayId": internet_gateway.id
                             }])

zones = Output.from_input(get_availability_zones())
zone_names = zones.apply(lambda zs: zs.names)

subnet0 = ec2.Subnet(
    "test0",
    vpc_id=vpc.id,
    availability_zone=zone_names.apply(lambda names: names[0]),
    cidr_block="10.11.0.0/24",
    map_public_ip_on_launch=True)

subnet1 = ec2.Subnet(
    "test1",
    vpc_id=vpc.id,
    availability_zone=zone_names.apply(lambda names: names[1]),
    cidr_block="10.11.1.0/24",
    map_public_ip_on_launch=True)
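
Example #24 lifts the plain get_availability_zones() result into an Output so it composes with apply. Newer pulumi_aws releases (an assumption about the provider version) also ship Output-returning invoke variants that skip the manual lift; a sketch:

from pulumi_aws import get_availability_zones_output

zones = get_availability_zones_output()                  # already an Output
first_zone = zones.apply(lambda result: result.names[0])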
Example #25
    def __init__(self, name: str, props: HubProps, opts: ResourceOptions=None):
        super().__init__('vdc:network:Hub', name, {}, opts)

        # retrieve configuration
        dmz_ar = props.config.require('dmz_ar')
        fwm_ar = props.config.get('fwm_ar')
        fws_ar = props.config.require('fws_ar')
        fwz_as = props.config.require('fwz_as')
        gws_ar = props.config.require('gws_ar')
        hbs_ar = props.config.get('hbs_ar')
        hub_ar = props.config.get('hub_ar')
        hub_as = props.config.require('hub_as')

        # set vdc defaults
        vdc.resource_group_name = props.resource_group.name
        vdc.location = props.resource_group.location
        vdc.tags = props.tags
        vdc.self = self

        # Azure Virtual Network to which spokes will be peered
        # separate address spaces to simplify custom routing
        hub = vdc.virtual_network(name, [fwz_as, hub_as])

        # DMZ subnet
        hub_dmz_sn = vdc.subnet_special( #ToDo add NSG
            stem = f'{name}-dmz',
            name = 'DMZ', # name not required but preferred
            virtual_network_name = hub.name,
            address_prefix = dmz_ar,
        )

        # AzureFirewallSubnet
        hub_fw_sn = vdc.subnet_special(
            stem = f'{name}-fw',
            name = 'AzureFirewallSubnet', # name required
            virtual_network_name = hub.name,
            address_prefix = fws_ar,
        )

        # GatewaySubnet
        hub_gw_sn = vdc.subnet_special(
            stem = f'{name}-gw',
            name = 'GatewaySubnet', # name required
            virtual_network_name = hub.name,
            address_prefix = gws_ar,
        )

        # provisioning of Gateways and Firewall depends_on subnets
        # to avoid contention in the Azure control plane

        # VPN Gateway
        hub_vpn_gw = vdc.vpn_gateway(
            stem = name,
            subnet_id = hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # ExpressRoute Gateway
        hub_er_gw = vdc.expressroute_gateway(
            stem = name,
            subnet_id = hub_gw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # Azure Firewall
        hub_fw = vdc.firewall(
            stem = name,
            subnet_id = hub_fw_sn.id,
            depends_on=[hub_dmz_sn, hub_fw_sn, hub_gw_sn],
        )

        # provisioning of optional subnets depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # AzureBastionSubnet (optional)
        if hbs_ar:
            hub_ab_sn = vdc.subnet_special( #ToDo add NSG if required
                stem = f'{name}-ab',
                name = 'AzureBastionSubnet', # name required
                virtual_network_name = hub.name,
                address_prefix = hbs_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # AzureFirewallManagementSubnet (optional)
        if fwm_ar:
            hub_fwm_sn = vdc.subnet_special(
                stem = f'{name}-fwm',
                name = 'AzureFirewallManagementSubnet', # name required
                virtual_network_name = hub.name,
                address_prefix = fwm_ar,
                depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
            )

        # work around https://github.com/pulumi/pulumi/issues/4040
        hub_fw_ip = hub_fw.ip_configurations.apply(
            lambda ipc: ipc[0].get('private_ip_address')
        )

        # provisioning of Route Tables depends_on Gateways and Firewall
        # to avoid contention in the Azure control plane

        # Route Table only to be associated with the GatewaySubnet
        hub_gw_rt = vdc.route_table(
            stem = f'{name}-gw',
            disable_bgp_route_propagation = False,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate GatewaySubnet with Route Table
        hub_gw_sn_rta = vdc.subnet_route_table(
            stem = f'{name}-gw',
            route_table_id = hub_gw_rt.id,
            subnet_id = hub_gw_sn.id,
        )

        # Route Table only to be associated with DMZ subnet
        hub_dmz_rt = vdc.route_table(
            stem = f'{name}-dmz',
            disable_bgp_route_propagation = True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # associate DMZ subnet with Route Table
        hub_dmz_sn_rta = vdc.subnet_route_table(
            stem = f'{name}-dmz',
            route_table_id = hub_dmz_rt.id,
            subnet_id = hub_dmz_sn.id,
        )

        # Route Table only to be associated with ordinary subnets in hub
        hub_sn_rt = vdc.route_table(
            stem = f'{name}-sn',
            disable_bgp_route_propagation = True,
            depends_on=[hub_er_gw, hub_fw, hub_vpn_gw],
        )

        # protect intra-GatewaySubnet traffic from being redirected
        vdc.route_to_virtual_network(
            stem = 'gw-gw',
            route_table_name = hub_gw_rt.name,
            address_prefix = gws_ar,
        )

        # partially or fully invalidate system routes to redirect traffic
        for route in [
            (f'gw-dmz', hub_gw_rt.name, dmz_ar),
            (f'gw-hub', hub_gw_rt.name, hub_as),
            (f'dmz-dg', hub_dmz_rt.name, '0.0.0.0/0'),
            (f'dmz-dmz', hub_dmz_rt.name, dmz_ar),
            (f'dmz-hub', hub_dmz_rt.name, hub_as),
            (f'sn-dg', hub_sn_rt.name, '0.0.0.0/0'),
            (f'sn-dmz', hub_sn_rt.name, dmz_ar),
            (f'sn-gw', hub_sn_rt.name, gws_ar),
        ]:
            vdc.route_to_virtual_appliance(
                stem = route[0],
                route_table_name = route[1],
                address_prefix = route[2],
                next_hop_in_ip_address = hub_fw_ip,
            )

        # VNet Peering between stacks using StackReference
        peer = props.config.get('peer')
        if peer:
            org = props.config.require('org')
            project = get_project()
            peer_stack = StackReference(f'{org}/{project}/{peer}')
            peer_hub_id = peer_stack.get_output('hub_id')
            peer_fw_ip = peer_stack.get_output('hub_fw_ip')
            peer_dmz_ar = peer_stack.get_output('dmz_ar') 
            peer_hub_as = peer_stack.get_output('hub_as')

            # VNet Peering (Global) in one direction from stack to peer
            hub_hub = vdc.vnet_peering(
                stem = props.stack,
                virtual_network_name = hub.name,
                peer = peer,
                remote_virtual_network_id = peer_hub_id,
                allow_forwarded_traffic = True,
                allow_gateway_transit = False, # as both hubs have gateways
            )

            # need to invalidate system routes created by Global VNet Peering
            for route in [
                (f'dmz-{peer}-dmz', hub_dmz_rt.name, peer_dmz_ar),
                (f'dmz-{peer}-hub', hub_dmz_rt.name, peer_hub_as),
                (f'gw-{peer}-dmz', hub_gw_rt.name, peer_dmz_ar),
                (f'gw-{peer}-hub', hub_gw_rt.name, peer_hub_as),
                (f'sn-{peer}-dmz', hub_sn_rt.name, peer_dmz_ar),
                (f'sn-{peer}-hub', hub_sn_rt.name, peer_hub_as),
            ]:
                vdc.route_to_virtual_appliance(
                    stem = route[0],
                    route_table_name = route[1],
                    address_prefix = route[2],
                    next_hop_in_ip_address = peer_fw_ip,
                )
        
        # provisioning of subnets depends_on Route Table (Gateways & Firewall)
        # to avoid contention in the Azure control plane

        # only one shared subnet is provisioned as an example, but many can be
        if hub_ar: #ToDo replace with loop
            hub_example_sn = vdc.subnet( #ToDo add NSG
                stem = f'{name}-example',
                virtual_network_name = hub.name,
                address_prefix = hub_ar,
                depends_on=[hub_sn_rt],
            )

            # associate all hub shared services subnets to Route Table        
            hub_example_sn_rta = vdc.subnet_route_table(
                stem = f'{name}-example',
                route_table_id = hub_sn_rt.id,
                subnet_id = hub_example_sn.id,
            )

        combined_output = Output.all(
            hub_dmz_rt.name,
            hub_er_gw,
            hub_fw,
            hub_fw_ip,
            hub_gw_rt.name,
            hub.id,
            hub.name,
            hub_sn_rt.name,
            hub.subnets,
            hub_vpn_gw,
        ).apply  # note: apply is never called, so combined_output is unused

        self.hub_dmz_rt_name = hub_dmz_rt.name # used to add routes to spokes
        self.hub_er_gw = hub_er_gw # needed prior to VNet Peering from spokes
        self.hub_fw = hub_fw # needed prior to VNet Peering from spokes
        self.hub_fw_ip = hub_fw_ip # used to construct routes
        self.hub_gw_rt_name = hub_gw_rt.name # used to add routes to spokes
        self.hub_id = hub.id # exported and used for peering
        self.hub_name = hub.name # exported and used for peering
        self.hub_sn_rt_name = hub_sn_rt.name # used to add routes to spokes
        self.hub_subnets = hub.subnets # exported as informational
        self.hub_vpn_gw = hub_vpn_gw # needed prior to VNet Peering from spokes
        self.register_outputs({})
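
Example #25 wires two stacks together with StackReference: get_output returns an Output that feeds routes and peerings like any resource output. The minimal shape, with placeholder stack names:

from pulumi import StackReference

peer_stack = StackReference("my-org/my-project/other-stack")  # placeholder
peer_hub_id = peer_stack.get_output("hub_id")                 # an Output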
Example #26
from pulumi import Output, CustomResource


class MyResource(CustomResource):
    nested_numbers: Output[dict]

    def __init__(self, name):
        CustomResource.__init__(self, "test:index:MyResource", name, {
            "nested_numbers": None,
        })


class SumResource(CustomResource):
    sum: Output[int]

    def __init__(self, name, sum):
        CustomResource.__init__(self, "test:index:SumResource", name, {
            "sum": sum,
        })


res1 = MyResource("testResource1")
res2 = MyResource("testResource2")

sum = Output.from_input(
    res1.nested_numbers).apply(lambda d: d["foo"]["bar"] + d["baz"])
sumRes = SumResource("sumResource", sum)
                    "apiVersion": "client.authentication.k8s.io/v1alpha1",
                    "command": "aws-iam-authenticator",
                    "args": [
                        "token",
                        "-i",
                        f"{cluster_name}",
                    ],
                },
            },
        }],
    })


# Create the KubeConfig Structure as per https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
kubeconfig = Output.all(
    cluster.endpoint, cluster.certificate_authority["data"],
    cluster.name).apply(
        lambda args: generateKubeconfig(args[0], args[1], args[2]))

# Declare a provider using the KubeConfig we created
# This will be used to interact with the EKS cluster
k8s_provider = Provider("k8s-provider", kubeconfig=kubeconfig)

# Create a Namespace object https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
ns = Namespace("app-ns",
               metadata={
                   "name": "joe-duffy",
               },
               opts=ResourceOptions(provider=k8s_provider))

app_labels = {"app": "iac-workshop"}
app_deployment = Deployment("app-dep",
Example #28
            },
            "spec": {
                "containers": [{
                    "name":
                    "iac-workshop",
                    "image":
                    "gcr.io/google-samples/kubernetes-bootcamp:v1",
                }],
            },
        },
    },
    opts=ResourceOptions(provider=k8s_provider))

service = Service("app-service",
                  metadata={
                      "namespace": ns.metadata["name"],
                      "labels": app_labels
                  },
                  spec={
                      "ports": [{
                          "port": 80,
                          "target_port": 8080,
                      }],
                      "selector": app_labels,
                      "type": "LoadBalancer",
                  },
                  opts=ResourceOptions(provider=k8s_provider))

export('url', Output.all(service.status['load_balancer']['ingress'][0]['hostname'], service.spec['ports'][0]['port']) \
       .apply(lambda args: f"http://{args[0]}:{round(args[1])}"))
Example #30
    async def test_unwrap_list_dict(self):
        x = Output.from_input(["hello", {"foo": Output.from_input("bar")}])
        x_val = await x.future()
        self.assertEqual(x_val, ["hello", {"foo": "bar"}])
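
This last test pins down a useful property: Output.from_input recursively lifts Outputs nested inside lists and dicts, so the resolved value is entirely plain. The same behavior in program form:

from pulumi import Output

nested = Output.from_input(["hello", {"foo": Output.from_input("bar")}])
flat = nested.apply(lambda v: v[1]["foo"])  # resolves to the plain string "bar"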