Пример #1
0
def eks_role_policy(oidc_provider, namespace, service_account):
    """Render an IAM assume-role policy document, as a JSON-string Output,
    allowing the given Kubernetes service account to assume the role through
    the cluster's OIDC provider (the IRSA pattern).
    """
    # The condition key is "<issuer-url>:sub" and must equal the
    # "system:serviceaccount:<namespace>:<name>" subject exactly.
    subject_key = Output.concat(oidc_provider.url, ":sub")
    subject_value = Output.concat(
        "system:serviceaccount:", namespace, ":", service_account)

    statement = {
        "Effect": "Allow",
        "Principal": {"Federated": oidc_provider.arn},
        "Action": "sts:AssumeRoleWithWebIdentity",
        "Condition": {"StringEquals": {subject_key: subject_value}},
    }
    document = {"Version": "2012-10-17", "Statement": [statement]}

    # from_input resolves the nested Outputs before serialization.
    return Output.from_input(document).apply(json.dumps)
Пример #2
0
def create_dns_record(name: str,
                      domain: str,
                      zone_id: str,
                      value: Output,
                      verify="awverify"):
    """Create the main (proxied) CNAME plus its companion domain-verification
    CNAME and return both wrapped in a DnsRecord."""
    fqdn = ".".join([name, domain]).strip(".")
    is_apex = name == ""

    # Apex records use the bare domain for both resource name and record name.
    main_record = Record(
        domain if is_apex else fqdn,
        name=domain if is_apex else name,
        ttl=1,
        type="CNAME",
        zone_id=zone_id,
        proxied=True,
        value=value,
    )

    # Verification record is never proxied so the verifier sees the target.
    verify_record = Record(
        f"{verify}.{fqdn}",
        name=".".join([verify, name]).strip("."),
        ttl=1,
        type="CNAME",
        zone_id=zone_id,
        proxied=False,
        value=Output.concat(verify, ".", value),
    )

    return DnsRecord(main_record, verify_record)
Пример #3
0
    def __init__(
        self,
        name: str,
        args: CrossProjectCloudRunAccessArgs,
        opts: ResourceOptions = None,
    ):
        """Grant the target project's Cloud Run robot service account access
        to this project's container registry bucket."""
        super().__init__(
            "unopac:modules:CrossProjectCloudRunAccess", name, {}, opts)

        # The robot account is derived from the target project number:
        # service-<number>@serverless-robot-prod.iam.gserviceaccount.com
        robot_member = Output.concat(
            "serviceAccount:service-",
            args.target_project_number,
            "@serverless-robot-prod.iam.gserviceaccount.com",
        )

        # NOTE(review): roles/storage.admin is broad for registry pulls;
        # confirm whether roles/storage.objectViewer would suffice.
        self.bucket_policy = storage.BucketIAMMember(
            resource_name="AccessToContainerRegistry",
            bucket=args.registry_bucketname,
            member=robot_member,
            role="roles/storage.admin",
            opts=opts,
        )
Пример #4
0

class FinalResource(CustomResource):
    """Test resource that records a single ``number`` output."""

    # Resolved value of the "number" property passed to the constructor.
    number: Output[str]

    def __init__(self, name, number):
        # Use super() rather than calling CustomResource.__init__ directly;
        # identical behavior here, but follows the cooperative-init idiom.
        super().__init__("test:index:FinalResource", name, {
            "number": number,
        })


def assert_eq(lhs, rhs):
    """Assert that *lhs* equals *rhs*.

    Raises AssertionError (as before) but now with a diagnostic message
    showing both values, so failures are debuggable from the traceback.
    """
    assert lhs == rhs, f"{lhs!r} != {rhs!r}"


res1 = MyResource("testResource1")
res2 = MyResource("testResource2")

# Each resource's number resolves asynchronously; check it inside apply.
res1.number.apply(lambda value: assert_eq(value, 2))
res2.number.apply(lambda value: assert_eq(value, 3))

# Output.all combines a list of outputs into an output of a list.
resSum = Output.all(res1.number,
                    res2.number).apply(lambda pair: pair[0] + pair[1])
FinalResource("testResource3", resSum)

# Test additional Output helpers: concat joins plain and wrapped strings.
hello_world = Output.concat("Hello ", Output.from_input("world!")).apply(
    lambda s: assert_eq(s, "Hello world!"))
export("helloworld", hello_world)
Пример #5
0
# Read-only SAS for the blob container (only "read" is granted),
# valid for the 2020-2030 window.
blob_sas = storage.get_account_blob_container_sas_output(
    connection_string=storage_account.primary_connection_string,
    start="2020-01-01",
    expiry="2030-01-01",
    container_name=container_storage_account.name,
    permissions=storage.GetAccountBlobContainerSASPermissionsArgs(
        read=True,
        write=False,
        delete=False,
        list=False,
        add=False,
        create=False))

# Signed HTTPS URL to the blob with the SAS appended.
# NOTE(review): the container path segment uses storage_account.name while
# the SAS above was issued for container_storage_account.name — confirm the
# two names actually match, otherwise the SAS will not authorize this URL.
signed_blob_url = Output.concat("https://", storage_account.name,
                                ".blob.core.windows.net/",
                                storage_account.name, "/", blob.name,
                                blob_sas.sas)

# Store the signed URL in Key Vault so the app references a secret, not a URL.
secret = keyvault.Secret("deployment-zip",
                         key_vault_id=vault.id,
                         value=signed_blob_url)

# Key Vault secret URI of the form <vault-uri>secrets/<name>/<version>.
secret_uri = Output.all(vault.vault_uri, secret.name, secret.version) \
    .apply(lambda args: f"{args[0]}secrets/{args[1]}/{args[2]}")

app = appservice.AppService(
    "app",
    resource_group_name=resource_group.name,
    app_service_plan_id=app_service_plan.id,
    identity=appservice.AppServiceIdentityArgs(type="SystemAssigned", ),
    app_settings={
Пример #6
0
    skip_final_snapshot=True,
    storage_type="gp2",
    username="******",
    vpc_security_group_ids=[db_secgrp.id],
    opts=ResourceOptions(protect=protect_persistent_storage),
)
# Friendly CNAME for the Airflow Postgres instance inside the DB zone.
airflow_db_cname = aws.route53.Record("airflow-postgres-cname",
                                      name="airflow-postgres",
                                      zone_id=db_zone.zone_id,
                                      type='CNAME',
                                      ttl=60,
                                      records=[airflow_db.address])
# Export the raw endpoint and the alias as host:port.
# Fix: the export names and the "airflow-postgres." prefix were f-strings
# with no placeholders (ruff F541) — plain literals behave identically.
export("airflow-db-endpoint", airflow_db.endpoint)
export(
    "airflow-db-alias",
    Output.concat("airflow-postgres.", db_zone.name, ":",
                  airflow_db.port.apply(str)))

redata_db = aws.rds.Instance(
    "redata-postgres",
    allocated_storage=20,
    db_subnet_group_name=rds_subnetgroup.name,
    engine="postgres",
    engine_version="12.5",
    instance_class="db.t2.micro",
    name="redata",
    password=redata_db_password,
    port=5432,
    skip_final_snapshot=True,
    storage_type="gp2",
    username="******",
    vpc_security_group_ids=[db_secgrp.id],
Пример #7
0
    },
    # CloudFront typically takes 15 minutes to fully deploy a new distribution.
    # Skip waiting for that to complete.
    wait_for_deployment=False)


def create_alias_record(target_domain, distribution):
    """Create a Route 53 alias A record mapping the target domain to the
    CloudFront distribution, in the parent domain's hosted zone."""
    subdomain, parent_domain = get_domain_and_subdomain(target_domain)
    zone = pulumi_aws.route53.get_zone(name=parent_domain)

    # Alias target points straight at the distribution's own zone/domain.
    alias = {
        'name': distribution.domain_name,
        'zoneId': distribution.hosted_zone_id,
        'evaluateTargetHealth': True,
    }
    return pulumi_aws.route53.Record(target_domain,
                                     name=subdomain,
                                     zone_id=zone.id,
                                     type='A',
                                     aliases=[alias])


# Alias the target domain at the CloudFront distribution created above.
alias_a_record = create_alias_record(target_domain, cdn)

# Export the bucket URL, bucket website endpoint, and the CloudFront distribution information.
export('content_bucket_url', Output.concat('s3://', content_bucket.bucket))
export('content_bucket_website_endpoint', content_bucket.website_endpoint)
export('cloudfront_domain', cdn.domain_name)
export('target_domain_endpoint', f'https://{target_domain}/')
Пример #8
0
# Export the storage account name.
pulumi.export('storageaccount', account.name)

# Export the Consumption Plan
pulumi.export('consumptionplan', plan.name)

# List of storage account keys.
# Both names are Outputs, so the listing call must run inside .apply.
storageAccountKeys = pulumi.Output.all(
    resource_group.name,
    account.name).apply(lambda args: storage.list_storage_account_keys(
        resource_group_name=args[0], account_name=args[1]))
# Primary storage account key (first entry of the returned key list).
primaryStorageKey = storageAccountKeys.apply(
    lambda accountKeys: accountKeys.keys[0].value)
# Build a storage connection string out of it:
storageConnectionString = Output.concat(
    "DefaultEndpointsProtocol=https;AccountName=", account.name,
    ";AccountKey=", primaryStorageKey)

# Export the storage account keys as a secret
pulumi.export("storageaccountkeys", pulumi.Output.secret(storageAccountKeys))
# Export the primarystoragekey as a secret
pulumi.export('primarystoragekey', pulumi.Output.secret(primaryStorageKey))
# Export the storageconnectionstring as a secret
pulumi.export('storageconnectionstring',
              pulumi.Output.secret(storageConnectionString))

# Create the functionapp
app = web.WebApp(
    "functionapp",
    resource_group_name=resource_group.name,
    location=resource_group.location,
Пример #9
0
                                          InvalidationBatch={
                                              'Paths': {
                                                  'Quantity': 1,
                                                  'Items': ['/*'],
                                              },
                                              'CallerReference':
                                              str(time()).replace(".", "")
                                          })
    return response


# Private bucket that receives the CloudFront access logs.
logs_bucket = s3.Bucket(LOGS_BUCKET_NAME,
                        bucket=LOGS_BUCKET_NAME,
                        acl="private")

# Static-website bucket fronted by CloudFront using the issued ACM cert.
wwwroot_bucket = create_s3website_bucket(WWWROOT_BUCKET_NAME)
ssl_certificate = acm.get_certificate(domain=WEBSITE_DOMAIN_NAME,
                                      statuses=["ISSUED"])
s3_distribution = create_cloudfront_distribution_for_s3website(
    wwwroot_bucket, logs_bucket, ssl_certificate)
create_alias_record(WEBSITE_DOMAIN_NAME, s3_distribution)

# Added a cache invalidation instead of decreasing default_ttl of a distribution cache.
# However, invalidations will inflict additional costs after 1000 paths
s3_distribution.id.apply(invalidate_distribution_cache)

# Surface the useful endpoints in the stack outputs.
export("s3_bucket_url", Output.concat("s3://", wwwroot_bucket.bucket))
export("s3_bucket_website_endpoint", wwwroot_bucket.website_endpoint)
export("cloudfront_domain_name", s3_distribution.domain_name)
export("route53_endpoint", f"https://{WEBSITE_DOMAIN_NAME}/")
Пример #10
0
# Create a small linux volume that contains a tiny (and thus fast to download) linux.
vm_vol = libvirt.Volume(
    f"{basename}-linux",
    pool=vm_pool.name,
    source=
    "http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img",
    format="qcow2",
    opts=ResourceOptions(provider=libvirt_provider))
export("libvirt volume name", vm_vol.name)

# Create a VM using the volume created above.
# wait_for_lease=True makes the provider block until the guest obtains a
# network lease, so the VM is reachable when the resource completes.
vm = libvirt.Domain(f"{basename}-vm",
                    memory=512,
                    vcpu=1,
                    disks=[libvirt.DomainDiskArgs(volume_id=vm_vol.id)],
                    network_interfaces=[
                        libvirt.DomainNetworkInterfaceArgs(
                            network_name="default",
                            wait_for_lease=True,
                        )
                    ],
                    opts=ResourceOptions(provider=libvirt_provider))
export("libvirt VM name", vm.name)

# Export a command that can be used to see that there is indeed a VM running under KVM on the KVM host.
test_cmd = Output.concat('echo virsh list | ssh -i ',
                         libvirt_server.ssh_priv_key_file, ' ',
                         libvirt_server.username, '@', libvirt_server.ip)
export("Check the libvirt VM on the KVM host", test_cmd)
Пример #11
0
    def __init__(self, name: str, opts: ResourceOptions = None):
        """Provision an Azure VM configured as a KVM (libvirt) host.

        Sets the following attributes for consumers:
          libvirt_remote_uri: qemu+ssh remote URI for the libvirt provider.
          vm_pool_dir: folder on the host where VM pools should be placed.
          ip, username, ssh_priv_key_file: SSH connection details.
        """
        super().__init__('custom:resource:LibvirtHost', name, {}, opts)

        basename = f"{name}-kvm"
        username = "******"
        computername = "kvmhost"

        # Resource group, etc for the KVM host
        resource_group = resources.ResourceGroup(
            f"{basename}-rg", opts=ResourceOptions(parent=self))

        net = network.VirtualNetwork(f"{basename}-net",
                                     resource_group_name=resource_group.name,
                                     address_space=network.AddressSpaceArgs(
                                         address_prefixes=["10.0.0.0/16"], ),
                                     subnets=[
                                         network.SubnetArgs(
                                             name="default",
                                             address_prefix="10.0.1.0/24",
                                         )
                                     ],
                                     opts=ResourceOptions(parent=self))

        public_ip = network.PublicIPAddress(
            f"{basename}-ip",
            resource_group_name=resource_group.name,
            public_ip_allocation_method=network.IPAllocationMethod.DYNAMIC,
            opts=ResourceOptions(parent=self))

        network_iface = network.NetworkInterface(
            f"{basename}-nic",
            resource_group_name=resource_group.name,
            ip_configurations=[
                network.NetworkInterfaceIPConfigurationArgs(
                    name="serveripcfg",
                    subnet=network.SubnetArgs(id=net.subnets[0].id),
                    private_ip_allocation_method=network.IPAllocationMethod.
                    DYNAMIC,
                    public_ip_address=network.PublicIPAddressArgs(
                        id=public_ip.id),
                )
            ],
            opts=ResourceOptions(parent=self))

        # SSH key for accessing the Azure VM that is going to be the KVM host.
        ssh_key = tls.PrivateKey(f"{basename}-sshkey",
                                 algorithm="RSA",
                                 rsa_bits=4096,
                                 opts=ResourceOptions(parent=self))

        # Script to configure the kvm service on the kvm host.
        # (Plain string literal: the old f-prefix had no placeholders.)
        init_script = """#!/bin/bash

        # Install KVM
        sudo apt update
        sudo apt-get -y install qemu-kvm libvirt-bin
        # hack to account for this bug: https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1677398
        # Work around: https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1677398/comments/42
        sudo sed -i '$ a security_driver = "none"' /etc/libvirt/qemu.conf
        sudo systemctl restart libvirt-bin

        """

        vm = compute.VirtualMachine(
            f"{basename}-vm",
            resource_group_name=resource_group.name,
            network_profile=compute.NetworkProfileArgs(network_interfaces=[
                compute.NetworkInterfaceReferenceArgs(id=network_iface.id),
            ], ),
            hardware_profile=compute.HardwareProfileArgs(
                vm_size=compute.VirtualMachineSizeTypes.STANDARD_D4S_V3),
            os_profile=compute.OSProfileArgs(
                computer_name=computername,
                admin_username=username,
                # cloud-init style custom data must be base64-encoded.
                custom_data=base64.b64encode(
                    init_script.encode("ascii")).decode("ascii"),
                linux_configuration=compute.LinuxConfigurationArgs(
                    ssh=compute.SshConfigurationArgs(public_keys=[
                        compute.SshPublicKeyArgs(
                            key_data=ssh_key.public_key_openssh,
                            path=f"/home/{username}/.ssh/authorized_keys")
                    ]))),
            storage_profile=compute.StorageProfileArgs(
                os_disk=compute.OSDiskArgs(
                    create_option=compute.DiskCreateOptionTypes.FROM_IMAGE, ),
                image_reference=compute.ImageReferenceArgs(
                    publisher="canonical",
                    offer="UbuntuServer",
                    sku="18.04-LTS",
                    version="latest",
                ),
            ),
            opts=ResourceOptions(parent=self))

        # There's some delay between when Azure says the VM is ready and
        # when the KVM/qemu service can start accepting connections.
        # So, wait a bit to allow the KVM server to become fully ready.
        # But only do the wait if the VM has been provisioned (i.e. not during a preview).
        vm.provisioning_state.apply(lambda state: time.sleep(90))

        # Depend on vm.id so the lookup runs only after the VM exists
        # (the dynamic public IP is not assigned until then).
        public_ip_addr = vm.id.apply(
            lambda _: network.get_public_ip_address_output(
                public_ip_address_name=public_ip.name,
                resource_group_name=resource_group.name))

        # Create/update the private key file for the SSH remote connection URI.
        def write_key_file(priv_key, key_file):
            """Write the private key to key_file with owner-read-only perms."""
            # Relax permissions first so an existing read-only file can be
            # overwritten; use a context manager so the handle is closed even
            # if the write fails (the original leaked the handle on error).
            if os.path.exists(key_file):
                os.chmod(key_file, 0o666)
            with open(key_file, "w") as key_fh:
                key_fh.write(priv_key)
            os.chmod(key_file, 0o400)

        key_file = f"{basename}_server.priv"
        ssh_key.private_key_pem.apply(
            lambda priv_key: write_key_file(priv_key, key_file))

        # Build the connection URI that is returned for use by the libvirt provider.
        # See https://libvirt.org/uri.html#URI_remote for details on the remote URI options
        self.libvirt_remote_uri = Output.concat(
            "qemu+ssh://", username, "@", public_ip_addr.ip_address,
            "/system?keyfile=./", key_file,
            "&socket=/var/run/libvirt/libvirt-sock&no_verify=1")

        # Return where the VM pool should be created.
        # In this case, the "vm pool" is simply placed under the KVM host user's home folder
        self.vm_pool_dir = f"/home/{username}/vms"

        # Other values for convenience to output useful information
        self.ip = public_ip_addr.ip_address
        self.username = username
        self.ssh_priv_key_file = key_file

        self.register_outputs({})
Пример #12
0
# One droplet per replica, each tagged with its own name tag plus the
# shared droplet-type tag that the load balancer targets.
for replica in range(DROPLET_COUNT):
    droplet_name = f"web-{replica}"
    replica_tag = do.Tag(droplet_name)
    do.Droplet(
        droplet_name,
        image="ubuntu-20-04-x64",
        region=region,
        size="512mb",
        tags=[replica_tag.id, droplet_type_tag.id],
        user_data=userdata,
    )

# HTTP load balancer forwarding port 80 to every droplet with the type tag,
# with a TCP health check on the same port.
loadbalancer = do.LoadBalancer(
    "public",
    droplet_tag=droplet_type_tag.name,
    forwarding_rules=[{
        "entry_port": 80,
        "entry_protocol": "http",
        "target_port": 80,
        "target_protocol": "http",
    }],
    healthcheck={
        "port": 80,
        "protocol": "tcp",
    },
    region=region,
)

# Public HTTP endpoint of the load balancer.
endpoint = Output.concat("http://", loadbalancer.ip)
export("endpoint", endpoint)
Пример #13
0
    def __init__(self,
                 name: str,
                 args: BucketWithNotificationArgs,
                 opts: ResourceOptions = None):
        """Bucket plus Pub/Sub topic wired together with a storage
        Notification; grants the GCS service agent publish rights on the
        topic and the Pub/Sub service agent token-creator on the project.
        """
        super().__init__("unopac:modules:BucketWithNotification", name, {},
                         opts)

        log.info(
            f"Trying to get project default service account for new project with {args.gcp_project}"
        )

        self.bucket = storage.Bucket(
            args.bucket_resource_name,
            project=args.gcp_project.project_id,
            opts=opts,
        )

        # GCS service agent of the project. This is an Output (resolved via
        # apply), so it must never be interpolated with an f-string.
        gcs_account = args.gcp_project.project_id.apply(
            lambda project_id: self._get_storage_project_service_account(
                project_id, opts))

        self.topic = pubsub.Topic(
            f"{args.bucket_resource_name}-{args.topic_resource_name_suffix}",
            project=args.gcp_project.project_id,
            opts=opts,
        )

        # Allow the GCS service agent to publish bucket events to the topic.
        # Bug fix: build the member with Output.concat — the previous
        # f-string rendered the unresolved Output object, producing an
        # invalid IAM member instead of the service account email.
        self.gcs_default_project_service_account_topicbindingtopic_iambinding = (
            pubsub.TopicIAMBinding(
                f"{name}-default-project-service-account-topic-iam-binding",
                topic=self.topic.id,
                role="roles/pubsub.publisher",
                members=[
                    Output.concat("serviceAccount:",
                                  gcs_account.email_address)
                ],
                opts=opts,
            ))

        # Give the Pub/Sub service agent token-creator rights on the project.
        # Consistency fix: pass opts like every other child resource here.
        self.pubsub_accountcreator_policy_binding = projects.IAMMember(
            resource_name=
            "project-service-account-pubsub-serviceAccount-tokenCreator",
            project=args.gcp_project.project_id,
            member=Output.concat(
                "serviceAccount:service-",
                args.gcp_project.number,
                "@gcp-sa-pubsub.iam.gserviceaccount.com",
            ),
            role="roles/iam.serviceAccountTokenCreator",
            opts=opts,
        )

        # Publish a message on object creation and metadata updates.
        self.notification = storage.Notification(
            f"{args.bucket_resource_name}-notification",
            bucket=self.bucket.name,
            payload_format="JSON_API_V1",
            topic=self.topic.id,
            event_types=[
                "OBJECT_FINALIZE",
                "OBJECT_METADATA_UPDATE",
            ],
            custom_attributes={
                "new-attribute": "new-attribute-value",
            },
            opts=opts,
        )
Пример #14
0
# Route 3 start
route_3 = """
  /prod:
    post:
      responses: {}
      x-amazon-apigateway-integration:
        httpMethod: "POST"
        passthroughBehavior: "when_no_match"
        type: "AWS_PROXY"
        uri: """

# We are creating the body via apply & concat: https://www.pulumi.com/docs/intro/concepts/inputs-outputs/#outputs-and-strings
# invoke_arn: https://www.pulumi.com/docs/reference/pkg/aws/lambda/function/#invoke_arn_python
# You can pass in any # of lambda functions on 1 api gateway with this solution
# Fix: the route/spec strings are passed directly — wrapping them in
# f'{...}' (as before) was a no-op string conversion (ruff F541-style smell).
fullbody = Output.concat(first_part_swagger_openapi, route_1,
                         api_airtable.invoke_arn, route_2,
                         api_airtable.invoke_arn, route_3,
                         api_airtable.invoke_arn)

pulumi.export("fullbody", fullbody)
api_gateway = apigateway.RestApi(
    resource_name='api-gateway',
    api_key_source='HEADER',
    body=fullbody,
    description="This is the hello python apigateway with lambda integration",
)

api_gateway_deployment = apigateway.Deployment(
    'api-gateway-deployment',
    rest_api=api_gateway.id,
    description="This is the apigateway deployment",
    opts=pulumi.ResourceOptions(depends_on=[api_gateway]))
Пример #15
0
  version: "2021-03-29T15:07:58Z"
  title: "marv"
basePath: "/dev"
schemes:
  - "https"
paths:
  /test:
    post:
      responses: {}
      x-amazon-apigateway-integration:
        httpMethod: "POST"
        passthroughBehavior: "when_no_match"
        type: "AWS_PROXY"
        uri: """

# Build the OpenAPI body by appending the lambda invoke ARN (an Output) to
# the static spec prefix. Fix: pass first_part directly — the previous
# f'{first_part}' wrapper was a no-op string conversion.
final_output = Output.concat(first_part, api_airtable.invoke_arn)
api_gateway = apigateway.RestApi(
    'api-gateway',
    body=final_output,
    api_key_source='HEADER',
    description="This is the hello python apigateway with lambda integration",
)

api_gateway_deployment = apigateway.Deployment(
    'api-gateway-deployment',
    rest_api=api_gateway.id,
    opts=pulumi.ResourceOptions(depends_on=[api_gateway]))

api_gateway_stage = apigateway.Stage(
    'api-gateway-stage',
    stage_name='dev',
Пример #16
0
# Upload the app content as a block blob from the local "wwwroot" archive.
blob = storage.Blob("appservice-b",
                    resource_group_name=resource_group.name,
                    account_name=storage_account.name,
                    container_name=storage_container.name,
                    type=storage.BlobType.BLOCK,
                    source=asset.FileArchive("wwwroot"))

# Read-only (Permissions.R) service SAS for the container, valid 2021-2030.
blob_sas = storage.list_storage_account_service_sas_output(
    account_name=storage_account.name,
    protocols=storage.HttpProtocol.HTTPS,
    shared_access_start_time="2021-01-01",
    shared_access_expiry_time="2030-01-01",
    resource=storage.SignedResource.C,
    resource_group_name=resource_group.name,
    permissions=storage.Permissions.R,
    canonicalized_resource=Output.concat("/blob/", storage_account.name, "/",
                                         storage_container.name),
    content_type="application/json",
    cache_control="max-age=5",
    content_disposition="inline",
    content_encoding="deflate")

# Signed URL: https://<account>.blob.core.windows.net/<container>/<blob>?<sas>
signed_blob_url = Output.concat("https://", storage_account.name,
                                ".blob.core.windows.net/",
                                storage_container.name, "/", blob.name, "?",
                                blob_sas.service_sas_token)

# Application Insights component backing the app service.
app_insights = insights.Component(
    "appservice-ai",
    application_type=insights.ApplicationType.WEB,
    kind="web",
    resource_group_name=resource_group.name)
            "Sid": "AWSCloudTrailCreateLogStream",
            "Effect": "Allow",
            "Action": ["logs:CreateLogStream"],
            "Resource": f"{resource}"
        }, {
            "Sid": "AWSCloudTrailPutLogEvents",
            "Effect": "Allow",
            "Action": ["logs:PutLogEvents"],
            "Resource": f"{resource}"
        }]
    })


# build resource string for iam_role_generate function
# (log-group/log-stream ARN prefix, wildcarded over the stream suffix)
resource = Output.concat("arn:aws:logs:", region, ":", account_id,
                         ":log-group:", cloudwatch_log.name, ":log-stream:",
                         log_stream.name, "*")

# Apply policy to role once the ARN Output resolves.
role_policy = iam.RolePolicy("cloudwatch_log_stream",
                             role=role.id,
                             policy=resource.apply(iam_role_generate))

# Create cloudtrail for s3 and write to s3 bucket
trail_s3 = cloudtrail.Trail('CloudTrail_logging_for_s3',
                            opts=ResourceOptions(depends_on=[bucket_policy]),
                            cloud_watch_logs_group_arn=cloudwatch_log.arn,
                            cloud_watch_logs_role_arn=role.arn,
                            enable_logging=True,
                            enable_log_file_validation=True,
                            name="Cloudtrail_s3",
Пример #18
0
# Logical database inside the Cloud SQL instance.
database = pulumi_gcp.sql.Database(
    "database", instance=cloud_sql_instance.name, name=config.require("db-name")
)

# Database user; note the user name reuses the "db-name" config value.
users = pulumi_gcp.sql.User(
    "users",
    name=config.require("db-name"),
    instance=cloud_sql_instance.name,
    password=config.require_secret("db-password"),
)

# Connection URL using the Cloud SQL socket-style host
# (?host=/cloudsql/<connection-name>); built with Output.concat because the
# password (require_secret) and connection name are Outputs.
sql_instance_url = Output.concat(
    "postgres://",
    config.require("db-name"),
    ":",
    config.require_secret("db-password"),
    "@/",
    config.require("db-name"),
    "?host=/cloudsql/",
    cloud_sql_instance.connection_name,
)

cloud_run = pulumi_gcp.cloudrun.Service(
    "default-service",
    location=Config("gcp").require("region"),
    template=pulumi_gcp.cloudrun.ServiceTemplateArgs(
        metadata=ServiceTemplateMetadataArgs(
            annotations={
                "run.googleapis.com/cloudsql-instances": cloud_sql_instance.connection_name
            }
        ),
        spec=pulumi_gcp.cloudrun.ServiceTemplateSpecArgs(