Code example #1
    async def test_output_all_known_if_all_are_known(self):
        res = FakeCustomResource("some-resource")
        fut = asyncio.Future()
        fut.set_result(42)
        known_fut = asyncio.Future()
        known_fut.set_result(True)
        out = Output({res}, fut, known_fut)

        other = FakeCustomResource("some-other-resource")
        other_fut = asyncio.Future()
        other_fut.set_result(UNKNOWN)  # <- not known
        other_known_fut = asyncio.Future()
        other_known_fut.set_result(False)
        other_out = Output({other}, other_fut, other_known_fut)

        combined = Output.all(out, other_out)
        combined_dict = Output.all(out=out, other_out=other_out)
        deps = []
        prop = await rpc.serialize_property(combined, deps)
        prop_dict = await rpc.serialize_property(combined_dict, deps)
        self.assertSetEqual(set(deps), {res, other})

        # The contents of the list are unknown if any of the Outputs used to
        # create it were unknown.
        self.assertEqual(rpc.UNKNOWN, prop)
        self.assertEqual(rpc.UNKNOWN, prop_dict)
Code example #2
def test_ephemeral_project():
    project_name = "my-project-name"
    root_project_name = "root-project-name"
    organization_name = "root-organization-name"
    args = ProjectArgs(project_name, root_project_name, organization_name)
    project = Project("name", args)

    def test_properties(args):
        new_project_id = args[0]
        assert new_project_id == project_name

    Output.all(project.new_project_id).apply(test_properties)
Code example #3
File: __main__.py Project: whatsinmyopsec/chat_thief
def allow_s3_bucket_access(s3_bucket):
    bucket_policy = Output.all(s3_bucket.arn).apply(lambda args: json.dumps({
        "Version": "2012-10-17",
        "Id": "BeginWorldExchange",
        "Statement": [
            {
                "Sid": "PublicAccess",
                "Effect": "Allow",
                "Principal": {"AWS": "*"},
                "Action": "s3:Get*",
                "Resource": f"{args[0]}/*",
            },
            {
                "Sid": "BeginWriteAccess",
                "Effect": "Allow",
                "Principal": {"AWS": "arn:aws:iam::851075464416:root"},
                "Action": "s3:Put*",
                "Resource": f"{args[0]}/*",
            },
        ],
    }))

    s3.BucketPolicy("beginworld-exchange-bucket-policy",
                    bucket=s3_bucket.id,
                    policy=bucket_policy)
Code example #4
def allow_s3_bucket_access(s3_bucket, roles, lamda_function_arn):
    role_arns = [role.arn for role in roles]

    bucket_policy = Output.all(s3_bucket.arn, role_arns).apply(lambda args: json.dumps({
        "Version": "2012-10-17",
        "Id": "MorgueFileBucketPolicy",
        "Statement": [
            {
                "Sid": "AllowThingsInTheBucket",
                "Effect": "Allow",
                "Principal": {"AWS": args[1]},
                "Action": "s3:*",
                "Resource": f"{args[0]}/*",
            },
            {
                "Sid": "AllowTheBucket",
                "Effect": "Allow",
                "Principal": {"AWS": args[1]},
                "Action": "s3:*",
                "Resource": f"{args[0]}",
            },
        ],
    }))

    s3.BucketPolicy("morgue-file-bucket-policy",
                    bucket=s3_bucket.id,
                    policy=bucket_policy)
Code example #5
    async def test_output_all(self):
        res = FakeCustomResource("some-resource")
        fut = asyncio.Future()
        fut.set_result(42)
        known_fut = asyncio.Future()
        known_fut.set_result(True)
        out = Output({res}, fut, known_fut)

        other = Output.from_input(99)
        combined = Output.all(out, other)
        combined_dict = Output.all(out=out, other=other)
        deps = []
        prop = await rpc.serialize_property(combined, deps)
        prop_dict = await rpc.serialize_property(combined_dict, deps)
        self.assertSetEqual(set(deps), {res})
        self.assertEqual([42, 99], prop)
        self.assertEqual({"out": 42, "other": 99}, prop_dict)
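
A minimal sketch of the same positional-versus-keyword behaviour outside the test harness; the host/port names and the connection-string format below are illustrative assumptions, not part of the original examples:

from pulumi import Output

host = Output.from_input("db.internal.example")
port = Output.from_input(5432)

# Positional form: the callback receives the resolved values as a list.
url_from_list = Output.all(host, port).apply(
    lambda args: f"postgres://{args[0]}:{args[1]}/app")

# Keyword form: the callback receives a dict keyed by the argument names.
url_from_dict = Output.all(host=host, port=port).apply(
    lambda args: f"postgres://{args['host']}:{args['port']}/app")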
Code example #6
    async def test_lifted_unknown(self):
        settings.SETTINGS.dry_run = True

        fut = asyncio.Future()
        fut.set_result(UNKNOWN)
        out = Output.from_input({ "foo": "foo", "bar": UNKNOWN, "baz": fut})

        self.assertFalse(await out.is_known())

        r1 = out["foo"]
        self.assertTrue(await r1.is_known())
        self.assertEqual(await r1.future(with_unknowns=True), "foo")

        r2 = out["bar"]
        self.assertFalse(await r2.is_known())
        self.assertEqual(await r2.future(with_unknowns=True), UNKNOWN)

        r3 = out["baz"]
        self.assertFalse(await r3.is_known())
        self.assertEqual(await r3.future(with_unknowns=True), UNKNOWN)

        r4 = out["baz"]["qux"]
        self.assertFalse(await r4.is_known())
        self.assertEqual(await r4.future(with_unknowns=True), UNKNOWN)

        out = Output.from_input([ "foo", UNKNOWN ])

        r5 = out[0]
        self.assertTrue(await r5.is_known())
        self.assertEqual(await r5.future(with_unknowns=True), "foo")

        r6 = out[1]
        self.assertFalse(await r6.is_known())
        self.assertEqual(await r6.future(with_unknowns=True), UNKNOWN)

        out = Output.all(Output.from_input("foo"), Output.from_input(UNKNOWN),
            Output.from_input([ Output.from_input(UNKNOWN), Output.from_input("bar") ]))

        self.assertFalse(await out.is_known())

        r7 = out[0]
        self.assertTrue(await r7.is_known())
        self.assertEqual(await r7.future(with_unknowns=True), "foo")

        r8 = out[1]
        self.assertFalse(await r8.is_known())
        self.assertEqual(await r8.future(with_unknowns=True), UNKNOWN)

        r9 = out[2]
        self.assertFalse(await r9.is_known())

        r10 = r9[0]
        self.assertFalse(await r10.is_known())
        self.assertEqual(await r10.future(with_unknowns=True), UNKNOWN)

        r11 = r9[1]
        self.assertTrue(await r11.is_known())
        self.assertEqual(await r11.future(with_unknowns=True), "bar")
Code example #7
    def __init__(self,
                 name: str,
                 account_id: Input[str],
                 account_name: Input[str],
                 access_role_name: Input[str],
                 username: Input[str],
                 user_policy_arn: Input[str],
                 password: Input[str],
                 opts: ResourceOptions = None):
        super().__init__("nuage/aws:organizations:AWSOrganizationAccountUser",
                         name, {}, opts)

        assume_role_arn = Output.all(account_id, access_role_name).apply(
            lambda a: f"arn:aws:iam::{a[0]}:role/{a[1]}")

        provider = aws.Provider("freelance-account-provider",
                                assume_role={"role_arn": assume_role_arn})

        user = iam.User(
            "freelance-account-user",
            name=Output.all(account_name,
                            username).apply(lambda a: f"{a[0]}-{a[1]}"),
            opts=pulumi.ResourceOptions(provider=provider))

        user_login_profile = dynamic_providers.iam.UserLoginProfile(
            "freelance-account-user-login-profile",
            username=user.name,
            password=password,
            assume_role=dynamic_providers.iam.AssumeRole(
                role_arn=assume_role_arn))

        user_policy_attachment = iam.UserPolicyAttachment(
            "freelance-account-user_UserAccessRole",
            policy_arn=user_policy_arn,
            user=user.name,
            opts=pulumi.ResourceOptions(provider=provider))

        self.console_url = Output.all(account_id).apply(
            lambda a: f"https://{a[0]}.signin.aws.amazon.com/console")

        self.username = user.name

        self.password = password

        self.register_outputs({})
Code example #8
    def __init__(self,
                 name,
                 security_groups,
                 subnets,
                 vpc_id,
                 opts: ResourceOptions = None):

        super().__init__("redata:cluster:FileSystem", name, {}, opts)

        self.efs = aws.efs.FileSystem(name, encrypted=True)

        self.efs_policy = aws.efs.FileSystemPolicy(
            "redata-efs-policy",
            file_system_id=self.efs.id,
            policy=Output.all(self.efs.arn).apply(lambda args: json.dumps({
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Deny",
                    "Principal": {"AWS": "*"},
                    "Action": "*",
                    # Included to avoid always seeing a diff on the policy in 'pulumi up':
                    "Resource": args[0],
                    "Condition": {
                        "Bool": {"aws:SecureTransport": "false"}
                    }
                }]
            })))

        efs_secgrp = aws.ec2.SecurityGroup(
            f'{name}-secgrp',
            vpc_id=vpc_id,
            description='Enable EFS mount target access',
            ingress=[
                aws.ec2.SecurityGroupIngressArgs(
                    protocol='tcp',
                    from_port=2049,
                    to_port=2049,
                    security_groups=security_groups)
            ],
        )

        efs_mount_targets = []
        for i, subnet_id in enumerate(subnets):
            target = aws.efs.MountTarget(
                f"{name}-mount-{i}",
                file_system_id=self.efs.id,
                security_groups=[efs_secgrp.id],
                subnet_id=subnet_id,
            )
            efs_mount_targets.append(target)

        self.register_outputs({})
Code example #9
File: EKS.py Project: dechiad1/pulumi-aws-pave
    def _build_asg_userdata(self, cluster, name):
        user_data = Output.all(cluster.endpoint, cluster.certificate_authority).apply(lambda args: """
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint %s --b64-cluster-ca %s %s
""" % (args[0], args[1]['data'], name))

        print(user_data)
        return user_data
Code example #10
def create_permission(app: Output[Application]) -> Output:
    return Output.all(app.application_id, app.oauth2_permissions).apply(lambda args: {
        "resourceAppId": args[0],
        "resourceAccess": [
            {
                "id": args[1][0]["id"],
                "type": "Scope"
            }
        ]
    })
Code example #11
    async def test_output_all(self):
        res = TestCustomResource("some-resource")
        fut = asyncio.Future()
        fut.set_result(42)
        known_fut = asyncio.Future()
        known_fut.set_result(True)
        out = Output({res}, fut, known_fut)

        other = Output.from_input(99)
        combined = Output.all(out, other)
        deps = []
        prop = await rpc.serialize_property(combined, deps)
        self.assertListEqual(deps, [res])
        self.assertEqual([42, 99], prop)
Code example #12
def test_cross_project_registry():
    bucket_name = "my-bucketname"
    target_project_number = "target_project_number"
    args = CrossProjectCloudRunAccessArgs(
        bucket_name, Output.from_input(target_project_number)
    )

    cloud_run_access = CrossProjectCloudRunAccess(
        "my-cloudrun-cross-registry-access", args, None
    )

    def test_properties(args):
        observed_bucket_name, observed_member = args
        assert observed_bucket_name == bucket_name
        assert observed_member == _cloudrun_service_account(target_project_number)

    return Output.all(
        cloud_run_access.bucket_policy.bucket, cloud_run_access.bucket_policy.member
    ).apply(test_properties)
Code example #13
File: fs.py Project: patrika1979/redata
    def __init__(
        self, name, file_system: FileSystem, path, opts: ResourceOptions = None
    ):

        super().__init__("redata:cluster:FileSystem", name, {}, opts)

        self.ap = aws.efs.AccessPoint(
            name,
            file_system_id=file_system.efs.id,
            posix_user=aws.efs.AccessPointPosixUserArgs(uid=1000, gid=1000),
            root_directory=aws.efs.AccessPointRootDirectoryArgs(
                path=path,
                creation_info=aws.efs.AccessPointRootDirectoryCreationInfoArgs(
                    owner_uid=1000, owner_gid=1000, permissions="755"
                ),
            ),
            opts=ResourceOptions(parent=self),
        )

        self.policy_document = Output.all(file_system.efs.arn, self.ap.arn).apply(
            lambda args: json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Effect": "Allow",
                            "Action": [
                                "elasticfilesystem:ClientMount",
                                "elasticfilesystem:ClientWrite",
                            ],
                            "Resource": args[0],
                            "Condition": {
                                "StringEquals": {
                                    "elasticfilesystem:AccessPointArn": args[1]
                                }
                            },
                        }
                    ],
                }
            )
        )

        self.register_outputs({})
Code example #14
    async def test_output_all_composes_dependencies(self):
        res = TestCustomResource("some-resource")
        fut = asyncio.Future()
        fut.set_result(42)
        known_fut = asyncio.Future()
        known_fut.set_result(True)
        out = Output({res}, fut, known_fut)

        other = TestCustomResource("some-other-resource")
        other_fut = asyncio.Future()
        other_fut.set_result(99)
        other_known_fut = asyncio.Future()
        other_known_fut.set_result(True)
        other_out = Output({other}, other_fut, other_known_fut)

        combined = Output.all(out, other_out)
        deps = []
        prop = await rpc.serialize_property(combined, deps)
        self.assertSetEqual(set(deps), {res, other})
        self.assertEqual([42, 99], prop)
Code example #15
def create_queue_and_policy(name):
    queue = sqs.Queue(f"{name}-queue", visibility_timeout_seconds=200)

    policy = Output.all(queue.arn).apply(
        lambda args: json.dumps(
            {
                "Version": "2012-10-17",
                "Id": f"{name}-policy",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": ["SQS:*"],
                        "Resource": args[0],
                        "Principal": "*",
                    }
                ],
            }
        )
    )

    sqs.QueuePolicy(f"{name}-very-permissive", policy=policy, queue_url=queue.id)

    return queue
Code example #16
File: __main__.py Project: forkkit/pulumi-examples
        "service": "false",
        "container": "false",
        "object": "true"
    },
    permissions={
        "read": "true",
        "write": "false",
        "delete": "false",
        "add": "false",
        "list": "false",
        "create": "false",
        "update": "false",
        "process": "false"
    },
)
httpdotnet_signed_blob_url = Output.all(httpdotnet_storage_account.name, httpdotnet_container.name, httpdotnet_zib_blob.name, account_sas.sas) \
    .apply(lambda args: f"https://{args[0]}.blob.core.windows.net/{args[1]}/{args[2]}{args[3]}")

httpdotnet_plan = appservice.Plan("http-dotnet",
                                  resource_group_name=resource_group.name,
                                  kind="FunctionApp",
                                  sku={
                                      "tier": "Dynamic",
                                      "size": "Y1"
                                  })

httpdotnet_function_app = appservice.FunctionApp(
    "http-dotnet",
    resource_group_name=resource_group.name,
    app_service_plan_id=httpdotnet_plan.id,
    storage_connection_string=httpdotnet_storage_account.primary_connection_string,
Code example #17
environment = Output.all(
    airflow_db.address,  # 0
    airflow_db.password,  # 1
    sd_namespace.name,  # 2
    redata_db.address,  # 3
    redata_db.password,  # 4
    redata_sources,  # 5
    airflow_admin_password,  # 6
    grafana_admin_password,  # 7
).apply(lambda args: [
    # Airflow DB
    {
        "name": "AIRFLOW_CONN_METADATA_DB",
        "value": f"postgres+psycopg2://airflow:{args[1]}@{args[0]}:5432/airflow"
    },
    {
        "name": "AIRFLOW_VAR__METADATA_DB_SCHEMA",
        "value": "airflow"
    },
    # Airflow Config
    {
        "name": "AIRFLOW__CORE__LOAD_DEFAULT_CONNECTIONS",
        "value": "False"
    },
    {
        "name": "AIRFLOW__CORE__SQL_ALCHEMY_CONN",
        "value": f"postgres+psycopg2://airflow:{args[1]}@{args[0]}:5432/airflow"
    },
    {
        "name": "AIRFLOW__CORE__DAGS_FOLDER",
        "value": "/usr/local/redata/redata/dags"
    },
    {
        "name": "AIRFLOW__CORE__EXECUTOR",
        "value": "LocalExecutor"
    },
    {
        "name": "AIRFLOW__LOGGING__BASE_LOG_FOLDER",
        "value": airflow_base_log_folder
    },
    # - Front-end IPs that are allowed to set secure headers; only our ALB can talk to us, so set it to *
    #   (see https://docs.gunicorn.org/en/stable/settings.html#forwarded-allow-ips)
    {
        "name": "FORWARDED_ALLOW_IPS",
        "value": "*"
    },
    # - Set proper base URL for redirects etc
    {
        "name": "AIRFLOW__WEBSERVER__BASE_URL",
        "value": f'{base_url}/airflow'
    },
    # - Admin user setup (via entrypoint script):
    {
        "name": "AIRFLOW_SECURITY_ADMIN_USER",
        "value": "admin"
    },
    {
        "name": "AIRFLOW_SECURITY_ADMIN_PASSWORD",
        "value": args[6]
    },
    {
        "name": "AIRFLOW_SECURITY_ADMIN_EMAIL",
        "value": airflow_admin_email
    },
    # Grafana Config
    {
        "name": "GF_INSTALL_PLUGINS",
        "value": "grafana-polystat-panel,grafana-clock-panel,grafana-simple-json-datasource"
    },
    {
        "name": "GF_SECURITY_ADMIN_USER",
        "value": "admin"
    },
    {
        "name": "GF_SECURITY_ADMIN_PASSWORD",
        "value": args[7]
    },
    {
        'name': 'GF_SERVER_ROOT_URL',
        'value': f'{base_url}/grafana'
    },
    {
        'name': 'GF_SERVER_SERVE_FROM_SUB_PATH',
        'value': 'true'
    },
    # Redata DB
    {
        "name": "REDATA_METRICS_DATABASE_HOST",
        "value": args[3]
    },
    {
        "name": "REDATA_METRICS_DATABASE_USER",
        "value": "redata"
    },
    {
        "name": "REDATA_METRICS_DATABASE_PASSWORD",
        "value": args[4]
    },
    {
        "name": "REDATA_METRICS_DATABASE_NAME",
        "value": "redata"
    },
    {
        "name": "REDATA_METRICS_DB_URL",
        "value": f"postgres://*****:*****@{args[3]}:5432/redata"
    },
    # Redata Config
    {
        'name': 'GRAFANA_WEB_HOST',
        'value': f'grafana-web.{args[2]}'
    },
    {
        'name': 'GRAFANA_WEB_PORT',
        'value': '3000'
    },
    {
        "name": "REDATA_AIRFLOW_SCHEDULE_INTERVAL",
        "value": redata_airflow_schedule_interval
    },
    {
        "name": "REDATA_TIME_COL_BLACKLIST_REGEX",
        "value": redata_time_col_blacklist_regex
    },
] + [{
    "name": f"REDATA_SOURCE_DB_URL_{name}",
    "value": url
} for name, url in args[5].items()])
Code example #18
policy = Output.all(
    dynamodb_table.arn,
    dynamodb_table.stream_arn,
    sns_topic.arn,
    weapons_topic.arn,
    chat_stream.arn,
    bucket.arn,
    twitch_chat_bot.arn,
    errors_queue.arn,
    weapons_queue.arn,
    xl_upgrades_queue.arn,
    gods_queue.arn,
    error_stream.arn,
).apply(
    lambda args: json.dumps(
        {
            "Version": "2012-10-17",
            "Id": f"{MODULE_NAME}-policy",
            "Statement": [
                CREATE_CW_LOGS_POLICY,
                {"Effect": "Allow", "Action": ["dynamodb:*"], "Resource": args[0]},
                {"Effect": "Allow", "Action": ["dynamodb:*"], "Resource": args[1]},
                {
                    "Effect": "Allow",
                    "Action": ["sns:*"],
                    "Resource": [args[2], args[3]],
                },
                {"Effect": "Allow", "Action": ["kinesis:*"], "Resource": args[4]},
                {
                    "Effect": "Allow",
                    "Action": ["s3:ListObjectsV2"],
                    "Resource": args[5],
                },
                {"Effect": "Allow", "Action": ["lambda:*"], "Resource": args[6]},
                {
                    "Effect": "Allow",
                    "Action": ["SQS:*"],
                    "Resource": [args[7], args[8], args[9], args[10]],
                },
                {"Effect": "Allow", "Action": ["kinesis:*"], "Resource": args[11]},
            ],
        }
    )
)
Code example #19
    def __init__(self, name: str, args: WebServerArgs, opts: ResourceOptions = None):
        super().__init__("custom:app:WebServer", name, {}, opts)

        child_opts = ResourceOptions(parent=self)

        public_ip = network.PublicIp(
            "server-ip",
            resource_group_name=args.resource_group.name,
            location=args.resource_group.location,
            allocation_method="Dynamic",
            opts=child_opts,
        )

        network_iface = network.NetworkInterface(
            "server-nic",
            resource_group_name=args.resource_group.name,
            location=args.resource_group.location,
            ip_configurations=[
                network.NetworkInterfaceIpConfigurationArgs(
                    name="webserveripcfg",
                    subnet_id=args.subnet.id,
                    private_ip_address_allocation="Dynamic",
                    public_ip_address_id=public_ip.id,
                )
            ],
            opts=child_opts,
        )

        userdata = """#!/bin/bash
        echo "Hello, World!" > index.html
        nohup python -m SimpleHTTPServer 80 &"""

        vm = compute.VirtualMachine(
            "server-vm",
            resource_group_name=args.resource_group.name,
            location=args.resource_group.location,
            network_interface_ids=[network_iface.id],
            vm_size="Standard_A0",
            delete_data_disks_on_termination=True,
            delete_os_disk_on_termination=True,
            os_profile=compute.VirtualMachineOsProfileArgs(
                computer_name="hostname",
                admin_username=args.username,
                admin_password=args.password,
                custom_data=userdata,
            ),
            os_profile_linux_config=compute.VirtualMachineOsProfileLinuxConfigArgs(
                disable_password_authentication=False,
            ),
            storage_os_disk=compute.VirtualMachineStorageOsDiskArgs(
                create_option="FromImage",
                name="myosdisk1",
            ),
            storage_image_reference=compute.VirtualMachineStorageImageReferenceArgs(
                publisher="canonical",
                offer="UbuntuServer",
                sku="16.04-LTS",
                version="latest",
            ),
            opts=child_opts,
        )

        # The public IP address is not allocated until the VM is running, so we wait
        # for that resource to create, and then lookup the IP address again to report
        # its public IP.
        combined_output = Output.all(
            vm.id, public_ip.name, public_ip.resource_group_name
        )
        self.public_ip_addr = combined_output.apply(
            lambda lst: network.get_public_ip(name=lst[1], resource_group_name=lst[2]).ip_address
        )
        self.register_outputs({})
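
A hypothetical usage sketch of the component above; WebServerArgs and its resource_group, subnet, username, and password fields are inferred from the attribute accesses in the snippet and are assumptions rather than part of the original:

# Assumed usage; resource_group, subnet, and admin_password are placeholders
# that would come from the rest of the program.
server = WebServer(
    "web",
    WebServerArgs(
        resource_group=resource_group,
        subnet=subnet,
        username="webadmin",
        password=admin_password,
    ),
)
pulumi.export("public_ip", server.public_ip_addr)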
Code example #20
    },
    node_config={
        'machine_type':
        NODE_MACHINE_TYPE,
        'oauth_scopes': [
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/devstorage.read_only',
            'https://www.googleapis.com/auth/logging.write',
            'https://www.googleapis.com/auth/monitoring'
        ],
    },
)

# Manufacture a GKE-style Kubeconfig. Note that this is slightly "different" because of the way GKE requires
# gcloud to be in the picture for cluster authentication (rather than using the client cert/key directly).
k8s_info = Output.all(k8s_cluster.name, k8s_cluster.endpoint,
                      k8s_cluster.master_auth)
k8s_config = k8s_info.apply(lambda info: """apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {0}
    server: https://{1}
  name: {2}
contexts:
- context:
    cluster: {2}
    user: {2}
  name: {2}
current-context: {2}
kind: Config
preferences: {{}}
users:
Code example #21
            "user": {
                "exec": {
                    "apiVersion": "client.authentication.k8s.io/v1alpha1",
                    "command": "aws-iam-authenticator",
                    "args": [
                        "token",
                        "-i",
                        f"{cluster_name}",
                    ],
                },
            },
        }],
    })

# Create the KubeConfig Structure as per https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
kubeconfig = Output.all(cluster.endpoint, cluster.certificate_authority["data"], cluster.name).apply(lambda args: generateKubeconfig(args[0], args[1], args[2]))

# Declare a provider using the KubeConfig we created
# This will be used to interact with the EKS cluster
k8s_provider = Provider("k8s-provider", kubeconfig=kubeconfig)

# Create a Namespace object https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
ns = Namespace("app-ns",
    metadata={
       "name": "joe-duffy",
    },
    opts=ResourceOptions(provider=k8s_provider)
)

app_labels = {
    "app": "iac-workshop"
Code example #22
        "node_size": "Standard_D2_v2"
    },
]

cluster_names = []
for config in aks_cluster_config:
    cluster = containerservice.KubernetesCluster(
        "aksCluster-%s" % config["name"],
        resource_group_name=resource_group.name,
        linux_profile=containerservice.KubernetesClusterLinuxProfileArgs(
            admin_username="******",
            ssh_key=containerservice.KubernetesClusterLinuxProfileSshKeyArgs(
                key_data=ssh_public_key, ),
        ),
        service_principal=containerservice.KubernetesClusterServicePrincipalArgs(
            client_id=ad_app.application_id,
            client_secret=ad_sp_password.value),
        location=config["location"],
        default_node_pool=containerservice.KubernetesClusterDefaultNodePoolArgs(
            name="aksagentpool",
            node_count=config["node_count"],
            vm_size=config["node_size"],
        ),
        dns_prefix="sample-kube",
    )
    cluster_names.append(cluster.name)

export("aks_cluster_names", Output.all(cluster_names))
Code example #23

class FinalResource(CustomResource):
    number: Output[str]

    def __init__(self, name, number):
        CustomResource.__init__(self, "test:index:FinalResource", name, {
            "number": number,
        })


def assert_eq(lhs, rhs):
    assert lhs == rhs


res1 = MyResource("testResource1")
res2 = MyResource("testResource2")

res1.number.apply(lambda n: assert_eq(n, 2))
res2.number.apply(lambda n: assert_eq(n, 3))

# Output.all combines a list of outputs into an output of a list.
resSum = Output.all(res1.number, res2.number).apply(lambda l: l[0] + l[1])
FinalResource("testResource3", resSum)

# Test additional Output helpers
hello_world = Output.concat(
    "Hello ",
    Output.from_input("world!")).apply(lambda s: assert_eq(s, "Hello world!"))
export("helloworld", hello_world)
Code example #24
File: __main__.py Project: wchaws/examples
        start="2020-01-01",
        expiry="2030-01-01",
        container_name=args[2],
        permissions={
            "read": "true",
            "write": "false",
            "delete": "false",
            "list": "false",
            "add": "false",
            "create": "false"
        })
    return f"https://{args[0]}.blob.core.windows.net/{args[2]}/{args[3]}{blob_sas.sas}"


signed_blob_url = Output.all(storage_account.name,
                             storage_account.primary_connection_string,
                             storage_container.name, blob.name).apply(get_sas)

app_insights = appinsights.Insights("appservice-ai",
                                    resource_group_name=resource_group.name,
                                    location=resource_group.location,
                                    application_type="web")

sql_server = sql.SqlServer("appservice-sql",
                           resource_group_name=resource_group.name,
                           administrator_login=username,
                           administrator_login_password=pwd,
                           version="12.0")

database = sql.Database("appservice-db",
                        resource_group_name=resource_group.name,
Code example #25
File: __main__.py Project: GreenstoneCS/examples
    special=True,
).result

sql_server = sql.SqlServer(
    "sqlserver",
    resource_group_name=resource_group.name,
    administrator_login_password=administrator_login_password,
    administrator_login="******",
    version="12.0")

database = sql.Database("sqldb",
                        resource_group_name=resource_group.name,
                        server_name=sql_server.name,
                        requested_service_objective_name="S0")

connection_string = Output.all(sql_server.name, database.name) \
    .apply(lambda args: f"Server=tcp:{args[0]}.database.windows.net;Database={args[1]};") or "1111"

text_blob = storage.Blob("text",
                         storage_account_name=storage_account.name,
                         storage_container_name=container.name,
                         type="Block",
                         source=asset.FileAsset("./README.md"))

app_service_plan = appservice.Plan("asp",
                                   resource_group_name=resource_group.name,
                                   kind="App",
                                   sku=appservice.PlanSkuArgs(tier="Basic",
                                                              size="B1"))

blob = storage.Blob(
    "zip",
Code example #26
            },
            "name": "kubernetes",
        }],
        "contexts": [{
            "context": {
                "cluster": "kubernetes",
                "user": "******",
            },
            "name": "aws",
        }],
        "current-context": "aws",
        "kind": "Config",
        "users": [{
            "name": "aws",
            "user": {
                "exec": {
                    "apiVersion": "client.authentication.k8s.io/v1alpha1",
                    "command": "aws-iam-authenticator",
                    "args": [
                        "token",
                        "-i",
                        f"{cluster_name}",
                    ],
                },
            },
        }],
    })

# Create the KubeConfig Structure as per https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
kubeconfig = Output.all(cluster.endpoint, cluster.certificate_authority["data"], cluster.name).apply(lambda args: generateKubeconfig(args[0], args[1], args[2]))
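
The snippet calls a generateKubeconfig helper that is only partially visible in the excerpt. A minimal sketch of what such a helper might look like, assuming it only substitutes the endpoint, certificate data, and cluster name into the kubeconfig structure shown above and serializes it with json.dumps (the real project's implementation may differ):

import json

def generateKubeconfig(endpoint, cert_data, cluster_name):
    # Hypothetical reconstruction based on the structure shown in the snippet above.
    return json.dumps({
        "apiVersion": "v1",
        "clusters": [{
            "cluster": {
                "server": endpoint,
                "certificate-authority-data": cert_data,
            },
            "name": "kubernetes",
        }],
        "contexts": [{
            "context": {
                "cluster": "kubernetes",
                "user": "aws",
            },
            "name": "aws",
        }],
        "current-context": "aws",
        "kind": "Config",
        "users": [{
            "name": "aws",
            "user": {
                "exec": {
                    "apiVersion": "client.authentication.k8s.io/v1alpha1",
                    "command": "aws-iam-authenticator",
                    "args": ["token", "-i", cluster_name],
                },
            },
        }],
    })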
Code example #27
            create_option="FromImage",
            name="myosdisk1",
            caching="ReadWrite",
            disk_size_gb=100,
        ),
        image_reference=compute.ImageReferenceArgs(
            publisher="canonical",
            offer="UbuntuServer",
            sku="18.04-LTS",
            version="latest",
        ),
    ),
)

# Get IP address as an output.
combined_output = Output.all(server.id, public_ip.name, resource_group.name)
public_ip_addr = combined_output.apply(
    lambda lst: network.get_public_ip_address(public_ip_address_name=lst[1],
                                              resource_group_name=lst[2]))

# Create connection object to server.
conn = provisioners.ConnectionArgs(
    host=public_ip_addr.ip_address,
    username=admin_username,
    private_key=private_key,
    private_key_passphrase=private_key_passphrase,
)

# Copy install script to server.
cp_config = provisioners.CopyFile(
    'config',
Code example #28
File: __main__.py Project: 8secz-johndpope/multi-iac
                            location=resource_group.location,
                            network_interface_ids=[network_iface.id],
                            vm_size="Standard_A0",
                            delete_data_disks_on_termination=True,
                            delete_os_disk_on_termination=True,
                            os_profile={
                                "computer_name": "hostname",
                                "admin_username": username,
                                "admin_password": password,
                                "custom_data": userdata,
                            },
                            os_profile_linux_config={
                                "disable_password_authentication": False,
                            },
                            storage_os_disk={
                                "create_option": "FromImage",
                                "name": "myosdisk1",
                            },
                            storage_image_reference={
                                "publisher": "canonical",
                                "offer": "UbuntuServer",
                                "sku": "16.04-LTS",
                                "version": "latest",
                            })

combined_output = Output.all(vm.id, public_ip.name,
                             public_ip.resource_group_name)
public_ip_addr = combined_output.apply(
    lambda lst: network.get_public_ip(name=lst[1], resource_group_name=lst[2]))
pulumi.export("public_ip", public_ip_addr.ip_address)
Code example #29
        "horizontalPodAutoscaling": {
            "disabled": "false"
        },
        # "httpLoadBalancing": {"disabled": False},
        "istioConfig": {
            "disabled": False,
            "auth": "AUTH_MUTUAL_TLS"
        },
    },
    # Equivalent of --no-enable-stackdriver-kubernetes
    logging_service=None,
)

# Manufacture a GKE-style Kubeconfig. Note that this is slightly "different" because of the way GKE requires
# gcloud to be in the picture for cluster authentication (rather than using the client cert/key directly).
k8s_info = Output.all(k8s_cluster.name, k8s_cluster.endpoint,
                      k8s_cluster.master_auth)
k8s_config = k8s_info.apply(lambda info: """apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: {0}
    server: https://{1}
  name: {2}
contexts:
- context:
    cluster: {2}
    user: {2}
  name: {2}
current-context: {2}
kind: Config
preferences: {{}}
users:
Code example #30
File: __main__.py Project: gbaeke/pulumi-samples
        network_interface_ids=[nic.id],
        os_disk={
            "caching": "None",
            "storage_account_type": "Standard_LRS"
        },
        size=config["size"],
        source_image_reference=config["img"])
    vm_ids.append(vm.id)

# create azure sql server
sqlserver = sql.SqlServer("sso-sql",
                          resource_group_name=rg.name,
                          administrator_login="******",
                          administrator_login_password=sqlpwd,
                          version="12.0")

# create sql database
sqldb = sql.Database("sso-db",
                     resource_group_name=rg.name,
                     server_name=sqlserver.name,
                     edition="Standard",
                     requested_service_objective_name="S1")

# connection string (thanks Pulumi docs)
connection_string = Output.all(sqlserver.name, sqldb.name, sql_user, sqlpwd) \
    .apply(lambda args: f"Server=tcp:{args[0]}.database.windows.net;initial catalog={args[1]};user ID={args[2]};password={args[3]};Min Pool Size=0;Max Pool Size=30;Persist Security Info=true;")

# Export the connection string for the storage account
pulumi.export('vm_ids', vm_ids)
pulumi.export('connstr', connection_string)