Example #1
0
    def populate_tf_resource_rds(self, resource, namespace_info,
                                 existing_secrets):
        """Create an RDS instance resource plus the Terraform outputs
        that are later collected into an OpenShift Secret."""
        account, identifier, values, output_prefix, output_resource_name = \
            self.init_values(resource, namespace_info)

        resources = []
        self.init_common_outputs(resources, namespace_info, output_prefix,
                                 output_resource_name)

        # Reuse an already-provisioned password so a re-run does not
        # rotate credentials; generate one only when none exists yet.
        try:
            password = \
                existing_secrets[account][output_prefix]['db.password']
        except KeyError:
            password = self.determine_db_password(namespace_info,
                                                  output_resource_name)
        values['password'] = password

        # rds instance
        # Ref: https://www.terraform.io/docs/providers/aws/r/db_instance.html
        instance = aws_db_instance(identifier, **values)
        resources.append(instance)

        # Outputs feeding the OpenShift Secret fields.
        secret_fields = [
            ('db.host', '${' + instance.fullname + '.address}'),
            ('db.port', '${' + instance.fullname + '.port}'),
            ('db.name', values['name']),
            ('db.user', values['username']),
            ('db.password', values['password']),
        ]
        for field, field_value in secret_fields:
            resources.append(
                output(output_prefix + '[' + field + ']', value=field_value))

        for res in resources:
            self.add_resource(account, res)
Example #2
0
def test_issue33():
    """Issue 33: suggested terrascript syntax works end to end.

    Builds a minimal AWS configuration and validates it.
    """

    ts = Terrascript()

    # Variables: ts.add() must hand back the object it was given.
    access_key = ts.add(variable('access_key'))
    secret_key = ts.add(variable('secret_key'))
    region = ts.add(variable('region', default='us-east-1'))
    for var in (access_key, secret_key, region):
        assert isinstance(var, variable)

    # Provider wired up from the variables declared above.
    ts += provider('aws',
                   access_key=access_key,
                   secret_key=secret_key,
                   region=region)

    instance = ts.add(aws_instance('example',
                                   ami='ami-2757f631',
                                   instance_type='t2.micro'))
    assert isinstance(instance, aws_instance)

    ts += output('example_public_ip',
                 value=instance.public_ip,
                 description='Public IP of example')

    assert ts.validate() is True
Example #3
0
    def populate_tf_resource_elasticache(self, resource, namespace_info,
                                         existing_secrets):
        """Create an Elasticache replication group plus the Terraform
        outputs that are later collected into an OpenShift Secret."""
        account, identifier, values, output_prefix, output_resource_name = \
            self.init_values(resource, namespace_info)
        # Elasticache names the resource via replication_group_id,
        # not identifier, so translate the common key.
        values['replication_group_id'] = values['identifier']
        values.pop('identifier', None)

        resources = []
        self.init_common_outputs(resources, namespace_info, output_prefix,
                                 output_resource_name)

        # Keep an already-provisioned auth token stable across runs;
        # generate a fresh one only when none exists yet.
        try:
            auth_token = \
                existing_secrets[account][output_prefix]['db.auth_token']
        except KeyError:
            auth_token = self.determine_db_password(
                namespace_info, output_resource_name,
                secret_key='db.auth_token')
        values['auth_token'] = auth_token

        # elasticache replication group
        # Ref: https://www.terraform.io/docs/providers/aws/r/
        # elasticache_replication_group.html
        group = aws_elasticache_replication_group(identifier, **values)
        resources.append(group)

        # Outputs feeding the OpenShift Secret fields.
        secret_fields = [
            ('db.endpoint',
             '${' + group.fullname + '.configuration_endpoint_address}'),
            ('db.port', '${' + group.fullname + '.port}'),
            ('db.auth_token', values['auth_token']),
        ]
        for field, field_value in secret_fields:
            resources.append(
                output(output_prefix + '[' + field + ']', value=field_value))

        for res in resources:
            self.add_resource(account, res)
Example #4
0
    def create_gridnetwork(self, name, machine_type, zone, apply=True):
        """Provision a PyGridNetwork node on GCP via Terraform.

        Reserves a static IP, builds a GPU deep-learning VM that
        installs and starts PyGridNetwork on boot, writes the config
        to main.tf.json and (optionally) runs `terraform apply`.

        args:
            name: name of the compute instance
            machine_type: the type of machine
            zone: zone of your GCP project
            apply: to call terraform apply at the end
        """
        # Static external address so the node's IP survives restarts.
        pygrid_network_ip = terrascript.resource.google_compute_address(name, name=name,)
        self.config += pygrid_network_ip

        # Export the address as a Terraform output named "<name>-ip".
        self.config += terrascript.output(
            name + "-ip", value="${" + pygrid_network_ip.address + "}",
        )

        # Open the firewall for the gridnetwork service; apply deferred
        # so everything is applied in one terraform run below.
        self.expose_port(name="pygrid", apply=False)

        # Boot image: Google's PyTorch GPU deep-learning image family.
        image = terrascript.data.google_compute_image(
            name + "pytorch",
            family="pytorch-latest-gpu-debian-10",
            project="deeplearning-platform-release",
        )
        self.config += image

        node = terrascript.resource.google_compute_instance(
            name,
            name=name,
            machine_type=machine_type,
            zone=zone,
            boot_disk={"initialize_params": {"image": "${" + image.self_link + "}"}},
            network_interface={
                "network": "default",
                # NAT the instance through the reserved static IP.
                "access_config": {"nat_ip": "${" + pygrid_network_ip.address + "}"},
            },
            # First-boot script: install PyGridNetwork from GitHub and
            # start it on port 80 with a local database.
            metadata_startup_script="""
                #!/bin/bash
                apt-get update
                apt-get -y upgrade
                sudo -i bash -c 'pip install git+https://github.com/OpenMined/PyGridNetwork.git'
                sudo -i bash -c 'echo Starting PyGridNetwork & \
                python -m gridnetwork --port=80 --start_local_db'""",
        )
        self.config += node

        # Persist the accumulated configuration for the terraform CLI.
        with open("main.tf.json", "w") as main_config:
            json.dump(self.config, main_config, indent=2, sort_keys=False)

        if apply:
            # Pick the runner matching the environment (notebook vs CLI).
            if IPython.get_ipython():
                terraform_notebook.apply()
            else:
                terraform_script.apply()
Example #5
0
 def init_common_outputs(self, tf_resources, namespace_info, output_prefix,
                         output_resource_name):
     """Append the outputs shared by every managed resource.

     Records cluster, namespace, resource kind ('Secret') and the
     target resource name under the integration prefix.
     """
     fmt = '{}[{}.{}]'
     cluster, namespace = self.unpack_namespace_info(namespace_info)
     common = [
         ('cluster', cluster),
         ('namespace', namespace),
         ('resource', 'Secret'),
         ('output_resource_name', output_resource_name),
     ]
     for suffix, value in common:
         name = fmt.format(output_prefix, self.integration_prefix, suffix)
         tf_resources.append(output(name, value=value))
Example #6
0
    def reserve_ip(self, name, apply=True):
        """Reserve a static external IP address on GCP.

        args:
            name: name of the reserved ip
            apply: to call terraform apply at the end
        """
        # Static address resource, exported as output "<name>-ip".
        address = terrascript.resource.google_compute_address(name, name=name)
        self.config += address
        self.config += terrascript.output(
            name + "-ip",
            value="${" + address.address + "}",
        )

        # Persist the accumulated configuration for the terraform CLI.
        with open("main.tf.json", "w") as main_config:
            json.dump(self.config, main_config, indent=2, sort_keys=False)

        if not apply:
            return
        # Pick the runner matching the environment (notebook vs CLI).
        if IPython.get_ipython():
            terraform_notebook.apply()
        else:
            terraform_script.apply()
Example #7
0
# One worker with a persistent volume attached (claimed in the given region).
workerVar.persistent_volumes = [VolumeClaim(o, region, "volume-nyc3-01")]
workerVar.total_instances = 1
persistent_worker = Worker(o, workerVar)
persistent_worker.create_workers()

# ---------------------------------------------
# Creating Firewall
# ---------------------------------------------
# Allow SSH (22), HTTP/HTTPS (80/443) and 9000 for all nodes tagged
# "cluster".  NOTE(review): purpose of port 9000 not visible here —
# presumably a service UI; confirm against the deployed stack.
create_firewall(o, domain=domain, inbound_ports=[22, 80, 443, 9000], tag="cluster")


# ---------------------------------------------
# Outputs
# ---------------------------------------------
# Export the public and private IPv4 addresses of every manager and
# worker node collected in o.shared by the Worker/manager builders.
o.terrascript.add(output("manager_ips",
                         value=[value.ipv4_address for value in o.shared["manager_nodes"]],
                         description="The manager nodes public ipv4 addresses"))

o.terrascript.add(output("manager_ips_private",
                         value=[value.ipv4_address_private for value in o.shared["manager_nodes"]],
                         description="The manager nodes private ipv4 addresses"))

o.terrascript.add(output("worker_ips",
                         value=[value.ipv4_address for value in o.shared["worker_nodes"]],
                         description="The worker nodes public ipv4 addresses"))

o.terrascript.add(output("worker_ips_private",
                         value=[value.ipv4_address_private for value in o.shared["worker_nodes"]],
                         description="The worker nodes private ipv4 addresses"))

o.terrascript.add(output("manager_token",
Example #8
0
    def populate_tf_resource_s3(self, resource, namespace_info):
        """Create an S3 bucket plus a dedicated IAM user, access key
        and a user policy scoped to that bucket only."""
        account, identifier, common_values, \
            output_prefix, output_resource_name = \
            self.init_values(resource, namespace_info)

        resources = []
        self.init_common_outputs(resources, namespace_info, output_prefix,
                                 output_resource_name)

        # s3 bucket
        # Ref: https://www.terraform.io/docs/providers/aws/r/s3_bucket.html
        bucket = aws_s3_bucket(
            identifier,
            bucket=identifier,
            versioning={'enabled': True},
            tags=common_values['tags'],
            acl=common_values['acl'],
        )
        resources.append(bucket)
        resources.append(output(
            output_prefix + '[bucket]',
            value='${' + bucket.fullname + '.bucket}'))

        # iam user for the bucket
        # Ref: https://www.terraform.io/docs/providers/aws/r/iam_access_key.html
        user = aws_iam_user(
            identifier,
            name=identifier,
            tags=common_values['tags'],
            depends_on=[bucket],
        )
        resources.append(user)

        # iam access key for the user; id/secret become Secret fields
        access_key = aws_iam_access_key(
            identifier,
            user=identifier,
            depends_on=[user],
        )
        resources.append(access_key)
        resources.append(output(
            output_prefix + '[aws_access_key_id]',
            value='${' + access_key.fullname + '.id}'))
        resources.append(output(
            output_prefix + '[aws_secret_access_key]',
            value='${' + access_key.fullname + '.secret}'))

        # iam user policy restricting the user to this bucket
        policy = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Sid": "ListObjectsInBucket",
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": ["arn:aws:s3:::{0}".format(identifier)]
            }, {
                "Sid": "AllObjectActions",
                "Effect": "Allow",
                "Action": "s3:*Object",
                "Resource": ["arn:aws:s3:::{0}/*".format(identifier)]
            }]
        }
        resources.append(aws_iam_user_policy(
            identifier,
            user=identifier,
            name=identifier,
            policy=json.dumps(policy, sort_keys=True),
            depends_on=[user],
        ))

        for res in resources:
            self.add_resource(account, res)
Example #9
0
    def populate_iam_users(self, tf_query):
        """Create IAM users, group memberships, login profiles and the
        per-role user policies described by *tf_query*.

        Returns True on the first invalid gpg key encountered (aborting
        all remaining processing); otherwise returns None implicitly.
        """
        for role in tf_query:
            users = role['users']
            if len(users) == 0:
                continue

            aws_groups = role['aws_groups']
            for ig in range(len(aws_groups)):
                group_name = aws_groups[ig]['name']
                account_name = aws_groups[ig]['account']['name']
                account_console_url = aws_groups[ig]['account']['consoleUrl']

                # we want to include the console url in the outputs
                # to be used later to generate the email invitations
                output_name = '{}.console-urls[{}]'.format(
                    self.integration_prefix, account_name)
                output_value = account_console_url
                tf_output = output(output_name, value=output_value)
                self.add_resource(account_name, tf_output)

                for iu in range(len(users)):
                    user_name = users[iu]['redhat_username']

                    # Ref: terraform aws iam_user
                    tf_iam_user = self.get_tf_iam_user(user_name)
                    self.add_resource(account_name, tf_iam_user)

                    # Ref: terraform aws iam_group_membership
                    tf_iam_group = self.get_tf_iam_group(group_name)
                    tf_iam_user_group_membership = \
                        aws_iam_user_group_membership(
                            user_name + '-' + group_name,
                            user=user_name,
                            groups=[group_name],
                            depends_on=[tf_iam_user, tf_iam_group]
                        )
                    self.add_resource(account_name,
                                      tf_iam_user_group_membership)

                    # if user does not have a gpg key,
                    # a password will not be created.
                    # a gpg key may be added at a later time,
                    # and a password will be generated
                    user_public_gpg_key = users[iu]['public_gpg_key']
                    if user_public_gpg_key is None:
                        msg = \
                            'user {} does not have a public gpg key ' \
                            'and will be created without a password.'.format(
                                user_name)
                        logging.warning(msg)
                        continue
                    # NOTE(review): an invalid key aborts the WHOLE run
                    # mid-iteration (returns True), leaving remaining
                    # users/roles unprocessed — confirm this is intended.
                    if not gpg_key_valid(user_public_gpg_key):
                        msg = \
                            'user {} has an invalid public gpg key.'.format(
                                user_name)
                        logging.error(msg)
                        error = True
                        return error
                    # Ref: terraform aws iam_user_login_profile
                    # lifecycle.ignore_changes keeps terraform from
                    # resetting the one-time generated password later.
                    tf_iam_user_login_profile = aws_iam_user_login_profile(
                        user_name,
                        user=user_name,
                        pgp_key=user_public_gpg_key,
                        depends_on=[tf_iam_user],
                        lifecycle={
                            'ignore_changes': [
                                "id", "password_length",
                                "password_reset_required", "pgp_key"
                            ]
                        })
                    self.add_resource(account_name, tf_iam_user_login_profile)

                    # we want the outputs to be formed into a mail invitation
                    # for each new user. we form an output of the form
                    # 'qrtf.enc-passwords[user_name] = <encrypted password>
                    output_name = '{}.enc-passwords[{}]'.format(
                        self.integration_prefix, user_name)
                    output_value = '${' + tf_iam_user_login_profile.fullname \
                        + '.encrypted_password}'
                    tf_output = output(output_name, value=output_value)
                    self.add_resource(account_name, tf_output)

            user_policies = role['user_policies']
            if user_policies is not None:
                for ip in range(len(user_policies)):
                    policy_name = user_policies[ip]['name']
                    # NOTE(review): `ig` is the leftover index from the
                    # aws_groups loop above, so policies always use the
                    # LAST group's account; if aws_groups is empty this
                    # raises NameError — confirm intended semantics.
                    account_name = aws_groups[ig]['account']['name']
                    for iu in range(len(users)):
                        # replace known keys with values
                        user_name = users[iu]['redhat_username']
                        policy = user_policies[ip]['policy']
                        policy = policy.replace('${aws:username}', user_name)
                        policy = policy.replace(
                            '${aws:accountid}',
                            self.variables[account_name]['uid'])

                        # Ref: terraform aws iam_user_policy
                        tf_iam_user = self.get_tf_iam_user(user_name)
                        tf_aws_iam_user_policy = aws_iam_user_policy(
                            user_name + '-' + policy_name,
                            name=user_name + '-' + policy_name,
                            user=user_name,
                            policy=policy,
                            depends_on=[tf_iam_user])
                        self.add_resource(account_name, tf_aws_iam_user_policy)
Example #10
0
    def create_gridnetwork(self, name, machine_type, zone, apply=True):
        """Provision a PyGrid network node on GCP via Terraform.

        args:
            name: name of the compute instance
            machine_type: the type of machine
            zone: zone of your GCP project
            apply: to call terraform apply at the end
        """
        # Static external address so the node's IP survives restarts;
        # exported as output "<name>-ip".
        static_ip = terrascript.resource.google_compute_address(name, name=name)
        self.config += static_ip
        self.config += terrascript.output(
            name + "-ip",
            value="${" + static_ip.address + "}",
        )

        # Firewall rule for the service; apply deferred to one run below.
        self.expose_port(name="pygrid", apply=False)

        # Boot image: Container-Optimized OS, LTS channel.
        os_image = terrascript.data.google_compute_image(
            name + "container-optimized-os",
            family="cos-81-lts",
            project="cos-cloud",
        )
        self.config += os_image

        # First-boot script: pull and run the grid-network container.
        startup = """
                #!/bin/bash
                sleep 30;
                docker pull openmined/grid-network:production;
                docker run \
                -e PORT=80 \
                -e DATABASE_URL=sqlite:///databasenode.db \
                --name gridnetwork\
                -p 80:80 \
                -d openmined/grid-network:production;"""

        instance = terrascript.resource.google_compute_instance(
            name,
            name=name,
            machine_type=machine_type,
            zone=zone,
            boot_disk={"initialize_params": {"image": "${" + os_image.self_link + "}"}},
            network_interface={
                "network": "default",
                "access_config": {"nat_ip": "${" + static_ip.address + "}"},
            },
            metadata_startup_script=startup,
        )
        self.config += instance

        # Persist the accumulated configuration for the terraform CLI.
        with open("main.tf.json", "w") as main_config:
            json.dump(self.config, main_config, indent=2, sort_keys=False)

        if apply:
            # Pick the runner matching the environment (notebook vs CLI).
            if IPython.get_ipython():
                terraform_notebook.apply()
            else:
                terraform_script.apply()
Example #11
0
    def create_droplet(self, droplet_type, number, conn, prov):
        """Build one DigitalOcean droplet for the cluster.

        Attaches the matching persistent volume (when one is declared
        for this index), registers id/ip outputs and, optionally, DNS
        records.  Returns the droplet resource.
        """
        # NOTE(review): number_str appears unused below; kept in case
        # fmt_number has side effects — confirm and drop if not.
        number_str = self.fmt_number(number)
        droplet_name = self.fmt_name(self.variables.name, number)
        droplet_name_dns = self.fmt_name(self.variables.name, number, "-")

        volumes = self.variables.persistent_volumes
        volume = None
        if volumes is not None and number <= len(volumes):
            volume = volumes[number - 1].create()

            # Render the attach script with this droplet's mount point
            # and ship it over as file + remote-exec provisioners.
            tmpl_attach = template_file(
                "attach_volume_{}".format(droplet_name),
                template=function.file(
                    os.path.join(self.curdir, "scripts", "attach_volume.sh")),
                vars={
                    "volume_name": "/dev/sda",
                    "mount": volumes[number - 1].mount,
                })
            self.o.terrascript.add(tmpl_attach)
            prov.append(provisioner(
                "file",
                content=tmpl_attach.rendered,
                destination="/tmp/attach_volume.sh"))
            prov.append(provisioner(
                "remote-exec",
                inline=["chmod +x /tmp/attach_volume.sh",
                        "/tmp/attach_volume.sh"]))

        droplet = digitalocean_droplet(
            droplet_name,
            ssh_keys=self.variables.ssh_keys,
            image=self.variables.image,
            region=self.variables.region,
            size=self.variables.size,
            private_networking="true",
            backups=self.variables.backups,
            ipv6="false",
            user_data=self.variables.user_data,
            tags=self.get_tags_id(),
            count=1,
            name="{}.{}".format(droplet_name_dns, self.variables.domain),
            connection=conn,
            volume_ids=[volume.id] if volume is not None else None,
            provisioner=prov)

        self.o.shared[droplet_type + "_nodes"].append(droplet)
        self.o.terrascript.add(droplet)

        # Export the droplet id and both ipv4 addresses.
        exported = (
            ("id", droplet.id, "The {} node id"),
            ("ipv4_public", droplet.ipv4_address,
             "The {} nodes public ipv4 address"),
            ("ipv4_private", droplet.ipv4_address_private,
             "The {} nodes private ipv4 address"),
        )
        for label, val, desc in exported:
            self.o.terrascript.add(
                output("{}_{}".format(droplet_name, label),
                       value=val,
                       description=desc.format(droplet_type)))

        if self.variables.create_dns:
            # Public, internal and tag-based records for the node.
            self.create_dns_entry(domain=self.variables.domain,
                                  entry=droplet_name_dns,
                                  ip=droplet.ipv4_address)
            self.create_dns_entry(domain=self.variables.domain,
                                  entry="{}-internal".format(droplet_name_dns),
                                  ip=droplet.ipv4_address_private)
            self.create_dns_entry(domain=self.variables.domain,
                                  entry=self.variables.tags[0],
                                  ip=droplet.ipv4_address,
                                  name="{}-{}".format(droplet_name_dns,
                                                      self.variables.tags[0]))

        return droplet