Example #1
# Imports assumed for this snippet: the pre-0.8 terrascript API plus an
# application-specific `config` module that provides tf_dir.
import config
from terrascript import Terrascript, variable

def create_variables(app_name, app_port, cluster_name):
	var_file = config.tf_dir + "/variables.tf" 
	ts = Terrascript()

	var_access_key = variable('aws_access_key', default='')
	var_secret_key = variable('aws_secret_key', default='')
	var_region = variable('region', default='eu-west-1')
	var_ami = variable('ami', default='ami-066826c6a40879d75')
	var_az_qty = variable('az_qty', default='2')
	var_cluster_name = variable('ecs_cluster_name', default=cluster_name)
	var_instance_type = variable('instance_type', default='t2.micro')
	var_key_name = variable('key_name', default='rep')
	var_app_port = variable('app_port', default=app_port)
	var_app_name = variable('app_name', default=app_name)

	ts += var_access_key
	ts += var_secret_key
	ts += var_region
	ts += var_ami
	ts += var_az_qty
	ts += var_cluster_name
	ts += var_instance_type
	ts += var_key_name
	ts += var_app_port
	ts += var_app_name

	print ("Creating file: " + var_file)
	with open(var_file, 'w') as tfile:
		tfile.write(ts.dump())
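
A minimal usage sketch for create_variables(), assuming a config module on the import path whose tf_dir points at an existing directory; all values here are placeholders.

import os

os.makedirs("./terraform", exist_ok=True)      # matches config.tf_dir = "./terraform" (assumed)
create_variables(app_name="demo-app", app_port=8080, cluster_name="demo-cluster")
# -> ./terraform/variables.tf now holds the ten variable blocks defined above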
Example #2
# Imports assumed for this test (pre-0.8 terrascript API).
from terrascript import Terrascript, provider, provisioner
from terrascript.aws.r import aws_instance

def test_issue32():
    """Issue 32: the provisioner was rendered as a single line under provisioner.file instead of a dictionary."""

    ts = Terrascript()

    ts += provider('aws', region='ap-southeast-2')

    p = provisioner('local-exec', command='date > $(mktemp tmpXXXXXXX.terrascript)')

    ts += aws_instance('I1', ami='ami-60a26a02', instance_type='t2.nano', provisioner=p)

    j = ts.dump()

    assert 'mktemp' in j

    assert ts.validate() is True
Example #3
from terrascript import Terrascript
from terrascript.google.r import *
from nw import *          # project module: expected to provide `snw` (a subnetwork)
from variables import *   # project module: expected to provide `zone`

ts = Terrascript()
# metadata_startup_script = '${data.start_up.startup_script.rendered}'

inst = ts.add(
    google_compute_instance(
        'test-vm',
        name='test-pavan-tft',
        boot_disk=[{
            'initialize_params': [{
                'image': 'debian-cloud/debian-9'
            }]
        }],
        network_interface=[{
            'subnetwork': snw.self_link,
            'access_config': {}
        }],
        machine_type='n1-standard-1',
        tags=['test-http', 'test-ssh'],
        zone=zone,
        metadata_startup_script=
        '#!/bin/bash\napt-get update -y\napt-get install nginx -y\nservice nginx start'
    ))

with open("./terraform/vm.tf.json", "w") as f:
    f.write(ts.dump())
Example #4
# Imports assumed for this snippet (pre-0.8 terrascript API); the vsphere_*
# data sources and the vsphere_virtual_machine resource are expected to come
# from terrascript.vsphere.d / terrascript.vsphere.r in the original module,
# with data_vsphere_virtual_machine aliasing the virtual-machine data source.
import logging
from terrascript import Terrascript, provider

class TerrascriptVSphereVM:
    def __init__(self, name, guestid, cpu, memory, folder=''):
        self.ts = Terrascript()
        self.name = name
        self.guestid = guestid
        self.cpu = cpu
        self.memory = memory
        self.folder = folder
        self.provider = None
        self.datacenter = None
        self.datastore = None
        self.template = None
        self.gateway = None
        self.disks = []
        self.networks = []
        self.interfaces = []
        self.iface_customization = []
        self.dns = []

    def setProvider(self, host, username, password):
        logger = logging.getLogger()
        logger.debug("Set VSphere provider to {}".format(host))
        self.provider = provider(
            "vsphere",
            user=username,
            password=password,
            vsphere_server=host,
            allow_unverified_ssl=True)
        self.ts.add(self.provider)

    def setDatacenter(self, datacenter):
        logger = logging.getLogger()
        logger.debug("Set VSphere datacenter to {}".format(datacenter))
        self.datacenter = vsphere_datacenter(
            "dc",
            name=datacenter)
        self.ts.add(self.datacenter)

    def setDatastore(self, datastore):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Set VSphere datastore to {}".format(datastore))
            self.datastore = vsphere_datastore(
                "ds",
                name=datastore,
                datacenter_id=self.datacenter.id)
            self.ts.add(self.datastore)

    def setResourcePool(self, pool):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Set VSphere Resource Pool to {}".format(pool))
            self.pool = vsphere_resource_pool(
                "pool",
                name=pool,
                datacenter_id=self.datacenter.id)
            self.ts.add(self.pool)

    def setTemplate(self, template):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Set VSphere template to {}".format(template))
            self.template = data_vsphere_virtual_machine(
                "template",
                name=template,
                datacenter_id=self.datacenter.id)
            self.ts.add(self.template)

    def addDisk(self, size):
        idx = len(self.disks) + 1
        logger = logging.getLogger()
        logger.debug("Add {}GB disk".format(size))
        self.disks.append({
            "label": "disk{}".format(idx),
            "size": size,
            "unit_number": idx})

    def addNetworkInterface(self, dvp, ipaddr, cidr):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Add network card on {} DVP, with {}/{}".format(dvp, ipaddr, cidr))
            vnet = vsphere_network(
                dvp,
                name=dvp,
                datacenter_id=self.datacenter.id)
            self.networks.append(vnet)
            self.ts.add(vnet)
            self.interfaces.append({"network_id": vnet.id})
            self.iface_customization.append({
                "ipv4_address": ipaddr,
                "ipv4_netmask": cidr})

    def setDomain(self, domain):
        logger = logging.getLogger()
        logger.debug("Set {} domain".format(domain))
        self.domain = domain

    def setTimezone(self, timezone):
        logger = logging.getLogger()
        logger.debug("Set timezone to {}".format(timezone))
        self.timezone = timezone

    def setGateway(self, gateway):
        logger = logging.getLogger()
        logger.debug("Set default gateway to {}".format(gateway))
        self.gateway = gateway

    def addDns(self, dns):
        logger = logging.getLogger()
        logger.debug("Add {} to DNS list".format(dns))
        self.dns.append(dns)

    def saveConfiguration(self, filename):
        linuxOptions = {}
        linuxOptions["host_name"] = self.name
        linuxOptions["domain"] = self.domain
        linuxOptions["time_zone"] = self.timezone

        customize = {}
        customize["linux_options"] = linuxOptions
        customize["network_interface"] = self.iface_customization
        customize["ipv4_gateway"] = self.gateway
        customize["dns_server_list"] = self.dns

        clone = {}
        clone["template_uuid"] = self.template.id
        clone["linked_clone"] = False
        clone["customize"] = customize

        if self.folder != '':
            self.virtualMachine = vsphere_virtual_machine(
                'vm',
                name=self.name,
                resource_pool_id=self.pool.id,
                datastore_id=self.datastore.id,
                guest_id=self.guestid,
                folder=self.folder,
                num_cpus=self.cpu,
                memory=self.memory,
                network_interface=self.interfaces,
                disk=self.disks,
                clone=clone)
        else:
            self.virtualMachine = vsphere_virtual_machine(
                'vm',
                name=self.name,
                resource_pool_id=self.pool.id,
                datastore_id=self.datastore.id,
                guest_id=self.guestid,
                num_cpus=self.cpu,
                memory=self.memory,
                network_interface=self.interfaces,
                disk=self.disks,
                clone=clone)
        self.ts.add(self.virtualMachine)
        with open(filename, 'w') as fd:
            fd.write(self.ts.dump())
        return 0
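
A minimal usage sketch for the class above; the vCenter host, credentials, inventory names and addresses are placeholders, and the call order follows the dependency checks in the setters (datacenter before datastore, resource pool, template and networks).

vm = TerrascriptVSphereVM("web01", "ubuntu64Guest", cpu=2, memory=4096, folder="demo")
vm.setProvider("vcenter.example.com", "administrator@vsphere.local", "secret")  # placeholder credentials
vm.setDatacenter("DC1")
vm.setDatastore("datastore1")
vm.setResourcePool("pool1")
vm.setTemplate("ubuntu-template")
vm.addDisk(20)                                           # 20 GB disk, labelled disk1
vm.addNetworkInterface("dvp-frontend", "10.0.0.10", 24)
vm.setDomain("example.com")
vm.setTimezone("Europe/Paris")
vm.setGateway("10.0.0.1")
vm.addDns("10.0.0.2")
vm.saveConfiguration("vm.tf.json")                       # renders and writes the Terraform JSON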
Example #5
# Imports assumed for this snippet: json plus the pre-0.8 terrascript API; the
# `factory` helper and the alicloud_instance resource class come from the
# original module.
import json
from terrascript import Terrascript

class TerraformHclFromDataSource:
    def __init__(self, record_file_name, resource_type, id_field_name,
                 keywords_names):

        keywords_names_list = str(keywords_names).replace(" ", "").split(',')
        keywords_in_record = [
            str(v).replace(" ", "").split('=')[0] for v in keywords_names_list
        ]
        keywords_in_hcl = [
            str(v).replace(" ", "").split('=')[1]
            if '=' in str(v) else str(v).replace(" ", "").split('=')[0]
            for v in keywords_names_list
        ]
        keywords_names_dict = dict(zip(keywords_in_record, keywords_in_hcl))

        self.file = open(record_file_name, 'r')

        json_data = json.load(self.file)

        alicloud_factory = factory(alicloud_instance, str(resource_type))

        self.alicloud_hcl_store = Terrascript()
        self.alicloud_cmd_store = []

        for record in json_data:
            assert record.get(id_field_name, "") != ""
            factory_parameter_dict = {}
            for k_record, k_hcl in keywords_names_dict.items():
                # Serialize nested structures to JSON; the original test
                # `record[k_record] is dict` never matched a value, so use
                # isinstance() instead.
                v = json.dumps(
                    record[k_record], sort_keys=True
                ) if isinstance(record[k_record], dict) else record[k_record]
                factory_parameter_dict[k_hcl] = v

            resource_name = str(resource_type) + '-' + str(
                record[id_field_name])

            one_hcl_item = alicloud_factory(resource_name,
                                            **factory_parameter_dict)
            one_cmd_item = "terraform import {}.{} {}".format(
                resource_type, resource_name, str(record[id_field_name]))

            self.alicloud_hcl_store.add(one_hcl_item)
            self.alicloud_cmd_store.append(one_cmd_item)

    def dump_hcl(self):
        """Dump Terraform Code."""

        # printjson2hcl(self.alicloud_hcl_store.dump())
        print(self.alicloud_hcl_store.dump())

    def dump_cmd(self):
        """Dump Terraform import Command."""

        for cmd in self.alicloud_cmd_store:
            print(cmd)

    def close(self):
        if self.file:
            self.file.close()
            self.file = None
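
A minimal usage sketch for the class above; the record file, field names and mapping are placeholders. keywords_names is a comma-separated list in which an entry may remap a record key to a different HCL argument via record_key=hcl_key.

# records.json (hypothetical): [{"instance_id": "i-abc123",
#                                "instance_name": "web",
#                                "instance_type": "ecs.n4.small"}]
converter = TerraformHclFromDataSource(
    record_file_name="records.json",
    resource_type="alicloud_instance",
    id_field_name="instance_id",
    keywords_names="instance_name=instance_name, instance_type=instance_type")
converter.dump_hcl()   # prints the generated Terraform code
converter.dump_cmd()   # prints one `terraform import` command per record
converter.close()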
Example #6
# The start of this snippet was truncated; the lines below are a plausible
# reconstruction (assumed) of the missing imports, Terrascript instance and
# google provider block.
from terrascript import Terrascript, provider
from terrascript.google.r import google_compute_instance
from python_terraform import Terraform

ts = Terrascript()
ts += provider(
    'google',
    project='learn-internal',
    region='us-central1')
inst = ts.add(
    google_compute_instance('test-vm',
                            name='test-nasir1-tft',
                            boot_disk=[{
                                'initialize_params': [{
                                    'image':
                                    'debian-cloud/debian-9'
                                }]
                            }],
                            network_interface=[{
                                'network': 'default',
                                'access_config': {}
                            }],
                            machine_type='n1-standard-1',
                            zone='us-east1-b'))

print(ts.dump())

with open("/home/nasiruddin_happy/ns002p/template/terraform/sample.tf.json",
          "w") as f:
    f.write(ts.dump())

tf = Terraform(working_dir='/home/nasiruddin_happy/ns002p/template/terraform')
tf.init()
approve = {"auto-approve": True}
print(tf.plan())
print(tf.apply(**approve))
Example #7
# Imports and module-level names assumed for this snippet: jinja2, yaml,
# tempfile, kubernetes.client, the terrascript classes Terrascript, Terraform,
# Provider and Output, the constants PROFILE, REGION, ROOT_PROFILE and
# REMOTE_STATE_S3_BUCKET, and the helpers T, random_password, random_name,
# get_recipients_from_pgp, terraform, send_email and TF_YAML_MAP from the
# original module.
def process(terrayaml: str, metadata: dict, logger: KopfObjectLogger) -> str:
    #
    # User input YAML
    #
    env = jinja2.Environment()
    env.filters['random_password'] = random_password
    env.filters['random_name'] = random_name

    template = T(template_text=terrayaml, environment=env).render(**metadata)

    provision = yaml.load(template, Loader=yaml.FullLoader)
    logger.info(f"provision this template {provision}")
    # print(provision)

    #
    # Start terraform
    #
    meta = provision.pop('meta', {})
    team = meta.get('team', 'oss')
    profile = meta.get('profile', PROFILE)
    environment = meta.get('environment', 'testing')
    application = meta.get('application', 'wurkflow')
    statefile_region = meta.get('statefile_region', 'eu-west-1')

    ts = Terrascript()
    ts += Terraform(required_version=">= 0.12.7").backend(
        "s3",
        bucket=REMOTE_STATE_S3_BUCKET,
        key=f"k8/terrayaml-operator/{team}/{environment}/{application}-terraform.tfstate",
        region=statefile_region,
        profile=ROOT_PROFILE)

    #
    # Extract the notify component
    #
    notify = provision.pop('notify', None)
    recipients = []
    if notify:
        # tuple of email, key
        recipient_emails = notify.get('email', [])
        # append our infra provisioner email (redacted in the source)
        recipient_emails.append('*****@*****.**')
        recipients = get_recipients_from_pgp(recipient_emails=recipient_emails)
        logger.info(f"notify these emails: {recipient_emails}")

    #
    # Parse the yaml
    #
    for provider in provision:
        #print(f"----- output for provider: {provider.upper()} -----")
        for resource, data in provision.get(provider).items():
            #print(f"----- output for resource: {resource} -----")
            for item in data.get('items', []):
                api = TF_YAML_MAP.get(resource)
                outputs = item.pop('outputs', [])
                item_name = item.pop('name', random_name(value=resource))
                tf_resource = api(item_name, **item)
                ts.add(tf_resource)

                # handle terraform outputs
                for opt in outputs:
                    assert getattr(
                        tf_resource, opt.get('value')
                    ), f"{tf_resource} has no attribute {opt.get('value')}"
                    ts.add(
                        Output(opt.get('name'),
                               value=getattr(tf_resource, opt.get('value'))))

    # Add a provider (+= syntax)
    ts += Provider('aws',
                   skip_metadata_api_check=True,
                   profile=profile,
                   region=REGION)
    data = ts.dump()

    # Plan
    working_dir = tempfile.mkdtemp(dir='./runs')
    crd_api = kubernetes.client.CustomObjectsApi()
    selfLink = metadata.get('selfLink').split('/')
    # update with planId
    logger.info(f"planId: {working_dir}")
    crd_api.patch_namespaced_custom_object(
        group=selfLink[2],
        version=selfLink[3],
        name=selfLink[7],
        namespace=selfLink[5],
        plural=selfLink[6],
        body={"spec": {
            "planId": working_dir
        }})

    tf_response, tf_code = terraform(working_dir=working_dir,
                                     data=data,
                                     logger=logger)
    logger.info(f"Terraform Plan result: {tf_response}")

    if recipients:
        logger.info(f"Send email to {recipients}")
        send_email(to=recipients,
                   attachment=tf_response,
                   message_type='success' if tf_code != 1 else 'error')
    else:
        logger.info('No recipients defined')
    logger.info(f"PlanId is {working_dir}")
    return f"{working_dir}"
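
For reference, a sketch of the structure process() expects after yaml.load, written as the equivalent Python dict; the resource key and field values are hypothetical, and the real resource-to-class mapping lives in TF_YAML_MAP.

provision_example = {
    "meta": {"team": "oss", "environment": "testing", "application": "wurkflow"},
    "notify": {"email": ["someone@example.com"]},       # popped before parsing
    "aws": {                                            # provider section
        "aws_s3_bucket": {                              # looked up in TF_YAML_MAP (hypothetical key)
            "items": [{
                "name": "demo-bucket",                  # becomes the Terraform resource name
                "bucket": "demo-bucket",                # remaining keys are passed to the resource
                "outputs": [{"name": "bucket_arn", "value": "arn"}],
            }],
        },
    },
}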
Example #8
# Imports assumed for this snippet: subprocess plus the pre-0.8 terrascript API
# (provider, aws_key_pair, aws_security_group, aws_instance, aws_eip,
# aws_eip_association); ACCESS_KEY, SECRET_KEY and PUBLIC_KEY come from the
# original module.
import subprocess

def wireguard_terraform(region, ami):

    ts = Terrascript()

    ts += provider('aws',
                   access_key=ACCESS_KEY,
                   secret_key=SECRET_KEY,
                   region=region)

    # If the key already exists, an error will be thrown and the infrastructure will not build
    key = ts.add(
        aws_key_pair('wireguard_key',
                     key_name="wireguard",
                     public_key=PUBLIC_KEY))

    # If security groups already exist, an error will be thrown but infrastructure will still build
    sg_ssh = ts.add(
        aws_security_group('allow_ssh',
                           name='allow_ssh',
                           description='default',
                           ingress=[
                               {
                                   'from_port': 22,
                                   'to_port': 22,
                                   'protocol': 'tcp',
                                   'cidr_blocks': ['0.0.0.0/0']
                               },
                           ],
                           egress=[{
                               'from_port': 0,
                               'to_port': 0,
                               'protocol': '-1',
                               'cidr_blocks': ['0.0.0.0/0']
                           }]))

    sg_wg = ts.add(
        aws_security_group('wg',
                           name='wg',
                           description='Open port for wireguard',
                           ingress=[
                               {
                                   'from_port': 54321,
                                   'to_port': 54321,
                                   'protocol': 'tcp',
                                   'cidr_blocks': ["0.0.0.0/0"]
                               },
                           ]))

    inst = ts.add(
        aws_instance(
            'wireguard_test',
            ami=ami,
            instance_type='t2.micro',
            security_groups=["allow_ssh", "wg"],
            key_name="wireguard",
            tags=[{
                'Name': 'wireguard'
            }],
        ))

    eip = ts.add(aws_eip('default', instance=inst.id, vpc=True))

    a_ip = ts.add(
        aws_eip_association('eip_assoc',
                            instance_id=inst.id,
                            allocation_id=eip.id))

    with open('wireguard.tf', 'w') as tf:
        tf.write(ts.dump())

    subprocess.run([
        'terraform',
        'init',
    ])
    subprocess.run(['terraform', 'apply', '-auto-approve'])
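
A hypothetical invocation; the region and AMI are placeholders, and ACCESS_KEY, SECRET_KEY and PUBLIC_KEY must be defined in the module beforehand. The created resources can be removed later with `terraform destroy -auto-approve` in the same directory.

wireguard_terraform(region='eu-west-1', ami='ami-0123456789abcdef0')  # placeholder AMI id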