def test_issue33():
    """Issue 33: Suggestion of syntax to terrascript"""
    cfg = Terrascript()

    # Declare the input variables; add() must hand back the object it stored.
    access_key = cfg.add(variable('access_key'))
    assert isinstance(access_key, variable)
    secret_key = cfg.add(variable('secret_key'))
    assert isinstance(secret_key, variable)
    region = cfg.add(variable('region', default='us-east-1'))
    assert isinstance(region, variable)

    # The += operator is the alternative spelling of add().
    cfg += provider('aws',
                    access_key=access_key,
                    secret_key=secret_key,
                    region=region)

    instance = cfg.add(aws_instance('example',
                                    ami='ami-2757f631',
                                    instance_type='t2.micro'))
    assert isinstance(instance, aws_instance)

    cfg += output('example_public_ip',
                  value=instance.public_ip,
                  description='Public IP of example')

    assert cfg.validate() is True
def test_update_override_value():
    """update() must let the other instance's duplicate variable win."""
    base = Terrascript()
    override = Terrascript()

    base.add(variable('access_key'))
    base.add(variable('secret_key'))
    region_base = base.add(variable('region', default='us-east-1'))
    region_override = override.add(variable('region', default='ca-central-1'))
    assert region_base._kwargs['default'] != region_override._kwargs['default']

    # Merging `override` replaces the "region" variable held by `base`.
    base.update(override)
    assert isinstance(base, Terrascript)
    assert isinstance(override, Terrascript)

    merged_variables = base.config.get('variable', {})
    assert len(merged_variables.items()) == 3  # ensure that only three
    merged_region = merged_variables.get('region', {})
    assert merged_region.get('default', '') == region_override._kwargs.get('default')
    assert base.validate(delete=False) is True
def ts_digitalocean(version='>= 1.7, < 1.9', **kwargs):
    """Return a Terrascript preloaded with the digitalocean provider.

    The do_token variable is declared and wired in as the provider token;
    any keyword argument supplied by the caller overrides the defaults.
    """
    ts = Terrascript()
    token = ts.add(variable('do_token'))
    params = {'token': token, **kwargs}  # caller-supplied kwargs win
    ts.add(provider('digitalocean', version=version, **params))
    return ts
def ts_cloudflare(version='< 1.9.0', **kwargs):
    """Return a Terrascript preloaded with the cloudflare provider.

    Declares the cloudflare_email and cloudflare_token variables and passes
    them to the provider; caller keyword arguments override the defaults.
    """
    ts = Terrascript()
    email = ts.add(variable('cloudflare_email'))
    token = ts.add(variable('cloudflare_token', default=''))
    params = {'email': email, 'token': token, **kwargs}  # kwargs win
    ts.add(provider('cloudflare', version=version, **params))
    return ts
def ts_amazon_aws(version='<= 2.30', AWS_REGION_LIST=('us-east-1',), **kwargs):
    """Return a Terrascript with AWS providers (one per region) plus one VPC
    per region.

    Args:
        version: provider version constraint.  NOTE(review): the previous
            default was '=< 2.30', which is not a valid Terraform constraint
            operator; '<=' is the intended spelling.
        AWS_REGION_LIST: iterable of region names; each gets a provider
            aliased with the region name and a 'vpc-<region>' VPC.  The
            default is a tuple to avoid the shared mutable-default pitfall
            the original list default had.
        **kwargs: extra provider arguments; they override the defaults.
    """
    ts_local = Terrascript()
    aws_access_key = ts_local.add(variable('aws_access_key'))
    aws_secret_key = ts_local.add(variable('aws_secret_key'))
    aws_region = ts_local.add(variable('aws_region', default='us-east-1'))
    default_params = dict(
        version=version,
        access_key=aws_access_key,
        secret_key=aws_secret_key,
    )
    default_params.update(kwargs)

    # Providers: one aliased provider per requested region ...
    for region in AWS_REGION_LIST:
        ts_local.add(provider('aws', region=region, alias=region,
                              **default_params))
    # ... plus the default (un-aliased) provider driven by the aws_region
    # variable.
    ts_local.add(provider('aws', region=aws_region, **default_params))

    # VPC: one per region, bound to that region's aliased provider.
    # (The original also collected the created objects into dicts that were
    # never used; the dicts are dropped, the side effects are identical.)
    for region in AWS_REGION_LIST:
        ts_local.add(aws_vpc('vpc-{}'.format(region),
                             provider='aws.{0}'.format(region)))
    return ts_local
def test_update_add_value():
    """update() must merge resources from another Terrascript instance."""
    primary = Terrascript()
    secondary = Terrascript()

    access_key = primary.add(variable('access_key'))
    secret_key = primary.add(variable('secret_key'))
    region = primary.add(variable('region', default='us-east-1'))
    primary += provider('aws',
                        access_key=access_key,
                        secret_key=secret_key,
                        region=region)

    first = primary.add(
        aws_instance('example', ami='ami-2757f631', instance_type='t2.micro'))
    second = secondary.add(
        aws_instance('example_second',
                     ami='ami-2757f631',
                     instance_type='t2.micro'))
    assert isinstance(first, aws_instance)
    assert isinstance(second, aws_instance)

    # Merging `secondary` adds resource "example_second" into `primary`.
    primary.update(secondary)
    assert isinstance(primary, Terrascript)
    assert isinstance(secondary, Terrascript)

    resources = primary.config.get('resource', {})
    assert len(resources.items()) == 1
    instances = resources.get('aws_instance')
    assert len(instances.items()) == 2
    assert first._name in instances.keys()
    assert second._name in instances.keys()
    assert primary.validate(delete=False) is True
def get_aws_ec2_image(name='ubuntu-18-04-minimal',
                      aws_region_list=('us-east-1',), **kwargs):
    """Create one aws_ami data source per region for an Ubuntu minimal image.

    Args:
        name: logical name prefix; each entry is named '<name>-<region>'.
        aws_region_list: iterable of regions; each data source is bound to
            the provider alias 'aws.<region>'.  The default is a tuple to
            avoid the shared mutable-default pitfall of the original list.
        **kwargs: 'filter_value' overrides the AMI name filter; all other
            keyword arguments override the default aws_ami parameters.

    Returns:
        Tuple of (region -> aws_ami object map, the Terrascript holding them).
    """
    ts_local = Terrascript()
    filter_value = 'ubuntu-minimal/images/hvm-ssd/ubuntu-bionic-18.04-amd64-minimal-20190723*'
    # A falsy caller-supplied filter_value falls back to the default pattern.
    filter_value = kwargs.pop('filter_value', '') or filter_value
    default_params = dict(
        owners=['099720109477'],
        most_recent=True,
        filter=[
            dict(name='name', values=[filter_value]),
            dict(name='root-device-type', values=['ebs']),
        ],
    )
    default_params.update(kwargs)

    ami_map = {}
    for region in aws_region_list:
        ami_map[region] = ts_local.add(aws_ami(
            '{0}-{1}'.format(name, region),
            provider='aws.{}'.format(region),
            **default_params))
    return ami_map, ts_local
from terrascript.google.r import *
from nw import *
from variables import *

# Build a single GCE instance (subnetwork `snw` and `zone` come from the
# starred imports above) and dump the Terraform JSON to disk.
ts = Terrascript()
# metadata_startup_script = '${data.start_up.startup_script.rendered}'
inst = ts.add(
    google_compute_instance(
        'test-vm',
        name='test-pavan-tft',
        boot_disk=[{
            'initialize_params': [{
                'image': 'debian-cloud/debian-9'
            }]
        }],
        network_interface=[{
            'subnetwork': snw.self_link,
            'access_config': {}
        }],
        machine_type='n1-standard-1',
        tags=['test-http', 'test-ssh'],
        zone=zone,
        metadata_startup_script=
        '#!/bin/bash\napt-get update -y\napt-get install nginx -y\nservice nginx start'
    ))

# Context manager guarantees the file is closed even if the write fails
# (the original opened/closed the handle manually).
with open("./terraform/vm.tf.json", "w") as fd:
    fd.write(ts.dump())
class TerrascriptVSphereVM:
    """Incrementally build the Terraform configuration for one vSphere VM.

    Call the set*/add* methods to describe the machine, then
    saveConfiguration() to emit the rendered JSON to a file.
    """

    def __init__(self, name, guestid, cpu, memory, folder=''):
        self.ts = Terrascript()
        self.name = name
        self.guestid = guestid
        self.cpu = cpu
        self.memory = memory
        self.folder = folder
        self.provider = None
        self.datacenter = None
        self.datastore = None
        self.template = None
        self.gateway = None
        self.disks = []
        self.networks = []
        self.interfaces = []
        self.iface_customization = []
        self.dns = []

    def setProvider(self, host, username, password):
        """Register the vsphere provider pointing at *host*."""
        logger = logging.getLogger()
        logger.debug("Set VSphere provider to {}".format(host))
        self.provider = provider(
            "vsphere",
            user=username,
            password=password,
            vsphere_server=host,
            allow_unverified_ssl=True)
        self.ts.add(self.provider)

    def setDatacenter(self, datacenter):
        """Register the datacenter; required before most other setters."""
        logger = logging.getLogger()
        logger.debug("Set VSphere datacenter to {}".format(datacenter))
        self.datacenter = vsphere_datacenter("dc", name=datacenter)
        self.ts.add(self.datacenter)

    def setDatastore(self, datastore):
        """Register the datastore; requires setDatacenter() first."""
        if not self.datacenter:
            # ValueError subclasses Exception, so any existing
            # `except Exception` handlers keep working.
            raise ValueError("setDatacenter() must be called before setDatastore()")
        logger = logging.getLogger()
        logger.debug("Set VSphere datastore to {}".format(datastore))
        self.datastore = vsphere_datastore(
            "ds", name=datastore, datacenter_id=self.datacenter.id)
        self.ts.add(self.datastore)

    def setResourcePool(self, pool):
        """Register the resource pool; requires setDatacenter() first."""
        if not self.datacenter:
            raise ValueError("setDatacenter() must be called before setResourcePool()")
        logger = logging.getLogger()
        logger.debug("Set VSphere Resource Pool to {}".format(pool))
        self.pool = vsphere_resource_pool(
            "pool", name=pool, datacenter_id=self.datacenter.id)
        self.ts.add(self.pool)

    def setTemplate(self, template):
        """Register the source template; requires setDatacenter() first."""
        if not self.datacenter:
            raise ValueError("setDatacenter() must be called before setTemplate()")
        logger = logging.getLogger()
        logger.debug("Set VSphere template to {}".format(template))
        self.template = data_vsphere_virtual_machine(
            "template", name=template, datacenter_id=self.datacenter.id)
        self.ts.add(self.template)

    def addDisk(self, size):
        """Append a disk of *size* GB; unit numbers are assigned sequentially."""
        idx = len(self.disks) + 1
        logger = logging.getLogger()
        logger.debug("Add {}GB disk".format(size))
        self.disks.append({
            "label": "disk{}".format(idx),
            "size": size,
            "unit_number": idx})

    def addNetworkInterface(self, dvp, ipaddr, cidr):
        """Attach a NIC on DVP *dvp* with static address *ipaddr*/*cidr*."""
        if not self.datacenter:
            raise ValueError("setDatacenter() must be called before addNetworkInterface()")
        logger = logging.getLogger()
        logger.debug("Add network card on {} DVP, with {}/{}".format(
            dvp, ipaddr, cidr))
        vnet = vsphere_network(dvp, name=dvp, datacenter_id=self.datacenter.id)
        self.networks.append(vnet)
        self.ts.add(vnet)
        self.interfaces.append({"network_id": vnet.id})
        self.iface_customization.append({
            "ipv4_address": ipaddr,
            "ipv4_netmask": cidr})

    def setDomain(self, domain):
        """Record the guest DNS domain used during customization."""
        logger = logging.getLogger()
        logger.debug("Set {} domain".format(domain))
        self.domain = domain

    def setTimezone(self, timezone):
        """Record the guest timezone used during customization."""
        logger = logging.getLogger()
        logger.debug("Set timezone to {}".format(timezone))
        self.timezone = timezone

    def setGateway(self, gateway):
        """Record the default IPv4 gateway used during customization."""
        logger = logging.getLogger()
        logger.debug("Set default gateway to {}".format(gateway))
        self.gateway = gateway

    def addDns(self, dns):
        """Append a DNS server to the customization list."""
        logger = logging.getLogger()
        logger.debug("Add {} to DNS list".format(dns))
        self.dns.append(dns)

    def saveConfiguration(self, filename):
        """Build the vsphere_virtual_machine resource and write the JSON.

        Requires template, pool, datastore and (if used) customization
        fields to have been set beforehand.  Returns 0 on success.
        """
        customize = {
            "linux_options": {
                "host_name": self.name,
                "domain": self.domain,
                "time_zone": self.timezone,
            },
            "network_interface": self.iface_customization,
            "ipv4_gateway": self.gateway,
            "dns_server_list": self.dns,
        }
        clone = {
            "template_uuid": self.template.id,
            "linked_clone": False,
            "customize": customize,
        }
        # Build the kwargs once and only add `folder` when one was supplied;
        # this removes the duplicated if/else resource definition.
        vm_kwargs = dict(
            name=self.name,
            resource_pool_id=self.pool.id,
            datastore_id=self.datastore.id,
            guest_id=self.guestid,
            num_cpus=self.cpu,
            memory=self.memory,
            network_interface=self.interfaces,
            disk=self.disks,
            clone=clone)
        if self.folder != '':
            vm_kwargs["folder"] = self.folder
        self.virtualMachine = vsphere_virtual_machine('vm', **vm_kwargs)
        self.ts.add(self.virtualMachine)
        # Context manager guarantees the handle is closed even on write errors.
        with open(filename, 'w') as fd:
            fd.writelines(self.ts.dump())
        return 0
from terrascript import Terrascript, provider from terrascript.google.r import * from python_terraform import * ts = Terrascript() region = 'asia-south1' ts += provider('google', credentials='../service_account_gke.json', project='learn-internal', region=region) nw = ts.add( google_compute_network('test-network', name='test-gke-nw', auto_create_subnetworks=False, routing_mode='GLOBAL')) snw = ts.add( google_compute_subnetwork('test-snw', name='test-gke-snw', ip_cidr_range='10.5.0.0/16', region=region, network=nw.name)) gke = ts.add( google_container_cluster( 'primary', name='my-cluster', zone='asia-south1-a', additional_zones=['asia-south1-b', 'asia-south1-c'], cluster_ipv4_cidr='172.16.0.0/16',
class TerraformHclFromDataSource:
    """Build Terraform resources and matching `terraform import` commands
    from a JSON record file.

    `keywords_names` is a comma-separated spec; each entry is either
    "record_key" or "record_key=hcl_key", mapping fields of each record to
    the keyword arguments of the generated resource.
    """

    def __init__(self, record_file_name, resource_type, id_field_name,
                 keywords_names):
        keywords_names_list = str(keywords_names).replace(" ", "").split(',')
        # Left-hand side of each "a=b" entry: the key in the JSON record.
        keywords_in_record = [
            str(v).replace(" ", "").split('=')[0] for v in keywords_names_list
        ]
        # Right-hand side (or the bare name): the HCL keyword to emit.
        keywords_in_hcl = [
            str(v).replace(" ", "").split('=')[1]
            if '=' in str(v) else str(v).replace(" ", "").split('=')[0]
            for v in keywords_names_list
        ]
        keywords_names_dict = dict(zip(keywords_in_record, keywords_in_hcl))
        self.file = open(record_file_name, 'r')
        json_data = json.load(self.file)
        alicloud_factory = factory(alicloud_instance, str(resource_type))
        self.alicloud_hcl_store = Terrascript()
        self.alicloud_cmd_store = []
        for record in json_data:
            # Every record must carry a non-empty id field.
            assert record.get(id_field_name, "") != ""
            factory_parameter_dict = {}
            for k_record, k_hcl in keywords_names_dict.items():
                # BUG FIX: the original tested `record[k_record] is dict`,
                # which compares the value against the *type object* and is
                # always False, so dict values were never serialized.
                v = json.dumps(
                    record[k_record], sort_keys=True
                ) if isinstance(record[k_record], dict) else record[k_record]
                factory_parameter_dict[k_hcl] = v
            resource_name = str(resource_type) + '-' + str(
                record[id_field_name])
            one_hcl_item = alicloud_factory(resource_name,
                                            **factory_parameter_dict)
            one_cmd_item = "terraform import {}.{} {}".format(
                resource_type, resource_name, str(record[id_field_name]))
            self.alicloud_hcl_store.add(one_hcl_item)
            self.alicloud_cmd_store.append(one_cmd_item)

    def dump_hcl(self):
        """Dump Terraform Code."""
        print(self.alicloud_hcl_store.dump())

    def dump_cmd(self):
        """Dump Terraform import Command."""
        for cmd in self.alicloud_cmd_store:
            print(cmd)

    def close(self):
        """Close the record file; safe to call more than once."""
        if self.file:
            self.file.close()
            self.file = None
'/home/nasiruddin_happy/ns002p/service_account.json') ts = Terrascript() ts += provider( 'google', credentials='/home/nasiruddin_happy/ns002p/service_account.json', project='learn-internal', region='us-central1') inst = ts.add( google_compute_instance('test-vm', name='test-nasir1-tft', boot_disk=[{ 'initialize_params': [{ 'image': 'debian-cloud/debian-9' }] }], network_interface=[{ 'network': 'default', 'access_config': {} }], machine_type='n1-standard-1', zone='us-east1-b')) print(ts.dump()) file = open("/home/nasiruddin_happy/ns002p/template/terraform/sample.tf.json", "w") file.write(ts.dump()) file.close()
def process(terrayaml: str, metadata: dict, logger: KopfObjectLogger) -> str:
    """Render *terrayaml* through Jinja2, build a Terraform plan from the
    resulting YAML and report the result.

    Returns the plan working directory ("planId").
    """
    #
    # User input YAML
    #
    env = jinja2.Environment()
    env.filters['random_password'] = random_password
    env.filters['random_name'] = random_name
    template = T(template_text=terrayaml, environment=env).render(**metadata)
    provision = yaml.load(template, Loader=yaml.FullLoader)
    logger.info(f"provision this template {provision}")

    #
    # Start terraform
    #
    meta = provision.pop('meta', {})
    team = meta.get('team', 'oss')
    profile = meta.get('profile', PROFILE)
    environment = meta.get('environment', 'testing')
    application = meta.get('application', 'wurkflow')
    statefile_region = meta.get('statefile_region', 'eu-west-1')
    ts = Terrascript()
    ts += Terraform(required_version=">= 0.12.7").backend(
        "s3",
        bucket=REMOTE_STATE_S3_BUCKET,
        key=
        f"k8/terrayaml-operator/{team}/{environment}/{application}-terraform.tfstate",
        region=statefile_region,
        profile=ROOT_PROFILE)

    #
    # Extract the notify component
    #
    # BUG FIX: `recipients` used to stay unbound when `notify` was falsy,
    # raising NameError at the `if recipients:` check below; also pop with a
    # default so a template without `notify` no longer raises KeyError.
    recipients = []
    notify = provision.pop('notify', None)
    if notify:
        # tuple of email, key
        recipient_emails = notify.get('email', [])
        # append out infra provisioner email
        recipient_emails.append('*****@*****.**')
        recipients = get_recipients_from_pgp(recipient_emails=recipient_emails)
        logger.info(f"notify these emails: {recipient_emails}")

    #
    # Parse the yaml: every remaining top-level key is a provider section.
    # (Loop variable renamed from `provider` to avoid shadowing.)
    #
    for provider_name in provision:
        for resource, data in provision.get(provider_name).items():
            for item in data.get('items', []):
                api = TF_YAML_MAP.get(resource)
                outputs = item.pop('outputs', [])
                item_name = item.pop('name', random_name(value=resource))
                tf_resource = api(item_name, **item)
                ts.add(tf_resource)
                # handle terraform outputs
                for opt in outputs:
                    assert getattr(
                        tf_resource, opt.get('value')
                    ), f"{tf_resource} has no attribute {opt.get('value')}"
                    ts.add(
                        Output(opt.get('name'),
                               value=getattr(tf_resource, opt.get('value'))))

    # Add a provider (+= syntax)
    ts += Provider('aws',
                   skip_metadata_api_check=True,
                   profile=profile,
                   region=REGION)
    data = ts.dump()

    # Plan
    working_dir = tempfile.mkdtemp(dir='./runs')
    crd_api = kubernetes.client.CustomObjectsApi()
    selfLink = metadata.get('selfLink').split('/')
    # update with planId
    logger.info(f"planId: {working_dir}")
    crd_api.patch_namespaced_custom_object(
        group=selfLink[2],
        version=selfLink[3],
        name=selfLink[7],
        namespace=selfLink[5],
        plural=selfLink[6],
        body={"spec": {
            "planId": working_dir
        }})
    tf_response, tf_code = terraform(working_dir=working_dir,
                                     data=data,
                                     logger=logger)
    logger.info(f"Terraform Plan result: {tf_response}")
    if recipients:
        logger.info(f"Send email to {recipients}")
        send_email(to=recipients,
                   attachment=tf_response,
                   message_type='success' if tf_code != 1 else 'error')
    else:
        logger.info('No recipients defined')
    logger.info(f"PlanId is {working_dir}")
    return f"{working_dir}"
from terrascript import Terrascript, provider from terrascript.google.r import * from variables import * ts = Terrascript() ## Add Resources to Terraform files nw = ts.add( google_compute_network('test-network', name='test-pavan-nw', auto_create_subnetworks=False, routing_mode='GLOBAL')) snw = ts.add( google_compute_subnetwork('test-snw', name='test-pavan-snw', ip_cidr_range='10.5.0.0/16', region=region, network=nw.name)) firewall80 = ts.add( google_compute_firewall('test-firewall-80', name='test-pavan-80', network=nw.name, allow=[{ 'protocol': 'tcp', 'ports': ['80', '443'] }], source_ranges=['0.0.0.0/0'], target_tags=['test-http'])) firewall22 = ts.add( google_compute_firewall('test-firewall-22',
def wireguard_terraform(region, ami):
    """Render the wireguard Terraform config and apply it with the CLI."""
    cfg = Terrascript()
    cfg += provider('aws',
                    access_key=ACCESS_KEY,
                    secret_key=SECRET_KEY,
                    region=region)

    # If key already exists, an error will be thrown and infrastructure will not build
    cfg.add(aws_key_pair('wireguard_key',
                         key_name="wireguard",
                         public_key=PUBLIC_KEY))

    # If security groups already exist, an error will be thrown but infrastructure will still build
    cfg.add(aws_security_group('allow_ssh',
                               name='allow_ssh',
                               description='default',
                               ingress=[{
                                   'from_port': 22,
                                   'to_port': 22,
                                   'protocol': 'tcp',
                                   'cidr_blocks': ['0.0.0.0/0']
                               }],
                               egress=[{
                                   'from_port': 0,
                                   'to_port': 0,
                                   'protocol': '-1',
                                   'cidr_blocks': ['0.0.0.0/0']
                               }]))
    cfg.add(aws_security_group('wg',
                               name='wg',
                               description='Open port for wireguard',
                               ingress=[{
                                   'from_port': 54321,
                                   'to_port': 54321,
                                   'protocol': 'tcp',
                                   'cidr_blocks': ["0.0.0.0/0"]
                               }]))

    server = cfg.add(
        aws_instance('wireguard_test',
                     ami=ami,
                     instance_type='t2.micro',
                     security_groups=["allow_ssh", "wg"],
                     key_name="wireguard",
                     tags=[{'Name': 'wireguard'}]))
    address = cfg.add(aws_eip('default', instance=server.id, vpc=True))
    cfg.add(aws_eip_association('eip_assoc',
                                instance_id=server.id,
                                allocation_id=address.id))

    with open('wireguard.tf', 'w') as tf:
        tf.write(cfg.dump())
    subprocess.run(['terraform', 'init'])
    subprocess.run(['terraform', 'apply', '-auto-approve'])