Example #1
def test_issue35():
    """Issue 35: missing some vSphere data sources and resources."""

    ts = Terrascript()

    ts += r.vsphere_custom_attribute(
        "attribute",
        name="terraform-test-attribute",
        managed_object_type="VirtualMachine",
    )

    ts += r.vsphere_datastore_cluster(
        "datastore_cluster",
        name="terraform-datastore-cluster-test",
        datacenter_id="ID",
        sdrs_enabled=True,
    )

    ts += r.vsphere_storage_drs_vm_override(
        "drs_vm_override",
        datastore_cluster_id="ID1",
        virtual_machine_id="ID2",
        sdrs_enabled=False,
    )

    ts += d.vsphere_custom_attribute("attribute",
                                     name="terraform-test-attribute")

    ts += d.vsphere_datastore_cluster("datastore_cluster",
                                      name="datastore-cluster1",
                                      datacenter_id="ID")

    assert ts.validate() is True
Example #3
def create_variables(app_name, app_port, cluster_name):
	var_file = config.tf_dir + "/variables.tf" 
	ts = Terrascript()

	var_access_key = variable('aws_access_key', default='')
	var_secret_key = variable('aws_secret_key', default='')
	var_region = variable('region', default='eu-west-1')
	var_ami = variable('ami', default='ami-066826c6a40879d75')
	var_az_qty = variable('az_qty', default='2')
	var_cluster_name = variable('ecs_cluster_name', default=cluster_name)
	var_instance_type = variable('instance_type', default='t2.micro')
	var_key_name = variable('key_name', default='rep')
	var_app_port = variable('app_port', default=app_port)
	var_app_name = variable('app_name', default=app_name)

	ts += var_access_key
	ts += var_secret_key
	ts += var_region
	ts += var_ami
	ts += var_az_qty
	ts += var_cluster_name
	ts += var_instance_type
	ts += var_key_name
	ts += var_app_port
	ts += var_app_name

	print ("Creating file: " + var_file)
	with open(var_file, 'w') as tfile:
		tfile.write(ts.dump())
def test_update_raise_error():
    ts = Terrascript()
    var_region = variable('region')
    exception_raised = False
    try:
        ts.update(var_region)
    except TypeError as e:
        exception_raised = True

    assert exception_raised is True
def ts_digitalocean(version='>= 1.7, < 1.9', **kwargs):
    ts_local = Terrascript()

    do_token = ts_local.add(variable('do_token'))

    default_parms = dict(token=do_token)
    default_parms.update(kwargs)

    ts_local.add(provider('digitalocean', version=version, **default_parms))

    return ts_local
def ts_amazon_aws(version='<= 2.30', AWS_REGION_LIST=['us-east-1'], **kwargs):
    ts_local = Terrascript()

    aws_access_key = ts_local.add(variable('aws_access_key'))
    aws_secret_key = ts_local.add(variable('aws_secret_key'))
    aws_region = ts_local.add(variable('aws_region', default='us-east-1'))

    default_params = dict(
        version=version,
        access_key=aws_access_key,
        secret_key=aws_secret_key,
    )
    default_params.update(kwargs)

    # Providers
    aws_providers_map = {}
    for region in AWS_REGION_LIST:
        _provider = provider('aws',
                             region=region,
                             alias=region,
                             **default_params)
        aws_providers_map[region] = ts_local.add(_provider)

    ts_local.add(provider('aws', region=aws_region, **default_params))

    # VPC
    aws_vpc_map = {}
    for region in AWS_REGION_LIST:
        aws_provider = 'aws.{0}'.format(region)
        vpc_name = 'vpc-{}'.format(region)
        aws_vpc_map[region] = ts_local.add(
            aws_vpc(vpc_name, provider=aws_provider))

    return ts_local
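A minimal usage sketch for the AWS helper above (the region list is illustrative only); dump() then renders the collected variables, providers and per-region VPC data sources as Terraform JSON:

ts_aws = ts_amazon_aws(AWS_REGION_LIST=['us-east-1', 'eu-west-1'])
print(ts_aws.dump())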
def test_issue32():
    """Issue 32: provisioner return one line with provisioner.file instead a dictionary."""

    ts = Terrascript()

    ts += provider('aws', region='ap-southeast-2')

    p = provisioner('local-exec', command='date > $(mktemp tmpXXXXXXX.terrascript)')

    ts += aws_instance('I1', ami='ami-60a26a02', instance_type='t2.nano', provisioner=p)

    j = ts.dump()

    assert 'mktemp' in j

    assert ts.validate() is True
Example #8
    def upload_file(
        self,
        content: str,
        *,
        destination: str = DEFAULT_UPLOAD_PATH,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        upload_config = Terrascript()

        ssh_conn = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        file_resource = self.null_resource(
            "upload_file_resource",
            provisioner=provisioner(
                self.TERRAFORM_RESOURCE_FILE,
                content=content,
                destination=destination,
                connection=ssh_conn,
            ),
        )

        upload_config += file_resource
        return upload_config
Example #9
 def __init__(self,
              integration,
              integration_prefix,
              thread_pool_size,
              oc_map={}):
     self.integration = integration
     self.integration_prefix = integration_prefix
     self.oc_map = oc_map
     self.thread_pool_size = thread_pool_size
     self.populate_configs_and_vars_from_vault()
     tss = {}
     locks = {}
     for name, config in self.configs.items():
         # Ref: https://github.com/mjuenema/python-terrascript#example
         ts = Terrascript()
         ts += provider('aws',
                        access_key=config['aws_access_key_id'],
                        secret_key=config['aws_secret_access_key'],
                        version=config['aws_provider_version'],
                        region=config['region'])
         b = backend("s3",
                     access_key=config['aws_access_key_id'],
                     secret_key=config['aws_secret_access_key'],
                     bucket=config['bucket'],
                     key=config['{}_key'.format(integration)],
                     region=config['region'])
         ts += terraform(backend=b)
         tss[name] = ts
         locks[name] = Lock()
     self.tss = tss
     self.locks = locks
 def __init__(self, name, guestid, cpu, memory, folder=''):
     self.ts = Terrascript()
     self.name = name
     self.guestid = guestid
     self.cpu = cpu
     self.memory = memory
     self.folder = folder
     self.provider = None
     self.datacenter = None
     self.datastore = None
     self.template = None
     self.gateway = None
     self.disks = []
     self.networks = []
     self.interfaces = []
     self.iface_customization = []
     self.dns = []
Example #11
def test_issue33():
    """Issue 33: Suggestion of syntax to terrascript"""

    ts = Terrascript()

    var_access_key = ts.add(variable('access_key'))
    assert isinstance(var_access_key, variable)

    var_secret_key = ts.add(variable('secret_key'))
    assert isinstance(var_secret_key, variable)

    var_region = ts.add(variable('region', default='us-east-1'))
    assert isinstance(var_region, variable)

    ts += provider('aws', access_key=var_access_key,
                   secret_key=var_secret_key, region=var_region)

    resource_aws_instance = ts.add(aws_instance('example', ami='ami-2757f631', instance_type='t2.micro'))
    assert isinstance(resource_aws_instance, aws_instance)

    ts += output('example_public_ip', value=resource_aws_instance.public_ip, 
                 description='Public IP of example')

    assert ts.validate() is True
    def __init__(self, record_file_name, resource_type, id_field_name,
                 keywords_names):

        keywords_names_list = str(keywords_names).replace(" ", "").split(',')
        keywords_in_record = [
            str(v).replace(" ", "").split('=')[0] for v in keywords_names_list
        ]
        keywords_in_hcl = [
            str(v).replace(" ", "").split('=')[1]
            if '=' in str(v) else str(v).replace(" ", "").split('=')[0]
            for v in keywords_names_list
        ]
        keywords_names_dict = dict(zip(keywords_in_record, keywords_in_hcl))

        self.file = open(record_file_name, 'r')

        json_data = json.load(self.file)

        alicloud_factory = factory(alicloud_instance, str(resource_type))

        self.alicloud_hcl_store = Terrascript()
        self.alicloud_cmd_store = []

        for record in json_data:
            assert record.get(id_field_name, "") != ""
            factory_parameter_dict = {}
            for k_record, k_hcl in keywords_names_dict.items():
                v = json.dumps(
                    record[k_record], sort_keys=True
                ) if isinstance(record[k_record], dict) else record[k_record]
                factory_parameter_dict[k_hcl] = v
                # factory_parameter_dict[k_hcl] = record[k_record]

            resource_name = str(resource_type) + '-' + str(
                record[id_field_name])

            one_hcl_item = alicloud_factory(resource_name,
                                            **factory_parameter_dict)
            one_cmd_item = "terraform import {}.{} {}".format(
                resource_type, resource_name, str(record[id_field_name]))

            self.alicloud_hcl_store.add(one_hcl_item)
            self.alicloud_cmd_store.append(one_cmd_item)
Example #13
def get_aws_ec2_image(name='ubuntu-18-04-minimal',
                      aws_region_list=['us-east-1'], **kwargs):
    ts_local = Terrascript()

    filter_value = 'ubuntu-minimal/images/hvm-ssd/ubuntu-bionic-18.04-amd64-minimal-20190723*'
    filter_value = kwargs.pop('filter_value', '') or filter_value

    default_params = dict(
        owners=['099720109477'],
        most_recent=True,
        filter=[
            dict(
                name='name',
                values=[
                    filter_value
                ]
            ),
            dict(
                name='root-device-type',
                values=[
                    'ebs'
                ]
            )
        ]
    )
    default_params.update(kwargs)

    aws_ami_ubuntu_18_04_minimal_map = {}
    for region in aws_region_list:
        _provider = 'aws.{}'.format(region)
        _aws_ami = ts_local.add(aws_ami(
            '{0}-{1}'.format(name, region),
            provider=_provider,
            **default_params
        ))
        aws_ami_ubuntu_18_04_minimal_map[region] = _aws_ami

    return aws_ami_ubuntu_18_04_minimal_map, ts_local
Example #14
    def remote_exec(self,
                    *,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        exec_config = Terrascript()
        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        exec_resource = self.null_resource('remote-exec',
                                           provisioner=provisioner(
                                               "remote-exec",
                                               inline=['ls -la'],
                                               connection=ssh_conn))

        exec_config += exec_resource
        return exec_config
def ts_cloudflare(version='< 1.9.0', **kwargs):
    ts_local = Terrascript()

    cloudflare_email = ts_local.add(variable('cloudflare_email'))
    cloudflare_token = ts_local.add(variable('cloudflare_token', default=''))

    default_parms = dict(
        email=cloudflare_email,
        token=cloudflare_token,
    )
    default_parms.update(kwargs)

    ts_local.add(provider('cloudflare', version=version, **default_parms))
    return ts_local
Example #16
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        vultr_server = cls.vultr_server(
            "server",
            plan_id=config_data["plan_code"],
            region_id=config_data["region_code"],
            os_id=config_data["os_code"],
            hostname=config_data["hostname"],
            ssh_key_ids=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        vultr_output_ip = Output("ip", value="${vultr_server.server.main_ip}")
        vultr_output_id = Output("server_id",
                                 value="${vultr_server.server.id}")

        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key("vultr_ssh_key",
                                              name="default_key",
                                              ssh_key=public_key)

            vultr_server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            vultr_config += vultr_ssh_key

        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id

        return vultr_config
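Judging from the keys read above, the config_data mapping that gen_vultr_config (and the DigitalOcean variant below) expects looks roughly like this; every value is a hypothetical placeholder:

config_data = {
    'plan_code': 'vc2-1c-1gb',      # provider-specific plan identifier
    'region_code': 'ewr',           # provider-specific region identifier
    'os_code': 'ubuntu-20-04-x64',  # image / operating system identifier
    'hostname': 'demo-host',
    'ssh_keys': [],                 # optional list of pre-registered key IDs
}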
Example #17
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            "server",
            image=config_data["os_code"],
            name=config_data["hostname"],
            region=config_data["region_code"],
            size=config_data["plan_code"],
            ssh_keys=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            do_droplet_resource["ssh_keys"] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key

        do_output_ip = Output(
            "ip", value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output("server_id",
                              value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id

        return do_config
Example #19
from terrascript import Terrascript, provider
from terrascript.google.r import *
from variables import *
ts = Terrascript()
## Add Resources to Terraform files
nw = ts.add(
    google_compute_network('test-network',
                           name='test-pavan-nw',
                           auto_create_subnetworks=False,
                           routing_mode='GLOBAL'))

snw = ts.add(
    google_compute_subnetwork('test-snw',
                              name='test-pavan-snw',
                              ip_cidr_range='10.5.0.0/16',
                              region=region,
                              network=nw.name))

firewall80 = ts.add(
    google_compute_firewall('test-firewall-80',
                            name='test-pavan-80',
                            network=nw.name,
                            allow=[{
                                'protocol': 'tcp',
                                'ports': ['80', '443']
                            }],
                            source_ranges=['0.0.0.0/0'],
                            target_tags=['test-http']))

firewall22 = ts.add(
    google_compute_firewall('test-firewall-22',
def test_update_add_value():
    ts = Terrascript()
    ts2 = Terrascript()

    var_access_key = ts.add(variable('access_key'))
    var_secret_key = ts.add(variable('secret_key'))
    var_region = ts.add(variable('region', default='us-east-1'))

    ts += provider('aws',
                   access_key=var_access_key,
                   secret_key=var_secret_key,
                   region=var_region)

    resource_aws_instance = ts.add(
        aws_instance('example', ami='ami-2757f631', instance_type='t2.micro'))
    resource_aws_instance_2 = ts2.add(
        aws_instance('example_second',
                     ami='ami-2757f631',
                     instance_type='t2.micro'))
    assert isinstance(resource_aws_instance, aws_instance)
    assert isinstance(resource_aws_instance_2, aws_instance)

    # ts2 add resource "example_second" to ts
    ts.update(ts2)
    assert isinstance(ts, Terrascript)
    assert isinstance(ts2, Terrascript)

    ts_resources = ts.config.get('resource', {})
    assert len(ts_resources.items()) == 1
    ts_aws_instances = ts_resources.get('aws_instance')
    assert len(ts_aws_instances.items()) == 2

    assert resource_aws_instance._name in ts_aws_instances.keys()
    assert resource_aws_instance_2._name in ts_aws_instances.keys()

    assert ts.validate(delete=False) is True
def test_update_override_value():
    ts = Terrascript()
    ts2 = Terrascript()

    ts.add(variable('access_key'))
    ts.add(variable('secret_key'))

    var_region = ts.add(variable('region', default='us-east-1'))
    var_region2 = ts2.add(variable('region', default='ca-central-1'))

    assert var_region._kwargs['default'] != var_region2._kwargs['default']

    # ts2 override variable "region" in ts
    ts.update(ts2)
    assert isinstance(ts, Terrascript)
    assert isinstance(ts2, Terrascript)

    ts_variables = ts.config.get('variable', {})
    assert len(ts_variables.items()) == 3  # ensure there are still only three variables

    ts_var_region = ts_variables.get('region', {})
    assert ts_var_region.get('default',
                             '') == var_region2._kwargs.get('default')

    assert ts.validate(delete=False) is True
Example #22
def process(terrayaml: str, metadata: dict, logger: KopfObjectLogger) -> str:
    #
    # User input YAML
    #
    env = jinja2.Environment()
    env.filters['random_password'] = random_password
    env.filters['random_name'] = random_name

    template = T(template_text=terrayaml, environment=env).render(**metadata)

    provision = yaml.load(template, Loader=yaml.FullLoader)
    logger.info(f"provision this template {provision}")
    # print(provision)

    #
    # Start terraform
    #
    meta = provision.pop('meta', {})
    team = meta.get('team', 'oss')
    profile = meta.get('profile', PROFILE)
    environment = meta.get('environment', 'testing')
    application = meta.get('application', 'wurkflow')
    statefile_region = meta.get('statefile_region', 'eu-west-1')

    ts = Terrascript()
    ts += Terraform(required_version=">= 0.12.7").backend(
        "s3",
        bucket=REMOTE_STATE_S3_BUCKET,
        key=f"k8/terrayaml-operator/{team}/{environment}/{application}-terraform.tfstate",
        region=statefile_region,
        profile=ROOT_PROFILE)

    #
    # Extract the notify component
    #
    notify = provision.pop('notify', None)
    recipients = []
    if notify:
        # tuple of email, key
        recipient_emails = notify.get('email', [])
        # append out infra provisioner email
        recipient_emails.append('*****@*****.**')
        recipients = get_recipients_from_pgp(recipient_emails=recipient_emails)
        logger.info(f"notify these emails: {recipient_emails}")

    #
    # Parse the yaml
    #
    for provider in provision:
        #print(f"----- output for provider: {provider.upper()} -----")
        for resource, data in provision.get(provider).items():
            #print(f"----- output for resource: {resource} -----")
            for item in data.get('items', []):
                api = TF_YAML_MAP.get(resource)
                outputs = item.pop('outputs', [])
                item_name = item.pop('name', random_name(value=resource))
                tf_resource = api(item_name, **item)
                ts.add(tf_resource)

                # handle terraform outputs
                for opt in outputs:
                    assert getattr(
                        tf_resource, opt.get('value')
                    ), f"{tf_resource} has no attribute {opt.get('value')}"
                    ts.add(
                        Output(opt.get('name'),
                               value=getattr(tf_resource, opt.get('value'))))

    # Add a provider (+= syntax)
    ts += Provider('aws',
                   skip_metadata_api_check=True,
                   profile=profile,
                   region=REGION)
    data = ts.dump()

    # Plan
    working_dir = tempfile.mkdtemp(dir='./runs')
    crd_api = kubernetes.client.CustomObjectsApi()
    selfLink = metadata.get('selfLink').split('/')
    # update with planId
    logger.info(f"planId: {working_dir}")
    crd_api.patch_namespaced_custom_object(
        group=selfLink[2],
        version=selfLink[3],
        name=selfLink[7],
        namespace=selfLink[5],
        plural=selfLink[6],
        body={"spec": {
            "planId": working_dir
        }})

    tf_response, tf_code = terraform(working_dir=working_dir,
                                     data=data,
                                     logger=logger)
    logger.info(f"Terraform Plan result: {tf_response}")

    if recipients:
        logger.info(f"Send email to {recipients}")
        send_email(to=recipients,
                   attachment=tf_response,
                   message_type='success' if tf_code != 1 else 'error')
    else:
        logger.info('No recipients defined')
    logger.info(f"PlanId is {working_dir}")
    return f"{working_dir}"
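Working backwards from the parsing loop, the rendered terrayaml presumably deserializes into something like the structure below; the resource key must exist in TF_YAML_MAP, and every name and value here is illustrative only:

# Hypothetical shape of `provision` after yaml.load(); not taken from the source.
provision = {
    'meta': {'team': 'oss', 'environment': 'testing', 'application': 'wurkflow'},
    'notify': {'email': ['ops@example.org']},
    'aws': {                           # provider
        's3_bucket': {                 # resource key looked up in TF_YAML_MAP
            'items': [{
                'name': 'my-bucket',   # popped and used as the Terraform resource name
                'outputs': [{'name': 'bucket_arn', 'value': 'arn'}],
                'acl': 'private',      # remaining keys become resource arguments
            }],
        },
    },
}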
Example #23
 def __init__(self, name):
     self.name = name
     self.ts = Terrascript()
Example #24
from terrascript import Terrascript, provider
from terrascript.google.r import *
from python_terraform import *

ts = Terrascript()
region = 'asia-south1'
ts += provider('google',
               credentials='../service_account_gke.json',
               project='learn-internal',
               region=region)

nw = ts.add(
    google_compute_network('test-network',
                           name='test-gke-nw',
                           auto_create_subnetworks=False,
                           routing_mode='GLOBAL'))

snw = ts.add(
    google_compute_subnetwork('test-snw',
                              name='test-gke-snw',
                              ip_cidr_range='10.5.0.0/16',
                              region=region,
                              network=nw.name))

gke = ts.add(
    google_container_cluster(
        'primary',
        name='my-cluster',
        zone='asia-south1-a',
        additional_zones=['asia-south1-b', 'asia-south1-c'],
        cluster_ipv4_cidr='172.16.0.0/16',
Example #25
from terrascript import Terrascript, provider
from python_terraform import *
from terrascript.google.r import google_compute_instance
from google.oauth2 import service_account

credentials = service_account.Credentials.from_service_account_file(
    '/home/nasiruddin_happy/ns002p/service_account.json')
ts = Terrascript()

ts += provider(
    'google',
    credentials='/home/nasiruddin_happy/ns002p/service_account.json',
    project='learn-internal',
    region='us-central1')
inst = ts.add(
    google_compute_instance('test-vm',
                            name='test-nasir1-tft',
                            boot_disk=[{
                                'initialize_params': [{
                                    'image':
                                    'debian-cloud/debian-9'
                                }]
                            }],
                            network_interface=[{
                                'network': 'default',
                                'access_config': {}
                            }],
                            machine_type='n1-standard-1',
                            zone='us-east1-b'))

print(ts.dump())
class TerrascriptVSphereVM:
    def __init__(self, name, guestid, cpu, memory, folder=''):
        self.ts = Terrascript()
        self.name = name
        self.guestid = guestid
        self.cpu = cpu
        self.memory = memory
        self.folder = folder
        self.provider = None
        self.datacenter = None
        self.datastore = None
        self.template = None
        self.gateway = None
        self.disks = []
        self.networks = []
        self.interfaces = []
        self.iface_customization = []
        self.dns = []

    def setProvider(self, host, username, password):
        logger = logging.getLogger()
        logger.debug("Set VSphere provider to {}".format(host))
        self.provider = provider(
            "vsphere",
            user=username,
            password=password,
            vsphere_server=host,
            allow_unverified_ssl=True)
        self.ts.add(self.provider)

    def setDatacenter(self, datacenter):
        logger = logging.getLogger()
        logger.debug("Set VSphere datacenter to {}".format(datacenter))
        self.datacenter = vsphere_datacenter(
            "dc",
            name=datacenter)
        self.ts.add(self.datacenter)

    def setDatastore(self, datastore):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Set VSphere datastore to {}".format(datastore))
            self.datastore = vsphere_datastore(
                "ds",
                name=datastore,
                datacenter_id=self.datacenter.id)
            self.ts.add(self.datastore)

    def setResourcePool(self, pool):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Set VSphere Resource Pool to {}".format(pool))
            self.pool = vsphere_resource_pool(
                "pool",
                name=pool,
                datacenter_id=self.datacenter.id)
            self.ts.add(self.pool)

    def setTemplate(self, template):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Set VSphere template to {}".format(template))
            self.template = data_vsphere_virtual_machine(
                "template",
                name=template,
                datacenter_id=self.datacenter.id)
            self.ts.add(self.template)

    def addDisk(self, size):
        idx = len(self.disks) + 1
        logger = logging.getLogger()
        logger.debug("Add {}GB disk".format(size))
        self.disks.append({
            "label": "disk{}".format(idx),
            "size": size,
            "unit_number": idx})

    def addNetworkInterface(self, dvp, ipaddr, cidr):
        if not self.datacenter:
            raise Exception
        else:
            logger = logging.getLogger()
            logger.debug("Add network card on {} DVP, with {}/{}".format(dvp, ipaddr, cidr))
            vnet = vsphere_network(
                dvp,
                name=dvp,
                datacenter_id=self.datacenter.id)
            self.networks.append(vnet)
            self.ts.add(vnet)
            self.interfaces.append({"network_id": vnet.id})
            self.iface_customization.append({
                "ipv4_address": ipaddr,
                "ipv4_netmask": cidr})

    def setDomain(self, domain):
        logger = logging.getLogger()
        logger.debug("Set {} domain".format(domain))
        self.domain = domain

    def setTimezone(self, timezone):
        logger = logging.getLogger()
        logger.debug("Set timezone to {}".format(timezone))
        self.timezone = timezone

    def setGateway(self, gateway):
        logger = logging.getLogger()
        logger.debug("Set default gateway to {}".format(gateway))
        self.gateway = gateway

    def addDns(self, dns):
        logger = logging.getLogger()
        logger.debug("Add {} to DNS list".format(dns))
        self.dns.append(dns)

    def saveConfiguration(self, filename):
        linuxOptions = {}
        linuxOptions["host_name"] = self.name
        linuxOptions["domain"] = self.domain
        linuxOptions["time_zone"] = self.timezone

        customize = {}
        customize["linux_options"] = linuxOptions
        customize["network_interface"] = self.iface_customization
        customize["ipv4_gateway"] = self.gateway
        customize["dns_server_list"] = self.dns

        clone = {}
        clone["template_uuid"] = self.template.id
        clone["linked_clone"] = False
        clone["customize"] = customize

        if self.folder != '':
            self.virtualMachine = vsphere_virtual_machine(
                'vm',
                name=self.name,
                resource_pool_id=self.pool.id,
                datastore_id=self.datastore.id,
                guest_id=self.guestid,
                folder=self.folder,
                num_cpus=self.cpu,
                memory=self.memory,
                network_interface=self.interfaces,
                disk=self.disks,
                clone=clone)
        else:
            self.virtualMachine = vsphere_virtual_machine(
                'vm',
                name=self.name,
                resource_pool_id=self.pool.id,
                datastore_id=self.datastore.id,
                guest_id=self.guestid,
                num_cpus=self.cpu,
                memory=self.memory,
                network_interface=self.interfaces,
                disk=self.disks,
                clone=clone)
        self.ts.add(self.virtualMachine)
        with open(filename, 'w') as fd:
            fd.writelines(self.ts.dump())
        return 0
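A hedged end-to-end sketch of how the class above appears to be driven; host names, credentials and sizes are placeholders, and saveConfiguration() expects the provider, datacenter, datastore, resource pool, template, domain, timezone and gateway to have been set first:

vm = TerrascriptVSphereVM('web01', 'ubuntu64Guest', cpu=2, memory=4096, folder='demo')
vm.setProvider('vcenter.example.org', 'admin', 'secret')
vm.setDatacenter('DC1')
vm.setDatastore('datastore1')
vm.setResourcePool('pool1')
vm.setTemplate('ubuntu-template')
vm.setDomain('example.org')
vm.setTimezone('UTC')
vm.setGateway('10.0.0.1')
vm.addDns('10.0.0.2')
vm.addDisk(20)                                  # 20 GB data disk
vm.addNetworkInterface('dvp-demo', '10.0.0.10', 24)
vm.saveConfiguration('vm.tf.json')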
Example #27
    def gen_site_docker_deploy_config(cls,
                                      *,
                                      docker_host: str = DEFAULT_DOCKER_HOST,
                                      site_name: str = None,
                                      template_tar_bytes: bytes = None,
                                      script: str = None,
                                      ssh_user: str = DEFAULT_SSH_USER,
                                      ssh_private_key: str,
                                      ssh_host: str,
                                      ssh_port: int = DEFAULT_SSH_PORT):
        config = Terrascript()
        docker_provider = provider.docker(host=docker_host,
                                          connection=cls.gen_ssh_conn_config(
                                              ssh_user=ssh_user,
                                              ssh_private_key=ssh_private_key,
                                              ssh_host=ssh_host,
                                              ssh_port=ssh_port))
        docker_image_resource = resource.docker_image(
            'nginx_image',
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            'nginx_container',
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={'internal': 80},
            upload=[])
        docker_name_resource = resource.random_pet(
            'docker_pet_name',
            length=1,
        )

        if template_tar_bytes:
            template_tar_file = f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz"
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode('utf8')
            template_tar_path = f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            # self.upload_file(
            #     content='conf/myapp.conf',
            #     destination=f"{self.DEFAULT_UPLOAD_PATH}/${template_tar_file}",
            #     ssh_user=ssh_user,
            #     ssh_private_key=ssh_private_key,
            #     ssh_host=ssh_host,
            #     ssh_port=ssh_port
            # )
            docker_container_resource['upload'].append({
                'content_base64':
                template_tar_file_content,
                'file':
                template_tar_path
            })

        if script:
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH)
            docker_container_resource['upload'].append({
                'content':
                entrypoint_sh_content,
                'file':
                cls.DEFAULT_DOCKER_ENTRYPOINT_PATH
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config
from terrascript import Terrascript

from terrascript import provider
from terrascript import variable
from terrascript.aws.d import aws_vpc

ts = Terrascript()

#########################################
# DigitalOcean


def ts_digitalocean(version='>= 1.7, < 1.9', **kwargs):
    ts_local = Terrascript()

    do_token = ts_local.add(variable('do_token'))

    default_parms = dict(token=do_token)
    default_parms.update(kwargs)

    ts_local.add(provider('digitalocean', version=version, **default_parms))

    return ts_local


#########################################
# Cloudflare


def ts_cloudflare(version='< 1.9.0', **kwargs):
    ts_local = Terrascript()
Example #29
from terrascript import Terrascript, provider
from terrascript.google.r import *
from nw import *
from variables import *

ts = Terrascript()
# metadata_startup_script = '${data.start_up.startup_script.rendered}'

inst = ts.add(
    google_compute_instance(
        'test-vm',
        name='test-pavan-tft',
        boot_disk=[{
            'initialize_params': [{
                'image': 'debian-cloud/debian-9'
            }]
        }],
        network_interface=[{
            'subnetwork': snw.self_link,
            'access_config': {}
        }],
        machine_type='n1-standard-1',
        tags=['test-http', 'test-ssh'],
        zone=zone,
        metadata_startup_script=
        '#!/bin/bash\napt-get update -y\napt-get install nginx -y\nservice nginx start'
    ))

file = open("./terraform/vm.tf.json", "w")
file.write(ts.dump())
from terrascript import Terrascript
from terrascript.digitalocean.r import digitalocean_ssh_key

ts = Terrascript()


def do_ssh_key(name, **kwargs):
    """
    Example of public_key:
        public_key='${file("/tmp/ssh_keys/soar-staging-keypair.pub")}'
    """

    default_params = dict(name=name)
    default_params.update(kwargs)
    return digitalocean_ssh_key(name, **default_params)


ts_key_pairs = ts

__all__ = ['ts_key_pairs', 'do_ssh_key']
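A small usage sketch for the helper above, following the public_key hint from its docstring (the key name and file path are placeholders):

ts_key_pairs.add(
    do_ssh_key('deploy-key',
               public_key='${file("/tmp/ssh_keys/soar-staging-keypair.pub")}'))
print(ts_key_pairs.dump())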