def clean():
    # collect the ids of tests still present in the running directory;
    # initialize the list up front so the membership test below cannot
    # raise NameError when RUNNING_DIR does not exist
    left_over_tests = []
    if os.path.exists(RUNNING_DIR):
        for test_id in os.listdir(RUNNING_DIR):
            left_over_tests.append(test_id)
    report_url = "%s/report" % CONFIG['report_service_base_url']
    resp = requests.get(report_url)
    reports = resp.json()
    output = {}
    for test_id in reports:
        if 'test timedout' in reports[test_id]['results'] and \
                test_id in left_over_tests:
            # timed-out test: report its instance details from the state file
            state = {}
            with open(os.path.join(RUNNING_DIR, test_id,
                                   'terraform.tfstate'), 'r') as sf:
                state = json.load(sf)
            for r in state['resources']:
                if r['type'] == 'ibm_is_instance':
                    output[test_id] = {
                        'instance_id': r['instances'][0]['attributes']['id'],
                        'image_id': r['instances'][0]['attributes']['image'],
                        'status': r['instances'][0]['attributes']['status']
                    }
        else:
            # any other leftover test: destroy its resources and remove the dir
            test_dir = os.path.join(RUNNING_DIR, test_id)
            if os.path.exists(test_dir):
                tf = pt.Terraform(working_dir=test_dir,
                                  var_file='test_vars.tfvars')
                tf.init()
                tf.destroy()
                shutil.rmtree(test_dir)
    print(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))
def deploy(
    project_link_or_path,
    public_key_path,
    private_key_path,
    py,
    instance_type,
    deployment_source="github",
):
    tfvars = utils.build_and_validate_tfvars(
        project_link_or_path,
        public_key_path,
        private_key_path,
        py,
        instance_type,
        deployment_source=deployment_source,
    )
    with open(DEPLOYMENT_DIR / "terraform.tfvars", "w") as f:
        f.write(tfvars)
    tf = pt.Terraform()
    tf.init(
        dir_or_plan=str(DEPLOYMENT_DIR),
        from_module=TERRAFORM_DIR,
        capture_output=False,
    )
    return_code, _, _ = tf.apply(capture_output=False)
def __init__(self, properties, uid):
    self.cloud_conf = properties["brokers"]
    self.uid = uid
    workspace = workspace_with_fingerprint(self.cloud_conf)
    if workspace is None:
        # no existing workspace matches this configuration: create a new one
        if not os.path.exists(self.uid):
            copy_tree(src=TERRAFORM_SOURCE_DIR, dst=self.uid)
        self.terraform = python_terraform.Terraform(working_dir=self.uid)
        self.terraform.init()
        self.provisioned = False
    else:
        # reuse the workspace whose fingerprint matches this configuration
        self.terraform = python_terraform.Terraform(working_dir=workspace)
        self.uid = workspace
        self.provisioned = True
def deploy(
    init: bool = typer.Option(
        False, help="Should terraform init be run before terraform apply"),
    project_home: str = typer.Option(".", help="The root of the project"),
    auto: bool = typer.Option(False),
):
    t = tf.Terraform(working_dir=f"{project_home}/infra")
    if not tf_command(t, "apply", init):
        return
def workspace_with_fingerprint(cloud_conf):
    target_fingerprint = fingerprint_from_configuration(cloud_conf)
    for directory in os.listdir("./"):
        if os.path.exists("%s/main.tf" % directory):
            terraform = python_terraform.Terraform(working_dir=directory)
            workspace_fingerprint = terraform.output("fingerprint")
            if workspace_fingerprint == target_fingerprint:
                return directory
    return None
def terraform_destroy(planId: str, logger: KopfObjectLogger) -> tuple:
    logger.info(f"PLANID: {planId}")
    # @TODO check if planId exists; throw a kopf exception if not
    ptf = python_terraform.Terraform(working_dir=planId)
    return_code, stdout, stderr = ptf.destroy(dir_or_plan=f"{planId}/plan",
                                              refresh=True,
                                              auto_approve=True)
    # return_code, stdout, stderr = 0, 'all destroyed', ''
    response = stdout if not stderr else stderr
    logger.info(f"TERRAFORM DESTROY COMPLETE: {return_code} {response}")
    return response, return_code
def __init__(self, **kwargs):
    super(TerraformInput, self).__init__(**kwargs)
    self.kind = 'terraform'
    self.name = kwargs['name']
    try:
        self.config_dir = kwargs['config_dir']
    except KeyError:
        raise ValueError('Missing parameter config_dir')
    self.client = python_terraform.Terraform(working_dir=self.config_dir)
    print(self.client.fmt(diff=True))
def deploy_cluster(
    # git_path=None,
):
    tf = pt.Terraform()
    tf.init(
        dir_or_plan=str(DEPLOYMENT_DIRECTORY),
        from_module=TERRAFORM_DIRECTORY,
        capture_output=False,
    )
    return_code, _, _ = tf.apply(capture_output=False, skip_plan=True)
    subprocess.call(str(BASE_DIRECTORY / "k8Communication.sh"))
def plan(
    init: bool = typer.Option(
        False, help="Should terraform init be run before terraform plan"),
    project_home: str = typer.Option(".", help="The root of the project"),
    no_terraform: bool = typer.Option(False, help="Skip terraform planning"),
):
    """Identify the plan that will be executed on a subsequent apply"""
    t = tf.Terraform(working_dir=f"{project_home}/infra")
    if not tf_command(t, "plan", init):
        return
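# Invocation sketch for the two typer commands above, assuming they are
# registered on a typer.Typer() app exposed as a console script (the script
# name `infra-cli` is a placeholder assumption, not from the source):
#
#   $ infra-cli plan --init --project-home ./myproject
#   $ infra-cli deploy --auto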
def destroy_test(test_path):
    test_id = os.path.basename(test_path)
    results = {"test_errored": "forced destroyed"}
    tf = pt.Terraform(working_dir=test_path, var_file='test_vars.tfvars')
    if not os.path.exists(os.path.join(test_path, '.terraform')):
        (rc, out, err) = tf.init()
        if rc > 0:
            # only record an init failure when init actually failed
            results = {'terraform_failed': "init failure: %s" % err}
    LOG.info('destroying cloud resources for test %s', test_id)
    (rc, out, err) = tf.destroy()
    if rc > 0:
        LOG.error('could not destroy test: %s: %s. Manually fix.',
                  test_id, err)
    else:
        shutil.rmtree(test_path)
def destroy():
    vpc_path = "%s/vpcs" % os.path.dirname(os.path.realpath(__file__))
    zones = [f.path for f in os.scandir(vpc_path) if f.is_dir()]
    for zone in zones:
        if os.path.basename(zone) in ZONES:
            LOG.debug('destroying resources in zone: %s',
                      os.path.basename(zone))
            tf = pt.Terraform(working_dir=zone)
            tf.init()
            tf.destroy(auto_approve=True,
                       var={'api_key': API_KEY},
                       capture_output=False)
    output_file = "%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
                             OUTPUT_FILE)
    if os.path.exists(output_file):
        os.unlink(output_file)
def run_test(test_dir, zone, image, ttype):
    test_id = os.path.basename(test_dir)
    LOG.info('running test %s', test_id)
    start_data = {
        'zone': zone,
        'image_name': image,
        'type': ttype,
        'duration': 0
    }
    start_report(test_id, start_data)
    tf = pt.Terraform(working_dir=test_dir, var_file='test_vars.tfvars')
    (rc, out, err) = tf.init()
    if rc > 0:
        results = {'terraform_failed': "init failure: %s" % err}
        stop_report(test_id, results)
        # nothing to apply or destroy if init failed
        return
    LOG.info('creating cloud resources for test %s', test_id)
    (rc, out, err) = tf.apply(dir_or_plan=False, skip_plan=True)
    if rc > 0:
        LOG.error('terraform failed for test: %s - %s', test_id, err)
        results = {'terraform_failed': "apply failure: %s" % err}
        stop_report(test_id, results)
    out = tf.output(json=True)
    now = datetime.datetime.utcnow()
    update_data = {
        'terraform_apply_result_code': rc,
        'terraform_output': out,
        'terraform_apply_completed_at': now.timestamp(),
        'terraform_apply_completed_at_readable':
            now.strftime('%Y-%m-%d %H:%M:%S UTC'),
        'duration': 1
    }
    update_report(test_id, update_data)
    # results = poll_report(test_id)
    # if not results:
    #     results = {"test timedout": "(%d seconds)" % int(CONFIG['test_timeout'])}
    #     stop_report(test_id, results)
    LOG.info('destroying cloud resources for test %s', test_id)
    (rc, out, err) = tf.destroy()
    if rc > 0:
        LOG.error('could not destroy test: %s: %s. Manually fix.',
                  test_id, err)
    else:
        shutil.move(test_dir, os.path.join(COMPLETE_DIR, test_id))
def inventory():
    output = {}
    vpc_path = "%s/vpcs" % os.path.dirname(os.path.realpath(__file__))
    zones = [f.path for f in os.scandir(vpc_path) if f.is_dir()]
    for zone in zones:
        if os.path.basename(zone) in ZONES:
            LOG.debug('getting inventory in zone %s', zone)
            tf = pt.Terraform(working_dir=zone)
            tf.init()
            zout = tf.output(json=True)
            output[os.path.basename(zone)] = zout
    json_out = json.dumps(output)
    with open("%s/%s" % (os.path.dirname(os.path.realpath(__file__)),
                         OUTPUT_FILE), 'w') as inventory:
        inventory.write(json_out)
def terraform(working_dir: str,
              data: str,
              logger: KopfObjectLogger,
              apply: bool = False,
              planId: str = '') -> tuple:
    logger.info(f"WORKING IN DIR: {working_dir}")
    Path(f"{working_dir}/main.tf.json").write_text(data)
    ptf = python_terraform.Terraform(working_dir=working_dir)
    return_code, stdout, stderr = ptf.init()
    assert return_code != 1, f"Terraform Init Failed {stderr}"
    logger.info('TERRAFORM INIT COMPLETE')
    return_code, stdout, stderr = ptf.plan(refresh=True, out='plan')
    response = stdout if not stderr else stderr
    logger.info(f"TERRAFORM PLAN COMPLETE {response}")
    return response, return_code
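# A minimal sketch of how the terraform()/terraform_destroy() helpers above
# might be wired into a kopf operator. The resource group/version/plural and
# the spec field holding the rendered Terraform JSON are assumptions for
# illustration, not from the source.
import kopf

@kopf.on.create('example.com', 'v1', 'terraformplans')
def on_create(spec, name, logger, **_):
    # write main.tf.json from the object's spec and produce a saved plan
    response, return_code = terraform(working_dir=name,
                                      data=spec['mainTfJson'],  # hypothetical field
                                      logger=logger,
                                      planId=name)
    return {'plan': response, 'code': return_code}

@kopf.on.delete('example.com', 'v1', 'terraformplans')
def on_delete(name, logger, **_):
    # tear down whatever the saved plan created
    response, return_code = terraform_destroy(planId=name, logger=logger)
    return {'destroy': response, 'code': return_code}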
def apply():
    vpc_path = "%s/vpcs" % os.path.dirname(os.path.realpath(__file__))
    zones = [f.path for f in os.scandir(vpc_path) if f.is_dir()]
    for zone in zones:
        if os.path.basename(zone) in ZONES:
            LOG.debug('applying resources in zone: %s',
                      os.path.basename(zone))
            tf = pt.Terraform(working_dir=zone)
            tf.init()
            # generate a fresh SSH keypair for this zone's instances
            key = RSA.generate(2048, os.urandom)
            ssh_public_key = key.exportKey('OpenSSH').decode('utf-8')
            ssh_private_key = key.export_key().decode('utf-8')
            LOG.info("running apply on %s", zone)
            tf.apply(dir_or_plan=False,
                     var={
                         'api_key': API_KEY,
                         'ssh_public_key': ssh_public_key,
                         'ssh_private_key': ssh_private_key
                     },
                     skip_plan=True,
                     capture_output=False)
def clean():
    if os.path.exists(RUNNING_DIR):
        test_dirs = os.listdir(RUNNING_DIR)
        left_over_tests = []
        for test_id in test_dirs:
            left_over_tests.append(test_id)
    report_url = "%s/report" % CONFIG['report_service_base_url']
    resp = requests.get(report_url)
    reports = resp.json()
    output = {}
    for test_id in reports:
        test_dir = os.path.join(RUNNING_DIR, test_id)
        if os.path.exists(test_dir):
            LOG.debug("Destroying %s", test_dir)
            tf = pt.Terraform(working_dir=test_dir,
                              var_file='test_vars.tfvars')
            tf.init()
            tf.destroy()
            shutil.rmtree(test_dir)
    print(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))
def create_resource(self, kind, metadata):
    logger.info("Creating {} resource".format(kind))
    if kind == 'tf_state':
        state_dir = "terraform.tfstate.d/{}".format(metadata['name'])
        os.makedirs(os.path.join(metadata['template_dir'], state_dir))
        with open(os.path.join(metadata['template_dir'], '.terraform',
                               'environment'), 'w') as file_handler:
            file_handler.write(metadata['name'])
        self.client = python_terraform.Terraform(
            working_dir=metadata['template_dir'],
            state="{}/terraform.tfstate".format(state_dir))
        return_code, raw_data, stderr = self.client.apply(
            no_color=python_terraform.IsFlagged,
            auto_approve=True,
            var=metadata['states'][0]['variables'])
        state_file = os.path.join(metadata['template_dir'], state_dir,
                                  'terraform.tfstate')
        if os.path.isfile(state_file):
            with open(state_file) as file_handler:
                metadata['states'][0]['state'] = json.loads(
                    file_handler.read())
            metadata['states'][0]['serial'] = \
                metadata['states'][0]['state']['serial']
            metadata['states'][0].pop('min_serial')
        return_code, raw_data, stderr = self.client.cmd(
            'output', json=python_terraform.IsFlagged)
        if return_code == 0:
            metadata['states'][0]['output'] = json.loads(raw_data)
        self._create_resource(metadata['name'], metadata['name'], 'tf_state',
                              metadata=metadata)
        self.save()
def get_resource_metadata(self, kind, uid=None):
    logger.info("Getting {} resources".format(kind))
    response = {}
    if kind == 'tf_template':
        path = self.metadata['template_path']
        if uid is None:
            templates = glob.glob('{}/*'.format(path))
        else:
            templates = ['{}/{}'.format(path, uid)]
        for template in templates:
            resource = []
            variable = []
            # *.tf files (HCL resources)
            files = glob.glob('{}/*.tf'.format(template))
            for filename in files:
                with open(filename) as file_handler:
                    name = filename.replace('{}/'.format(template), '')
                    resource.append({
                        'name': name,
                        'items': file_handler.read(),
                        'format': 'hcl'
                    })
            # *.tf.json files (JSON resources)
            files = glob.glob('{}/*.tf.json'.format(template))
            for filename in files:
                with open(filename) as file_handler:
                    name = filename.replace('{}/'.format(template), '')
                    resource.append({
                        'name': name,
                        'items': json.loads(file_handler.read()),
                        'format': 'json'
                    })
            # *.tfvars files (HCL variables)
            files = glob.glob('{}/*.tfvars'.format(template))
            for filename in files:
                with open(filename) as file_handler:
                    name = filename.replace('{}/'.format(template), '')
                    variable.append({
                        'name': name,
                        'items': file_handler.read(),
                        'format': 'hcl'
                    })
            # *.tfvars.json files (JSON variables)
            files = glob.glob('{}/*.tfvars.json'.format(template))
            for filename in files:
                with open(filename) as file_handler:
                    name = filename.replace('{}/'.format(template), '')
                    variable.append({
                        'name': name,
                        'items': json.loads(file_handler.read()),
                        'format': 'json'
                    })
            client = python_terraform.Terraform(working_dir=template)
            return_code, raw_data, stderr = client.init(
                reconfigure=python_terraform.IsFlagged, backend=False)
            if stderr == '':
                status = True
                init = raw_data
            else:
                status = False
                init = stderr
            data = {
                'init': init,
                'status': status,
                'resources': resource,
                'variables': variable
            }
            response[template.replace('{}/'.format(path), '')] = data
    elif kind == 'tf_state':
        path = self.metadata['template_path']
        if uid is None:
            templates = glob.glob('{}/*'.format(path))
        else:
            templates = ['{}/{}'.format(path, uid)]
        for template in templates:
            state = {}
            if os.path.isfile('{}/terraform.tfstate'.format(template)):
                with open('{}/terraform.tfstate'.format(
                        template)) as file_handler:
                    state['default'] = file_handler.read()
            files = glob.glob(
                '{}/terraform.tfstate.d/*/terraform.tfstate'.format(template))
            for filename in files:
                with open(filename) as file_handler:
                    name = filename.replace(
                        '{}/terraform.tfstate.d/'.format(template),
                        '').replace('/terraform.tfstate', '')
                    state[name] = file_handler.read()
            for name, content in state.items():
                data = {
                    'state': json.loads(content),
                    'template': template.replace('{}/'.format(path), '')
                }
                response[name] = data
    elif kind == 'tf_resource':
        return_code, raw_data, stderr = self.client.graph(
            no_color=python_terraform.IsFlagged)
        graph = graph_from_dot_data(raw_data)[0]
        # response = graph.obj_dict['subgraphs']['"root"'][0]['nodes']
    return response
def __init__(self):
    self.terraform = python_terraform.Terraform(working_dir='./terraform/')
def __init__(self, **kwargs):
    super(TerraformClient, self).__init__(**kwargs)
    self.client = python_terraform.Terraform(working_dir=self.config['dir'])
def destroy():
    # Ensure that the proper backend files are in the deployment directory
    utils.pre_destroy_check(DEPLOYMENT_DIR)
    tf = pt.Terraform()
    tf.destroy(capture_output=False)
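# Usage sketch for the deploy()/destroy() pair above; every argument value
# here is a placeholder assumption, not taken from the source.
if __name__ == "__main__":
    deploy(
        "https://github.com/example/project",  # hypothetical project link
        public_key_path="~/.ssh/id_rsa.pub",
        private_key_path="~/.ssh/id_rsa",
        py="3.8",
        instance_type="t2.micro",
    )
    # ...inspect the deployment, then tear it down...
    destroy()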
def destroy_cluster():
    # Ensure that the correct terraform state files exist in the deployment
    utils.pre_destroy_check(DEPLOYMENT_DIRECTORY)
    tf = pt.Terraform()
    tf.destroy(capture_output=False)
def main(): """ Launch a distributed task with VMs """ parser = argparse.ArgumentParser( description= 'This script launch a distributed task on VMs on the cloud or on bare metal' ) parser.add_argument( '--terraform-file', help='configuration file for instances to launch (YAML format)', dest='terraform_file') parser.add_argument('--task-file', help='list of tasks to process (YAML format)', dest='tasks_list') parser.add_argument( '--working-folder', help='working folder where terraform.tfstate will be stored', dest='work_folder', default='.') parser.add_argument('--task-name', help='name of global task to proceed', dest='task_name', default='datascience_task') parser.add_argument('--log-file', help='name of log file into work dir', dest='log_file', default='log.txt') parser.add_argument('--ssh-key', help='location of the private SSH key (.pem file)', dest='ssh_key', default=None) args = parser.parse_args() # get and apply terraform file tf_file = args.terraform_file work_dir = args.work_folder task_name = args.task_name log_file = args.log_file task_file = args.tasks_list ssh_key = args.ssh_key tfstate_name = work_dir + '/' + task_name + '.tfstate' tf = Terraform.Terraform(state=work_dir + '/' + task_name + '.tfstate') print tf.plan(work_dir, out=work_dir + '/' + task_name + '.out', refresh=True, no_color=Terraform.IsFlagged, state=work_dir + '/' + task_name + '.tfstate') print tf.apply(work_dir, state=work_dir + '/' + task_name + '.tfstate', refresh=True, no_color=Terraform.IsFlagged) print("waiting for everyone to be launched") time.sleep(10) with open(tfstate_name) as f: tfstate = json.load(f) hosts_list = tfstate['modules'][0] hosts_list = hosts_list['resources'] hosts_attributes = [] # get list of hosts attributes for instance in hosts_list: hosts_attributes.append(hosts_list[instance]['primary']['attributes']) # create all hosts file nb_hosts = len(hosts_attributes) with open("%s/host_%s.ini" % (work_dir, task_name), "w") as file: file.write('[task_' + task_name + ']\n') instance_counter = 1 for host in hosts_attributes: file.write('ds_instance_%s ansible_ssh_host=%s \n' % (instance_counter, host['public_ip'])) instance_counter += 1 # create hosts files by group + specific host files with group name nb_host = 0 for host in hosts_attributes: nb_hosts += 1 instance_counter = 1 for host in hosts_attributes: # create an all hosts file with open( "%s/host_%s_%s.ini" % (work_dir, host['tags.Group'], task_name), "w") as file: file.write('[task_' + task_name + '_' + host['tags.Group'] + ']\n') instance_counter += 1 instance_counter = 1 for host in hosts_attributes: # create an all hosts file with open( "%s/host_%s_%s.ini" % (work_dir, host['tags.Group'], task_name), "a") as file: file.write('ds_instance_%s ansible_ssh_host=%s \n' % (instance_counter, host['public_ip'])) instance_counter += 1 instance_counter = 1 for host in hosts_attributes: # create one file per host with open( "%s/host_%s_%s_%s.ini" % (work_dir, host['tags.Group'], task_name, instance_counter), "w") as file: file.write('[task_' + task_name + '_' + host['tags.Group'] + ']\n') file.write('ds_instance_%s ansible_ssh_host=%s \n' % (instance_counter, host['public_ip'])) instance_counter += 1 ### create specific host file + associated thread threads_list = [] host_counter = 1 for host in hosts_attributes: with open("%s/host_%s_%s.ini" % (work_dir, task_name, host_counter), "w") as file: file.write('[task_' + task_name + ']\n') file.write('ds_instance_%s ansible_ssh_host=%s \n' % (host_counter, host['public_ip'])) thread = 
taskThread(host_counter, task_name, host_counter, work_dir, taskQueue, log_file, ssh_key) threads_list.append(thread) host_counter += 1 ### add IPs to list of accepted SSH certificates add_linebreak = False ssh_directory = os.getenv("HOME") + '/.ssh' ssh_known_hosts_file = os.getenv("HOME") + '/.ssh/known_hosts' if not (os.path.exists(ssh_directory)): os.makedirs(ssh_directory) if (os.path.exists(ssh_known_hosts_file)): with open(ssh_known_hosts_file, 'r') as file: lines = file.read() for line in lines.split('\n'): if line != "": add_linebreak = False break #for host in hosts_attributes : #clean_ssh_key(host['public_ip']) print("adding SSH certificate") with open(os.getenv("HOME") + "/.ssh/known_hosts", "a") as file: if add_linebreak: file.write('\n') for host in hosts_attributes: #clean_ssh_key(host['public_ip']) line = add_ssh_key_for_host(host['public_ip']) file.write(line + '\n') ### fill task Queue + global tasks list global_tasks_list = [] queueLock.acquire() task_id = 1 with open(task_file, "r") as file: lines = file.read() #print(lines) for line in lines.split('\n'): if line != "": #print line task_specs = {} elts = line.split('\t') if ((len(elts) > 1) & (str(elts[2]) != "-1")): task_specs.update({"subtask_name": elts[0]}) task_specs.update({"playbook_file": elts[1]}) task_specs.update({"task_id": task_id}) taskQueue.put(task_specs) if ((len(elts) > 1) & (str(elts[2]) == "-1")): global_tasks_list.append([elts[0], elts[1]]) queueLock.release() ### launch global tasks with open(work_dir + "/" + log_file, "w") as file: file.write("Launching global tasks %s with %s hosts \n" % (task_name, host_counter)) for task in global_tasks_list: global_task(task_name, work_dir, task[1], task[0], log_file, ssh_key) ### launch threads if not taskQueue.empty(): #print(work_dir + "/" + log_file) with open(work_dir + "/" + log_file, "w") as file: file.write("Launching task %s with %s hosts \n" % (task_name, host_counter)) for t in threads_list: t.start() # Wait for queue to empty while not taskQueue.empty(): pass print "Empty queue" # Notify threads it's time to exit exitFlag = 1 # Wait for all threads to complete for t in threads_list: t.join() print "Closing all threads" with open(work_dir + "/" + log_file, "a") as file: file.write("Finished task %s with %s hosts \n" % (task_name, host_counter))
def rpm_build(spec=('s', 'spec-file', 'spec file for rpm build process'),
              target=('t', 'target',
                      'Operating system targeted by this package')):
    """ Build rpm """
    import os
    current_directory = os.path.dirname(os.path.realpath(__file__))
    mapping_configuration = os.path.join(current_directory, '..',
                                         'MAPPING.ini')

    import configparser
    internal_config = configparser.ConfigParser()
    internal_config.read(mapping_configuration)
    targets = internal_config.sections()
    if target not in targets:
        import sys
        sys.stderr.write('Unsupported platform')
        sys.exit(1)
    else:
        provider = internal_config[target]['provider']

    # Import provider method
    import importlib
    module = importlib.import_module('providers.%s' % provider)
    class_name = get_class_name(provider)
    cls = getattr(module, class_name)
    instance = cls()

    # Parse the spec file headers (Name:, Version:, Source0:, ...)
    import re
    lines = open(os.path.expanduser(spec)).readlines()
    binary_options = {}
    for line in lines:
        m = re.match(r'^([A-Za-z0-9]+):(.*)', line)
        if m:
            key = m.group(1).strip()
            value = m.group(2).strip()
            binary_options[key] = value

    import pystache
    config = configparser.ConfigParser()
    config.read(os.path.expanduser('~/.config/builder.cfg'))
    try:
        credentials = config.items(provider)
    except configparser.NoSectionError:
        raise RuntimeError('Unregistered provider')

    template = os.path.join(current_directory, '..', 'templates',
                            '%s.tf' % provider)
    user_data = os.path.join(current_directory, '..', 'profiles',
                             '%s.yml' % provider)

    import tempfile
    import paramiko
    tmp_file = tempfile.mkstemp()
    key = paramiko.RSAKey.generate(4096)
    key.write_private_key_file(tmp_file[1])

    tf_config = pystache.render(
        open(template).read(), {
            'credentials': dict(credentials),
            'command': {
                'identifier': internal_config[target]['internal']
            },
            'application': {
                'ssh_key': ssh_pub_key(tmp_file[1]),
                'user_data': open(user_data).read().replace('\n', '\\n')
            }
        })

    d = tempfile.mkdtemp()
    with open(os.path.join(d, 'main.tf'), 'w') as tf_file:
        tf_file.write(tf_config)

    import python_terraform
    option_dict = dict()
    # option_dict['input'] = False
    option_dict['auto-approve'] = True
    tf = python_terraform.Terraform(working_dir=d)
    try:
        return_code, stdout, stderr = tf.init()
    except FileNotFoundError:
        raise RuntimeError('Terraform not found')
    try:
        return_code, stdout, stderr = tf.plan(d)
    except FileNotFoundError:
        raise RuntimeError('Terraform not found')
    try:
        return_code, stdout, stderr = tf.apply(d, **option_dict)
    except FileNotFoundError:
        raise RuntimeError('Terraform not found')

    import json
    tfstate = json.loads(open(os.path.join(d, 'terraform.tfstate')).read())
    public_dns = tfstate['modules'][0]['resources'][
        '%s_%s.builder' % (provider.replace('_', ''), instance.__target__)
    ]['primary']['attributes']['ipv4_address']

    client = paramiko.SSHClient()
    # Avoid missing host entry
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    # SSH username
    username = '******'
    try:
        username = instance.__ssh__user__
    except AttributeError:
        pass

    import time
    while True:
        time.sleep(10)
        print('Waiting 10s for ssh -i %s %s@%s' %
              (tmp_file[1], username, public_dns))
        try:
            client.connect(public_dns, username=username,
                           key_filename=tmp_file[1])
        except Exception as e:
            print(e)
            continue
        else:
            break

    remote_env = {}
    stdin, stdout, stderr = client.exec_command('env')
    for line in stdout.readlines():
        # split on first occurrence since some var could contain =
        # (ex: DBUS_SESSION_BUS_ADDRESS)
        key, value = line.strip().split('=', 1)
        remote_env[key] = value

    # Wait for cloud-init to finish
    # TODO: use an agnostic way (non linux only) to check that instance is ready
    while True:
        stdin, stdout, stderr = client.exec_command('pgrep cloud-init')
        lines = stdout.readlines()
        line_number = len(lines)
        time.sleep(10)
        if line_number == 0:
            break

    cmd = ('mkdir -p %s/rpmbuild/{SPECS,RPMS,SRPMS,SOURCES,BUILD,BUILDROOT}'
           % remote_env['HOME'])
    client.exec_command(cmd)

    sftp = paramiko.SFTPClient.from_transport(client.get_transport())
    sftp.put(spec, '%s/rpmbuild/SPECS/%s' %
             (remote_env['HOME'], os.path.basename(spec)))

    import urllib.request
    archive = tempfile.mkstemp()
    archive_name = os.path.basename(binary_options['Source0'])
    urllib.request.urlretrieve(binary_options['Source0'], archive[1])
    package = os.path.basename(spec)
    sftp.put(archive[1], '%s/rpmbuild/SOURCES/%s' %
             (remote_env['HOME'], archive_name))

    stdin, stdout, stderr = client.exec_command(
        'rpmbuild -bp %s/rpmbuild/SPECS/%s' % (remote_env['HOME'], package),
        get_pty=True)
    for line in stdout.readlines():
        print(line.strip())
    stdin, stdout, stderr = client.exec_command(
        'rpmbuild -bc --short-circuit %s/rpmbuild/SPECS/%s' %
        (remote_env['HOME'], package),
        get_pty=True)
    for line in stdout.readlines():
        print(line.strip())
    stdin, stdout, stderr = client.exec_command(
        'rpmbuild -bi --short-circuit %s/rpmbuild/SPECS/%s' %
        (remote_env['HOME'], package),
        get_pty=True)
    for line in stdout.readlines():
        print(line.strip())
    stdin, stdout, stderr = client.exec_command(
        'rpmbuild -ba %s/rpmbuild/SPECS/%s' % (remote_env['HOME'], package),
        get_pty=True)

    files = []
    for line in stdout.readlines():
        print(line.strip())
        if line.startswith('Wrote'):
            _, path = line.split(':')
            files.append(path.strip())
    for binary in files:
        sftp.get(binary, '/tmp/%s' % os.path.basename(binary))
    client.close()

    try:
        return_code, stdout, stderr = tf.destroy(d)
    except FileNotFoundError:
        raise RuntimeError('Terraform not found')
    return True
def run_test(test_path):
    (zone, image, ttype, test_dir) = initialize_test_dir(test_path)
    test_id = os.path.basename(test_dir)
    LOG.info('running test %s', test_id)
    start_data = {
        'zone': zone,
        'image_name': image,
        'type': ttype
    }
    tf = pt.Terraform(working_dir=test_dir, var_file='test_vars.tfvars')
    LOG.info('initializing provider resources for %s', test_id)
    (rc, out, err) = tf.init()
    start_report(test_id, start_data)
    if rc > 0:
        results = {'terraform_failed': "init failure: %s" % err}
        stop_report(test_id, results)
        return
    LOG.info('creating cloud resources for test %s', test_id)
    (rc, out, err) = tf.apply(dir_or_plan=False, skip_plan=True)
    if rc > 0:
        LOG.error('terraform failed for test: %s - %s', test_id, err)
        results = {'terraform_failed': "apply failure: %s" % err}
        stop_report(test_id, results)
    out = tf.output(json=True)
    now = datetime.datetime.utcnow()
    update_data = {
        'terraform_apply_result_code': rc,
        'terraform_output': out,
        'terraform_apply_completed_at': now.timestamp(),
        'terraform_apply_completed_at_readable':
            now.strftime('%Y-%m-%d %H:%M:%S UTC')
    }
    update_report(test_id, update_data)
    results = poll_report(test_id)
    if not results:
        # the test never reported back within the timeout
        results = {"test timedout":
                   "(%d seconds)" % int(CONFIG['test_timeout'])}
        stop_report(test_id, results)
        if 'preserve_timed_out_instances' in CONFIG and \
                CONFIG['preserve_timed_out_instances']:
            LOG.error('preserving timedout instance for test: %s for debug',
                      test_id)
            os.makedirs(ERRORED_DIR, exist_ok=True)
            shutil.move(test_dir, os.path.join(ERRORED_DIR, test_id))
        else:
            LOG.info('destroying cloud resources for test %s', test_id)
            (rc, out, err) = tf.destroy()
            if rc > 0:
                LOG.error('could not destroy test: %s: %s. Manually fix.',
                          test_id, err)
            shutil.rmtree(test_dir)
    else:
        if results['results']['status'] == "ERROR":
            if 'preserve_errored_instances' in CONFIG and \
                    CONFIG['preserve_errored_instances']:
                LOG.error('preserving errored instance for test: %s for debug',
                          test_id)
                os.makedirs(ERRORED_DIR, exist_ok=True)
                shutil.move(test_dir, os.path.join(ERRORED_DIR, test_id))
            else:
                LOG.error('destroying cloud resources for errored test %s',
                          test_id)
                (rc, out, err) = tf.destroy()
                if rc > 0:
                    LOG.error('could not destroy test: %s: %s. Manually fix.',
                              test_id, err)
                shutil.rmtree(test_dir)
        else:
            LOG.info('destroying cloud resources for completed test %s',
                     test_id)
            (rc, out, err) = tf.destroy()
            if rc > 0:
                LOG.error('could not destroy test: %s: %s. Manually fix.',
                          test_id, err)
            if 'keep_completed_state' in CONFIG and \
                    CONFIG['keep_completed_state']:
                shutil.move(test_dir, os.path.join(COMPLETE_DIR, test_id))
            else:
                shutil.rmtree(test_dir)
def delete_workspace(directory):
    terraform = python_terraform.Terraform(working_dir=directory)
    terraform.destroy(force=True, capture_output=False)
    if directory != "provisioning":
        shutil.rmtree(directory)
#!/usr/bin/env python
import os
import json
import argparse
import python_terraform

# Branch (prod/stage)
branch = 'stage'

# external variables for app, db
external_var_name_app = 'external_ip_address_app'
external_var_name_db = 'external_ip_address_db'

os.chdir('../terraform/' + branch)
t = python_terraform.Terraform()
a = t.output()

inv = {
    "app": {
        "hosts": [a[external_var_name_app]['value']],
        "vars": {
            "db_host": a[external_var_name_db]['value']
        }
    },
    "db": {
        "hosts": [a[external_var_name_db]['value']],
        "vars": {
            "mongo_bind_ip": a[external_var_name_app]['value']
        }
    }
}

# The original snippet is truncated here; emitting the inventory as JSON is
# assumed, as that is the conventional contract for an Ansible
# dynamic-inventory script.
print(json.dumps(inv))
def auth(self):
    self.client = python_terraform.Terraform(
        working_dir=self.metadata['template_path'])
    self.status = python_terraform.Terraform(
        working_dir=self.metadata['template_path'])
    return True
def main(**kwargs):
    if shutil.which('terraform') is None:
        raise click.ClickException(
            "cannot find 'terraform' executable on PATH")
    opts = attrdict.AttrMap(kwargs)

    state_dir = pathlib.Path(opts.state or '.terraform-anygen')
    terraform_dir = state_dir / 'terraform'
    out_dir = state_dir / 'out'

    def rmtree_out_dir():
        if out_dir.exists():
            shutil.rmtree(str(out_dir))

    if not state_dir.exists():
        terraform_dir.mkdir(parents=True)

    debug_dir = state_dir / 'debug'
    if debug_dir.exists():
        shutil.rmtree(str(debug_dir))
    debug_dir.mkdir()

    terraform = python_terraform.Terraform(
        working_dir=terraform_dir,
        targets=['module.body.' + i for i in opts.target])

    if not opts.destroy:
        rmtree_out_dir()

    model_dir = pathlib.Path(opts.model or '.')
    path = [model_dir]
    age = anygen.AnygenEngine()
    ag = age.create(
        path=path,
        classes=['terraform' + i for i in
                 (['.' + j for j in opts.classes.split(',')]
                  if opts.classes else [''])])
    ag_result = _debugdump.wrap(ag.produce,
                                debug_dir / 'terraform')(*opts["def"])

    tf_data = attrdict.AttrDict()
    tf_data += ag_result.get("terraform", {})

    if opts.destroy:
        tf_data = {"provider": tf_data.get("provider", [])}
    else:
        data_external = {}
        path_str = os.pathsep.join(str(i.resolve()) for i in path)
        for k, v in ag_result.get("anygen", {}).items():
            query = {
                "path": path_str,
                "classes": v["classes"],
                "debug_dump": str(debug_dir.joinpath('anygen.' + k).absolute())
            }
            for k2, v2 in v.get("args", {}).items():
                query["arg_" + k2] = '${jsonencode("%s")}' % v2.replace(
                    '\\', '\\\\').replace('"', '\\"')
            data_external[k] = {
                "program": [sys.executable, "-m", "terraform_anygen._gen"],
                "query": query
            }
        if data_external:
            tf_data += {"data": {"external": data_external}}

        output = ag_result.get("output", None)
        if output:
            tf_data += {
                "output": dict((k, dict(value=v)) for k, v in output.items())
            }

    main_tf_data = attrdict.AttrDict()
    main_tf_data += {"module": {"body": {"source": "./body"}}}
    tf_backend = ag_result.get("backend", None)
    if tf_backend is not None:
        main_tf_data += {"terraform": {"backend": tf_backend}}
    dump_json(terraform_dir.joinpath("main.tf.json"), main_tf_data)

    body_module_dir = terraform_dir / "body"
    body_module_dir.mkdir(exist_ok=True)
    dump_json(body_module_dir.joinpath("main.tf.json"), tf_data)

    checked_tf(
        terraform.init(capture_output=False,
                       force_copy=opts.force_backend_copy),
        'init')

    if opts.destroy:
        checked_tf(
            terraform.destroy(capture_output=False,
                              force=opts.yes,
                              parallelism=opts.jobs,
                              no_color=python_terraform.IsNotFlagged,
                              refresh=opts.refresh),
            'destroy')
        rmtree_out_dir()
    else:
        checked_tf(
            terraform.apply(capture_output=False,
                            skip_plan=opts.yes,
                            parallelism=opts.jobs,
                            refresh=opts.refresh,
                            no_color=python_terraform.IsNotFlagged),
            'apply')

        on_success_classes = ag_result.get("on_success", {}).get("classes", [])
        if on_success_classes:
            tf_result = terraform.cmd('state pull', capture_output=True)
            checked_captured_tf(tf_result, 'state pull')
            tfstate = json.loads(tf_result[1])

            expected_outfiles = set()

            def jinjafilter_outfile(name):
                expected_outfiles.add(name)
                return out_dir.joinpath(name).resolve()

            ag_success = age.create(
                path=path,
                classes=on_success_classes,
                extras=dict(jinjafilter=dict(outfile=jinjafilter_outfile)))

            for i in tfstate["modules"]:
                if i["path"] == ["root", "body"]:
                    body_module_outputs = i["outputs"]
                    break
            else:
                raise click.ClickException(
                    "failed to find 'body' module in terraform state")
            body_module_outputs = dict(
                (k, v["value"]) for k, v in body_module_outputs.items())

            ag_success_result = \
                _debugdump.wrap(ag_success.produce,
                                debug_dir / 'on_success')(
                    outputs=body_module_outputs)

            out_dir.mkdir()
            for k, v in ag_success_result.get("files", {}).items():
                if isinstance(v, str):
                    v = dict(content=v)
                expected_outfiles.discard(k)
                rel_path = pathlib.Path(k)
                if rel_path.is_absolute() or '..' in rel_path.parts \
                        or not rel_path.parts:
                    raise click.ClickException(
                        "output file path cannot be absolute, contain '..', "
                        "or be empty: {}".format(rel_path))
                file_path = out_dir / rel_path
                file_path.parent.mkdir(parents=True, exist_ok=True)
                file_path.write_text(v["content"])
                file_mode = v.get("chmod", None)
                if file_mode is not None:
                    file_path.chmod(file_mode)

            if expected_outfiles:
                raise click.ClickException(
                    "the following files have been referenced via 'outfile' "
                    "but not produced: " +
                    ', '.join(sorted(expected_outfiles)))

            text = ag_success_result.get("text", None)
            if text:
                if not ag_success_result.get("plaintext", False):
                    text = ansimarkup.parse(text)
                click.echo(text)