def deploy(self):
    """Serialize the in-memory Terraform model to .overcloud/ and run init + apply."""
    workdir = '.overcloud'
    os.makedirs(workdir, exist_ok=True)
    # Dump the model as a machine-readable .tf.json plan file.
    with open('.overcloud/plan.tf.json', 'w') as fh:
        json.dump(self.tf.to_dict(), fh, indent=2)
    runner = TfCli(working_dir=workdir)
    runner.init(capture_output=False)
    runner.apply(capture_output=False)
class TerraformProvider:
    """Drives a terraform workspace deployment with a pluggable remote-state backend."""

    def __init__(self, configuration, terraform_workspace):
        log.info("Preparing terraform deployment")
        log.debug("Using workspace: {}".format(terraform_workspace))
        self._backend_provider = get_backend_provider(configuration,
                                                      terraform_workspace)
        # Controller is bound to the workspace directory and the configured vars.
        self._controller = Terraform(
            working_dir=terraform_workspace,
            variables=configuration["terraform"]["parameters"])
        self._controller.init(capture_output=False, force_copy=IsFlagged)

    @stage("Terraform deploy")
    def deploy(self):
        """Apply the infrastructure and publish the outputs as an artifact."""
        log.info("Deploying terraform infrastructure")
        self._backend_provider.init_remote_backend()
        self._controller.apply(capture_output=False, skip_plan=True)
        artifact.create("terraform_output",
                        content=json.dumps(self._controller.output()))

    def destroy(self):
        """Tear down the infrastructure, then remove the remote backend."""
        log.info("Destroying terraform infrastructure")
        self._controller.destroy(capture_output=False)
        self._backend_provider.remove_remote_backend()
def test_apply_with_var_file(self, caplog: LogCaptureFixture):
    """Apply with an explicit var_file; -var-file must appear exactly once."""
    target = "var_to_output"
    with caplog.at_level(logging.INFO):
        runner = Terraform(working_dir=current_path)
        runner.init(target)
        runner.apply(
            target,
            var_file=os.path.join(current_path, "tfvar_files", "test.tfvars"),
        )
    for message in caplog.messages:
        if message.startswith("Command: terraform apply"):
            assert message.count("-var-file=") == 1
def test_output(self, caplog: LogCaptureFixture, output_all: bool):
    """Exercise `terraform output` both for all outputs and a single name."""
    expected_value = "test"
    required_output = "test_output"
    with caplog.at_level(logging.INFO):
        runner = Terraform(working_dir=current_path,
                           variables={"test_var": expected_value})
        runner.init("var_to_output")
        runner.apply("var_to_output")
        # No positional args -> dump every output; one arg -> just that value.
        args = () if output_all else (required_output,)
        result = runner.output(*args)
    if output_all:
        assert result[required_output]["value"] == expected_value
    else:
        assert result == expected_value
    assert expected_value in caplog.messages[-1]
def vm_docker_deploy_old(config):
    """Deploy the selected instance plus a docker stack, collect the docker
    outputs into config["logs"], then destroy both stacks.

    Returns the (mutated) config dict; short-circuits with logs=None when no
    instance was selected.
    """
    # This script should deploy the instance and return the output/logs after the test has finished
    file_dir = os.path.dirname(os.path.realpath(__file__))
    provider = config["params"]["Provider"][0]
    ### Check that a selection was made
    if config["selection"]["instance"] is None:
        config["logs"] = None
        return config
    ### Setup terraform objects
    instance_wkdir = file_dir + "/instance_deploy/" + provider
    instance_tf = Terraform(working_dir=instance_wkdir)
    docker_tf = Terraform(file_dir + "/docker_deploy")
    # Per-job local state directory keeps concurrent jobs isolated.
    tfstate_path = config["base_dir"] + '/tf_states/' + str(config["job_id"])
    tfvars = config["base_dir"] + "/tfvars.tfvars"  ## ALSO DIRECT TO A VARS.TF IN THE BASE_DIR
    instance_tf.init(
        backend_config={'path': tfstate_path + '/terraform.tfstate'})
    instance_tf.apply(var_file=tfvars,
                      lock=False,
                      var={'instance_type': config["selection"]["instance"]},
                      skip_plan=True)
    docker_tf.init(backend_config={
        'path': tfstate_path + '/docker_tfstate/terraform.tfstate'
    })
    docker_tf.apply(var_file=tfvars,
                    lock=False,
                    var={'tfstate_path': tfstate_path},
                    skip_plan=True)
    # NOTE(review): init is re-run before every output/destroy below,
    # presumably to re-point the local backend — confirm this is required.
    docker_tf.init(backend_config={
        'path': tfstate_path + '/docker_tfstate/terraform.tfstate'
    })
    logs = docker_tf.output()
    config["logs"] = logs
    docker_tf.init(backend_config={
        'path': tfstate_path + '/docker_tfstate/terraform.tfstate'
    })
    docker_tf.destroy(auto_approve=True)
    # Tear down the docker stack before the instance that hosts it.
    instance_tf.init(
        backend_config={'path': tfstate_path + '/terraform.tfstate'})
    instance_tf.destroy(auto_approve=True)
    return config
def setup_terraform(version, environment, aws_region, terraform_bin_path, s3_bucket_name):
    """Fixture-style generator: apply the terraform stack, yield to the test,
    then destroy it.

    Raises Exception (after a best-effort cleanup destroy) when apply fails,
    and when the final destroy fails.
    """
    print(f"deploy test to region {aws_region}")
    tf = Terraform(working_dir=full_path, terraform_bin_path=terraform_bin_path)
    var_tf = {
        "module_version": version,
        "tiingo_api_key": os.environ.get("TIINGO_API_KEY"),
        "aws_region": aws_region,
        "environment": environment,
        "s3_bucket_name": s3_bucket_name,
    }
    tf.init()
    ret_code, out, err = tf.apply(skip_plan=True, var=var_tf)
    if ret_code != 0:
        # Preserve the apply error: the original re-bound `err` to the destroy
        # result, so the raised "Error applying" message showed destroy stderr.
        apply_err = err
        print(apply_err)
        # Best-effort cleanup before surfacing the apply failure.
        tf.destroy(var=var_tf)
        raise Exception("Error applying terraform. Error \n {}".format(apply_err))
    yield
    ret_code, out, err = tf.destroy(var=var_tf)
    if ret_code != 0:
        print(err)
        # Typo fixed: "detroying" -> "destroying".
        raise Exception("Error destroying terraform. Error \n {}".format(err))
def terraform_apply(env_data, tf: Terraform):
    """Apply with up to 5 attempts (30 s back-off between retries).

    env_data -- path to the tfvars file passed as var_file.
    Returns a dict with tf_return_code, tf_outputs and tf_state.
    """
    return_code = 0
    for attempt in range(5):
        logger.debug("Try {}".format(attempt))
        return_code, stdout, stderr = tf.apply(skip_plan=True,
                                               var_file=env_data,
                                               capture_output=True)
        logger.debug('Terraform apply return code is {}'.format(return_code))
        logger.debug('Terraform apply stdout is {}'.format(stdout))
        logger.debug("Terraform apply stderr is {}".format(stderr))
        if return_code == 0:
            break
        if attempt < 4:
            # Back off before retrying; the original also slept 30 s after the
            # final failed attempt, which only delayed the error report.
            time.sleep(30)
    if return_code == 0:
        show_return_code, tf_state, stdout = tf.show(json=True)
        logger.debug(
            'Terraform show return code is {}'.format(show_return_code))
        logger.debug('Terraform show stdout is {}'.format(stdout))
        tf_outputs = tf.output()
        for output_value in tf_outputs:
            logger.debug('Terraform output value is {}'.format(output_value))
    else:
        # TODO get output for errors
        tf_state = {}
        tf_outputs = {}
        traceback.print_stack()
    return {
        "tf_return_code": return_code,
        "tf_outputs": tf_outputs,
        "tf_state": tf_state
    }
def execute_terraform(working_dir):
    """Execute terraform code to setup cloud resources, including servers,
    networks and so on.

    Arguments:
        working_dir {string} -- The path of the terraform working directory.

    Returns:
        tuple -- (return_code, stdout, stderr) of the apply, or exits the
        process when the plan errors or the user declines.
    """
    tf = Terraform(working_dir=working_dir)
    hidden_dir = path.realpath(path.join(working_dir, ".terraform"))
    # Only init once: the ".terraform" directory marks an initialised tree.
    if not path.isdir(hidden_dir):
        tf.init()
    # Run terraform plan.
    plan_return_code, plan_stdout, plan_stderr = tf.plan()
    # If there is no error, the stderr returned is unicode instead of None,
    # so check its length rather than its truthiness.
    if len(plan_stderr) == 0:
        print(plan_stdout)
        # `raw_input` only exists on Python 2; fall back to `input` on Python 3
        # (the rest of this codebase is Python 3).
        try:
            prompt = raw_input
        except NameError:
            prompt = input
        input_str = prompt("Do you want to perform these actions? Only 'yes' will be accepted to approve.\n Enter a value:")
        if input_str == "yes":
            print("Start setting up cloud resources ...")
            return tf.apply(skip_plan=True)
        else:
            print("Apply cancelled.")
            sys.exit()
    else:
        print(plan_stderr)
        sys.exit()
def create_bastion(plateform):
    """Apply terraform/layer-bastion in the platform's workspace.

    Raises Exception when the apply exits non-zero.
    """
    tf = Terraform(working_dir='terraform/layer-bastion')
    # Exit code 1 from `workspace select` means the workspace does not exist.
    # NOTE(review): skip_plan is an apply-only option; passing it to cmd() is
    # presumably a copy-paste no-op — confirm against python-terraform.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)
    if code == 1:
        tf.cmd("workspace new " + plateform['name'],
               capture_output=False,
               no_color=IsNotFlagged,
               skip_plan=IsNotFlagged)
    code, _, _ = tf.apply(var={
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'instance_type': plateform['infrastructure']['bastion']['instance-type'],
        'instance_image': plateform['infrastructure']['bastion']['image'],
        'preemptible': plateform['infrastructure']['bastion']['preemptible'],
    },
                          capture_output=False,
                          no_color=IsNotFlagged,
                          skip_plan=True,
                          auto_approve=True)
    if code != 0:
        raise Exception("error in Terraform layer-bastion")
def create_project():
    """Apply the terraform layer-project stack; raise on non-zero exit."""
    runner = Terraform(working_dir='terraform/layer-project')
    exit_code, _, _ = runner.apply(capture_output=False,
                                   no_color=IsNotFlagged,
                                   skip_plan=IsNotFlagged)
    if exit_code != 0:
        raise Exception("error in Terraform layer-project")
class Terraform:
    """Workspace-aware wrapper around python_terraform bound to ./terraform.

    Every operation raises CwsCommandError on a non-zero terraform exit code.
    """

    def __init__(self, init):
        # Imported lazily so the dependency is only needed when instantiated.
        from python_terraform import Terraform as PythonTerraform
        self.terraform = PythonTerraform(working_dir='terraform')
        Path(self.working_dir).mkdir(exist_ok=True)
        if init:
            return_code, _, err = self.terraform.init(
                dir_or_plan=self.working_dir)
            if return_code != 0:
                raise CwsCommandError(err)

    @property
    def working_dir(self):
        # Directory python-terraform runs all commands in.
        return self.terraform.working_dir

    def init(self):
        """Run `terraform init` in the working directory."""
        return_code, _, err = self.terraform.init()
        if return_code != 0:
            raise CwsCommandError(err)

    def apply(self, workspace, targets):
        """Apply only the given resource targets inside `workspace`."""
        self.select_workspace(workspace)
        return_code, _, err = self.terraform.apply(target=targets,
                                                   skip_plan=True,
                                                   input=False,
                                                   raise_on_error=False,
                                                   parallelism=1)
        if return_code != 0:
            raise CwsCommandError(err)

    def destroy(self, workspace, targets):
        """Destroy only the given resource targets inside `workspace`."""
        self.select_workspace(workspace)
        return_code, _, err = self.terraform.destroy(target=targets)
        if return_code != 0:
            raise CwsCommandError(err)

    def output(self):
        """Return {name: value} of the default workspace's outputs.

        NOTE(review): returns the STRING "{}" (not an empty dict) when there
        are no outputs — callers appear to depend on this; confirm before
        changing.
        """
        self.select_workspace("default")
        values = self.terraform.output(capture_output=True)
        return {key: value['value'] for key, value in values.items()} if values else "{}"

    def workspace_list(self):
        """Yield workspace names parsed from `terraform workspace list`."""
        self.select_workspace("default")
        return_code, out, err = self.terraform.cmd('workspace', 'list')
        if return_code != 0:
            raise CwsCommandError(err)
        # Strip the leading selection marker and all inline whitespace,
        # then drop empty entries.
        values = out[1:].translate(str.maketrans('', '', ' \t\r')).split('\n')
        return filter(None, values)

    def select_workspace(self, workspace):
        """Select `workspace`, creating it on demand (except for 'default')."""
        return_code, out, err = self.terraform.workspace('select', workspace)
        if workspace != 'default' and return_code != 0:
            _, out, err = self.terraform.workspace('new', workspace,
                                                   raise_on_error=True)
        # Re-init lazily if the directory was never initialised.
        if not (Path(self.working_dir) / '.terraform').exists():
            self.terraform.init(input=False, raise_on_error=True)
def test_apply(self, folder, variables, var_files, expected_output, options):
    """init + apply must succeed and the condensed stdout must contain the expectation."""
    runner = Terraform(working_dir=current_path,
                       variables=variables,
                       var_file=var_files)
    runner.init(folder)
    code, stdout, stderr = runner.apply(folder, **options)
    assert code == 0
    # Strip newlines and spaces so the assertion is layout-independent.
    condensed = stdout.replace("\n", "").replace(" ", "")
    assert expected_output in condensed
    assert stderr == ""
def run_terraform(directory, terraform_vars, target_module):
    """Init `directory` from `target_module`, write the vars file, and apply.

    NOTE(review): `directory + "terraform.tfvars.json"` assumes `directory`
    ends with a path separator — confirm callers always pass a trailing slash.
    """
    terraform = Terraform(directory)
    terraform.init(from_module=target_module)
    with open(directory + "terraform.tfvars.json", "w") as fh_:
        fh_.write(json.dumps(terraform_vars))
    # NOTE(review): the apply result is captured but never returned or checked;
    # raise_on_error=True is the only failure signal.
    ret_code, stdout, stderr = (terraform.apply(auto_approve=True,
                                                capture_output=False,
                                                raise_on_error=True))
def create_terraform_stack(cluster_name, tf_vars, dir_path, state_path):
    """Run get/init/apply for a cluster stack; returns the apply result tuple."""
    # Hostnames may only contain alphanumerics and dashes.
    tf_vars['hostname'] = re.sub(r"[^a-zA-Z0-9]+", '-', cluster_name).lower()
    state_file = "{}/{}.tfstate".format(state_path, tf_vars['cluster_uuid'])
    tf_vars_file = create_tf_vars_file(state_path, tf_vars)
    runner = Terraform(dir_path)
    # get/init results are not inspected; only the apply result is returned.
    runner.get(capture_output=False)
    runner.init(capture_output=False)
    return runner.apply(var_file=tf_vars_file,
                        skip_plan=True,
                        auto_approve=IsFlagged,
                        capture_output=False,
                        state=state_file)
def test_override_default(self, folder, variables):
    """A -var passed at apply time must override the constructor default."""
    runner = Terraform(working_dir=current_path, variables=variables)
    runner.init(folder)
    _, stdout, _ = runner.apply(
        folder,
        var={"test_var": "test2"},
        no_color=IsNotFlagged,
    )
    flattened = stdout.replace("\n", "")
    # With color enabled the Apply banner carries ANSI escape codes.
    assert "\x1b[0m\x1b[1m\x1b[32mApply" in flattened
    assert "test2" in runner.output("test_output")
def apply_terraform(self, cluster, hosts_dict):
    """Generate terraform files for the cluster and stream an async apply.

    Returns True when the apply subprocess exits 0.
    """
    if not self.working_path:
        self.working_path = create_terrafrom_working_dir(cluster_name=cluster.name)
    generate_terraform_file(self.working_path, self.cloud_config_path,
                            cluster.plan.mixed_vars, hosts_dict)
    self.init_terraform()
    t = Terraform(working_dir=self.working_path)
    # NOTE(review): with synchronous=False python-terraform hands back the
    # subprocess handle instead of (rc, out, err) — `p` is treated as a Popen
    # below; confirm against the installed python-terraform version.
    p, _, _ = t.apply('./',
                      refresh=True,
                      skip_plan=True,
                      no_color=IsNotFlagged,
                      synchronous=False)
    # Stream apply output live, then drain stderr once the process ends.
    for i in p.stdout:
        print(i.decode())
    _, err = p.communicate()
    print(err.decode())
    return p.returncode == 0
class TerraformUtils:
    """Helper around python-terraform bound to one working dir with fixed
    var/state file names; raises on apply failure."""

    # File names (relative to working_dir) used for variables and state.
    VAR_FILE = "terraform.tfvars.json"
    STATE_FILE = "terraform.tfstate"

    def __init__(self, working_dir: str):
        logging.info("TF FOLDER %s ", working_dir)
        self.working_dir = working_dir
        self.var_file_path = os.path.join(working_dir, self.VAR_FILE)
        self.tf = Terraform(working_dir=working_dir,
                            state=self.STATE_FILE,
                            var_file=self.VAR_FILE)
        self.init_tf()

    def init_tf(self) -> None:
        """Run terraform init using only locally pre-installed plugins."""
        self.tf.cmd("init -plugin-dir=/root/.terraform.d/plugins/",
                    raise_on_error=True)

    def apply(self, refresh: bool = True) -> None:
        """Apply without a plan step; raise with full output on failure."""
        return_value, output, err = self.tf.apply(no_color=IsFlagged,
                                                  refresh=refresh,
                                                  input=False,
                                                  skip_plan=True)
        if return_value != 0:
            message = f"Terraform apply failed with return value {return_value}, output {output} , error {err}"
            logging.error(message)
            raise Exception(message)

    def change_variables(self, variables: Dict[str, str],
                         refresh: bool = True) -> None:
        """Merge `variables` into the tfvars file in place, then re-apply."""
        with open(self.var_file_path, "r+") as _file:
            tfvars = json.load(_file)
            tfvars.update(variables)
            # Rewrite the file from the start so shorter content isn't
            # left with trailing bytes from the previous version.
            _file.seek(0)
            _file.truncate()
            json.dump(tfvars, _file)
        self.apply(refresh=refresh)

    def get_state(self) -> str:
        """Reload and return the parsed tfstate."""
        self.tf.read_state_file(self.STATE_FILE)
        return self.tf.tfstate

    def set_new_vips(self, api_vip: str, ingress_vip: str) -> None:
        """Update the API/ingress VIP variables and re-apply."""
        self.change_variables(variables={
            "api_vip": api_vip,
            "ingress_vip": ingress_vip
        },
                              refresh=True)

    def destroy(self) -> None:
        """Destroy all managed resources without prompting."""
        self.tf.destroy(force=True, input=False, auto_approve=True)
def apply():
    """Apply ./terraform using the region from the 'profile pollexy' AWS profile.

    Prints terraform's stderr when present, otherwise its stdout.
    """
    tf = Terraform(working_dir='./terraform')
    parser = SafeConfigParser()
    config = os.path.expanduser('~/.aws/config')
    parser.read(config)
    # Bail out early when the AWS profile has not been configured yet.
    if not parser.has_section('profile pollexy'):
        # Python-2 `print` statements converted to py3 function calls
        # (the rest of this codebase targets Python 3).
        print("You need to run 'pollexy credentials configure'")
        return
    region = parser.get('profile pollexy', 'region')
    print('Applying environment . . . ')
    code, stdout, stderr = tf.apply(var={'aws_region': region})
    if stderr:
        print(stderr)
    else:
        print(stdout)
def apply_tf(working_dir, vars, description): """ Handles terraform operations and returns variables in outputs.tf as a dict. :param working_dir: Directory that contains the tf files :param vars: Additional variables passed in to override defaults equivalent to -var :param description: Description of the deployment for logging purposes :return: return_code - 0 for success or other for failure outputs - Dictionary of the terraform outputs defined in the outputs.tf file """ # Set run_plan to TRUE is you wish to run terraform plan before apply run_plan = False kwargs = {"auto-approve": True} # Class Terraform uses subprocess and setting capture_output to True will capture output capture_output = kwargs.pop('capture_output', False) if capture_output is True: stderr = subprocess.PIPE stdout = subprocess.PIPE else: # if capture output is False, then everything will essentially go to stdout and stderrf stderr = sys.stderr stdout = sys.stdout start_time = time.asctime() print('Starting Deployment at {}\n'.format(start_time)) # Create Bootstrap tf = Terraform(working_dir=working_dir) tf.cmd('init') if run_plan: # print('Calling tf.plan') tf.plan(capture_output=False) return_code, stdout, stderr = tf.apply(vars=vars, capture_output=capture_output, skip_plan=True, **kwargs) outputs = tf.output() logger.debug('Got Return code {} for deployment of {}'.format( return_code, description)) return (return_code, outputs)
def create(pathToInf):
    """Apply the IaC rooted at *pathToInf* and return (outputs, return_code)."""
    runner = Terraform(pathToInf)
    # Apply directly, skipping the separate planning stage.
    return_code, stdout, stderr = runner.apply(skip_plan=True)
    outputs = runner.output()
    # Surface everything for debugging: errors first, then stdout, then outputs.
    for chunk in (stderr, stdout, outputs):
        print(chunk)
    return outputs, return_code
def apply_terraform(self, cluster):
    """Render terraform files from the cluster plan plus its hosts and stream
    an async apply.

    Returns True when the apply subprocess exits 0.
    """
    vars = cluster.plan.mixed_vars
    hosts = []
    for host in cluster.terraform_hosts.all():
        hosts.append(host.to_dict())
    vars['hosts'] = hosts
    if not self.working_path:
        self.working_path = create_terrafrom_working_dir(cluster_name=cluster.name)
    generate_terraform_file(self.working_path, self.cloud_config_path, vars)
    self.init_terraform()
    t = Terraform(working_dir=self.working_path)
    # NOTE(review): with synchronous=False python-terraform returns the
    # subprocess handle instead of (rc, out, err) — `p` is used as a Popen
    # below; confirm against the installed python-terraform version.
    p, _, _ = t.apply('./',
                      refresh=True,
                      skip_plan=True,
                      no_color=IsNotFlagged,
                      synchronous=False)
    # Stream apply output live, then drain stderr once the process ends.
    for i in p.stdout:
        print(i.decode())
    _, err = p.communicate()
    print(err.decode())
    return p.returncode == 0
def orchestration():
    """Apply ./openstack (no plan stage) and return its terraform outputs."""
    runner = Terraform("./openstack")
    return_code, stdout, stderr = runner.apply(skip_plan=True)
    # Surface the raw apply results for debugging.
    for item in (return_code, stdout, stderr):
        print(item)
    return runner.output()
def build_controller():
    """Init (S3 + DynamoDB backend) and apply the controller stack.

    Exits the process with status 1 on apply failure; otherwise returns the
    stack's terraform outputs.
    """
    bucket_backend = 'bucket=' + str(s3_bucket)
    table_backend = 'dynamodb_table=' + str(s3_bucket)
    backend_configs = [bucket_backend, table_backend]
    tf_base = Terraform(workdir_ctrl)
    tf_base.init(backend_config=backend_configs)
    # apply() returns a (return_code, stdout, stderr) tuple.
    return_code = tf_base.apply(skip_plan=True,
                                capture_output=False,
                                var_file=tfvars)
    # Check the exit code BEFORE querying outputs (the original asked for
    # outputs first and only treated code == 1 — not any non-zero — as failure).
    if return_code[0] != 0:
        print("Something went wrong!")
        sys.exit(1)
    print("All good!")
    return tf_base.output(capture_output=True)
def create_data(plateform, admin_password, app_password, unique_id):
    """Apply terraform/layer-data (CloudSQL) in the platform's workspace.

    admin_password/app_password arrive as bytes and are decoded here.
    Raises Exception when the apply exits non-zero.
    """
    tf = Terraform(working_dir='terraform/layer-data')
    # Exit code 1 from `workspace select` means the workspace does not exist.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)
    if code == 1:
        tf.cmd("workspace new " + plateform['name'],
               capture_output=False,
               no_color=IsNotFlagged,
               skip_plan=IsNotFlagged)
    code, _, _ = tf.apply(var={
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'database_version': plateform['infrastructure']['cloudsql']['version'],
        'database_instance_type': plateform['infrastructure']['cloudsql']['instance-type'],
        'database_disk_size': plateform['infrastructure']['cloudsql']['disk-size'],
        'admin_password': admin_password.decode("utf-8"),
        'app_password': app_password.decode("utf-8"),
        "unique_id": unique_id,
        'env': plateform['type']
    },
                          capture_output=False,
                          no_color=IsNotFlagged,
                          skip_plan=True,
                          auto_approve=True)
    if code != 0:
        raise Exception("error in Terraform layer-data")
def vm_provision(config):
    """Provision the selected instance and return (config, tf handle, its IP).

    Short-circuits with logs=None when no instance was selected.
    """
    print(
        f"Provisioning {config['selection']['instance']} instance from {config['params']['Provider']}"
    )
    file_dir = os.path.dirname(os.path.realpath(__file__))
    provider = config["params"]["Provider"][0]
    ### Check that a selection was made
    if config["selection"]["instance"] is None:
        config["logs"] = None
        return config
    ### Setup terraform objects
    instance_wkdir = file_dir + "/instance_deploy/" + provider
    instance_tf = Terraform(working_dir=instance_wkdir)
    # Per-job local state keeps concurrent jobs isolated.
    tfstate_path = config["base_dir"] + '/tf_states/' + str(config["job_id"])
    tfvars = config["base_dir"] + "/tfvars.tfvars"
    instance_tf.init(
        backend_config={'path': tfstate_path + '/terraform.tfstate'})
    # Ensure Ctrl-C tears the instance down again.
    signal.signal(signal.SIGINT,
                  partial(keyboard_interrupt_handler, instance_tf, config))
    apply = instance_tf.apply(
        var_file=tfvars,
        lock=False,
        var={'instance_type': config["selection"]["instance"]},
        skip_plan=True)
    print(apply)
    # Scrape the IP out of the apply stdout (apply[1]); the +17 offset is
    # len("docker_host_ip = ") so the slice keeps only the address.
    # NOTE(review): raises AttributeError if the output line is missing —
    # the commented-out `output()` lookup below would be more robust; confirm.
    find = re.search(r"docker_host_ip = [\d.]+", apply[1])
    instance_ip = apply[1][(find.regs[0][0] + 17):find.regs[0][1]]
    # instance_tf.init(backend_config={'path':tfstate_path + '/terraform.tfstate'})
    # instance_ip = instance_tf.output()["docker_host_ip"]["value"]
    print(
        f"{config['selection']['instance']} instance created at {instance_ip}")
    return config, instance_tf, instance_ip
def create_base(plateform):
    """Apply terraform/layer-base (network ranges) in the platform's workspace.

    Raises Exception when the apply exits non-zero.
    """
    tf = Terraform(working_dir='terraform/layer-base')
    # Exit code 1 from `workspace select` means the workspace does not exist.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)
    if code == 1:
        tf.cmd("workspace new " + plateform['name'],
               capture_output=False,
               no_color=IsNotFlagged,
               skip_plan=IsNotFlagged)
    code, _, _ = tf.apply(var={
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'range-ip': plateform['infrastructure']['range-ip'],
        'range-ip-pod': plateform['infrastructure']['range-ip-pod'],
        'range-ip-svc': plateform['infrastructure']['range-ip-svc'],
        'range-plateform': plateform['infrastructure']['range-plateform'],
        'allowed-ips': plateform['ips_whitelist'],
        'env': plateform['type']
    },
                          capture_output=False,
                          no_color=IsNotFlagged,
                          skip_plan=True,
                          auto_approve=True)
    if code != 0:
        raise Exception("error in Terraform layer-base")
def create_kubernetes(plateform):
    """Apply terraform/layer-kubernetes in the platform's workspace, then run
    the post-apply shell script.

    Raises Exception when the apply exits non-zero — and, unlike the previous
    version, skips the post-apply script in that case.
    """
    tf = Terraform(working_dir='terraform/layer-kubernetes')
    # Exit code 1 from `workspace select` means the workspace does not exist.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)
    if code == 1:
        tf.cmd("workspace new " + plateform['name'],
               capture_output=False,
               no_color=IsNotFlagged,
               skip_plan=IsNotFlagged)
    # Master-authorized networks: up to four configured IPs (padded with a
    # placeholder), plus the caller's current public IP as the fifth entry.
    whitelist = ['10.0.0.1/32'] * 4
    configured = plateform['infrastructure']['gke']['ips_whitelist']
    for index, address in enumerate(configured[:4]):
        whitelist[index] = address + "/32"
    ip_1, ip_2, ip_3, ip_4 = whitelist
    ip_5 = get('https://api.ipify.org').text + "/32"
    code, _, _ = tf.apply(var={
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'k8s-version': plateform['infrastructure']['gke']['version'],
        'preemptible': plateform['infrastructure']['gke']['preemptible'],
        'instance-type': plateform['infrastructure']['gke']['instance-type'],
        'white-ip-1': ip_1,
        'white-ip-2': ip_2,
        'white-ip-3': ip_3,
        'white-ip-4': ip_4,
        'white-ip-5': ip_5,
        'min_node': plateform['infrastructure']['gke']['min'],
        'max_node': plateform['infrastructure']['gke']['max'],
        'range_ip_master': plateform['infrastructure']['range-ip-master']
    },
                          capture_output=False,
                          no_color=IsNotFlagged,
                          skip_plan=True,
                          auto_approve=True)
    # Fail BEFORE running the post-apply script (the original executed the
    # script even when the apply had already failed).
    if code != 0:
        raise Exception("error in Terraform layer-kubernetes")
    print("Post Apply script execution...")
    subprocess.call([
        "terraform/layer-kubernetes/apply_post.sh", plateform['name'],
        plateform['gcp-project']
    ])
def main(fwUsername, fwPasswd):
    """Deploy WebInDeploy, apply WAF rules, wait for the firewall to boot,
    push firewall config, and finally poll the Jenkins server behind the ALB.

    fwUsername/fwPasswd -- Palo Alto firewall API credentials.
    Writes a status file after each terraform stage; exits on stage failure.
    """
    albDns = ''
    nlbDns = ''
    fwMgt = ''
    # Set run_plan to True if you wish to run `terraform plan` before apply.
    run_plan = False
    deployment_status = {}
    # Extra CLI options forwarded to every terraform apply.
    kwargs = {"auto-approve": True}
    #
    # Build Infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)
    return_code1, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            **kwargs)
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        deployment_status = {'WebInDeploy': 'Fail'}
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status = {'WebInDeploy': 'Success'}
        write_status_file(deployment_status)
    # Terraform outputs of the infrastructure stage.
    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fw_trust_ip = fwMgt
    #
    # Apply WAF Rules
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying WAF config to App LB")
    if run_plan:
        tf.plan(capture_output=False, var={'alb_arn': nlbDns}, **kwargs)
    # NOTE(review): 'alb_arn' is fed the NLB DNS name, not an ARN — confirm
    # the waf_conf module really expects a DNS value here.
    return_code3, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'alb_arn': nlbDns,
                                                'int-nlb-fqdn': nlbDns
                                            },
                                            **kwargs)
    if return_code3 != 0:
        logger.info("waf_conf failed")
        deployment_status.update({'waf_conf': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'waf_conf': 'Success'})
        write_status_file(deployment_status)
    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #
    class FWNotUpException(Exception):
        # Currently unused: the raise sites below are commented out.
        pass

    err = 'no'
    api_key = ''
    api_key = getApiKey(fw_trust_ip, fwUsername, fwPasswd)
    # Poll until both the management plane and the dataplane report up.
    while True:
        err = getFirewallStatus(fw_trust_ip, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
            # raise FWNotUpException('FW is not up! Request Timeout')
        elif err == 'no':
            logger.info("FW is not up...yet")
            print("FW is not up...yet")
            time.sleep(60)
            continue
            # raise FWNotUpException('FW is not up!')
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("[INFO]: FW is up")
            break
    fw = firewall.Firewall(hostname=fw_trust_ip,
                           api_username=fwUsername,
                           api_password=fwPasswd)
    logger.info("Updating firewall with latest content pack")
    updateHandle = updater.ContentUpdater(fw)
    updateHandle.download()
    logger.info("Waiting 3 minutes for content update to download")
    time.sleep(210)
    updateHandle.install()
    #
    # Configure Firewall
    #
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying addtional config to firewall")
    if run_plan:
        tf.plan(capture_output=False,
                var={
                    'mgt-ipaddress-fw1': fwMgt,
                    'int-nlb-fqdn': nlbDns
                })
    return_code2, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'mgt-ipaddress-fw1': fwMgt,
                                                'nlb-dns': nlbDns,
                                                'aws_access_key': aws_access_key,
                                                'aws_secret_key': aws_secret_key
                                            },
                                            **kwargs)
    if return_code2 != 0:
        logger.info("WebFWConfy failed")
        deployment_status.update({'WebFWConfy': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'WebFWConf': 'Success'})
        write_status_file(deployment_status)
    logger.info("Commit changes to firewall")
    fw.commit()
    logger.info('Checking if Jenkins Server is ready')
    # Poll Jenkins behind the ALB up to max_tries times.
    # NOTE(review): the `else: break` binding (inner vs outer if) was ambiguous
    # in the collapsed source; bound to the outer `count < max_tries` here so
    # the loop terminates after max_tries failures — confirm.
    count = 0
    max_tries = 3
    while True:
        if count < max_tries:
            res = getServerStatus(albDns)
            if res == 'server_down':
                count = count + 1
                time.sleep(2)
                continue
            elif res == 'server_up':
                break
        else:
            break
    logger.info('Jenkins Server is ready')
    logger.info('\n\n ### Deployment Complete ###')
    logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns))
parser.add_argument("--lambdas") return parser.parse_args() if __name__ == "__main__": args = parse_args() tf = Terraform(working_dir=args.workdir) tf.init() if args.apply: for lf in args.lambdas.split(","): name = lf.split("/")[-1].split(".")[0] zip_lambda_function(lf, f"{args.workdir}/{name}.zip") tf.apply( no_color=IsFlagged, refresh=False, var_file=args.tfvars, skip_plan=True, capture_output=False, ) elif args.destroy: tf.destroy( no_color=IsFlagged, var_file=args.tfvars, capture_output=False ) else: raise ValueError("Action not specified.")
def main(username, password, aws_access_key, aws_secret_key, aws_region,
         ec2_key_pair, bootstrap_bucket):
    """Deploy WebInDeploy, apply WAF rules, wait for and configure the
    firewall, then check the Jenkins server and dump the status dict as JSON.
    """
    # NOTE(review): these self-assignments are no-ops left over from a refactor.
    username = username
    password = password
    aws_access_key = aws_access_key
    aws_secret_key = aws_secret_key
    aws_region = aws_region
    ec2_key_pair = ec2_key_pair
    albDns = ''
    nlbDns = ''
    fwMgt = ''
    # NOTE(review): default_vars is never used below.
    default_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region
    }
    WebInDeploy_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'bootstrap_s3bucket': bootstrap_bucket
    }
    waf_conf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'alb_arn': albDns,
        'nlb-dns': nlbDns
    }
    WebInFWConf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'mgt-ipaddress-fw1': fwMgt,
        'nlb-dns': nlbDns,
        'username': username,
        'password': password
    }
    # Set run_plan to True if you wish to run terraform plan before apply.
    run_plan = False
    kwargs = {"auto-approve": True}
    # Class Terraform uses subprocess; capture_output=True would capture output.
    capture_output = kwargs.pop('capture_output', False)
    # NOTE(review): these stdout/stderr bindings are immediately overwritten by
    # the first tf.apply() below — vestigial subprocess plumbing.
    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, everything goes to stdout and stderr
        stderr = sys.stderr
        stdout = sys.stdout
    start_time = time.asctime()
    print(f'Starting Deployment at {start_time}\n')
    # Build Infrastructure
    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        tf.plan(capture_output=False, var=WebInDeploy_vars)
    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)
    web_in_deploy_output = tf.output()
    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))
    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_deploy_output', web_in_deploy_output)
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')
    # Terraform outputs of the infrastructure stage.
    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fwMgtIP = fwMgt
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    WebInFWConf_vars['nlb-dns'] = nlbDns
    WebInDeploy_vars['alb_dns'] = albDns
    WebInDeploy_vars['nlb-dns'] = nlbDns
    #
    # Apply WAF Rules
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying WAF config to App LB")
    if run_plan:
        # NOTE(review): `var=vars` passes the BUILTIN `vars` function — almost
        # certainly meant waf_conf_vars; only reachable when run_plan is True.
        tf.plan(capture_output=capture_output, var=vars, **kwargs)
    return_code3, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=waf_conf_vars,
                                            **kwargs)
    waf_conf_out = tf.output()
    update_status('waf_conf_output', waf_conf_out)
    # update_status('waf_conf_stdout', stdout)
    # update_status('waf_conf_stderr', stderr)
    logger.debug('Got Return code to deploy waf_conf {}'.format(return_code3))
    if return_code3 != 0:
        logger.info("waf_conf failed")
        update_status('waf_conf_status', 'error')
        update_status('waf_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('waf_conf_status', 'success')
    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))
    #
    # Check firewall is up and running
    #
    api_key = getApiKey(fwMgtIP, username, password)
    # Poll until both the management plane and the dataplane report up.
    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
        elif err == 'no':
            logger.info("FW is not up...yet")
            time.sleep(60)
            continue
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("FW is up")
            break
    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions'
    )
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP,
                           api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")
    update_fw(fwMgtIP, api_key)
    updateHandle = updater.ContentUpdater(fw)
    # updateHandle.download(fw)
    # logger.info("Waiting 3 minutes for content update to download")
    # time.sleep(210)
    # updateHandle.install()
    #
    # Configure Firewall
    #
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying addtional config to firewall")
    # Update initial vars with the generated fwMgt ip.
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)
    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars,
                                            **kwargs)
    web_in_fw_conf_out = tf.output()
    update_status('web_in_fw_conf_output', web_in_fw_conf_out)
    # update_status('web_in_fw_conf_stdout', stdout)
    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))
    if return_code2 != 0:
        logger.error("WebFWConfy failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')
    logger.info("Commit changes to firewall")
    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")
    #
    # Check Jenkins
    #
    logger.info('Checking if Jenkins Server is ready')
    # FIXME - add outputs for all 3 dirs
    res = getServerStatus(albDns)
    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n ### Deployment Complete ###')
        logger.info(
            '\n\n Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n ### Deployment Complete ###')
    # dump out status to stdout
    print(json.dumps(status_output))