with open(__terraform_main_tf, 'w') as fOut: __content = get_terraform_init_file_contents(logger=logger) print(__content, file=fOut) except Exception as ex: was_exception = True extype, ex, tb = sys.exc_info() logger.exception('EXCEPTION -> {}'.format(__terraform_main_tf), ex) finally: if (was_exception): logger.info('terraform init file EXCEPTION!') else: logger.info('terraform init saved -> "{}"'.format(__terraform_main_tf)) logger.info('END!!! terraform init.') else: logger.info('BEGIN: terraform dry-run (performing diagnostics but does not make any changes to any resources).') tf = Terraform(working_dir=terraform_root) resp = tf.init(backend=False) logger.info('BEGIN: Reading "{}".'.format(__docker_compose_location)) docker_compose_data = load_docker_compose(__docker_compose_location, logger=logger) if (is_json): __json = json.dumps(docker_compose_data, indent=3) save_docker_compose_data(__docker_compose_location, __json) logger.info('END!!! Reading "{}".'.format(__docker_compose_location)) was_exception = False try: with open(__terraform_main_tf, 'w') as fOut: __content = get_terraform_file_contents(docker_compose_data, do_init=False, aws_ecs_cluster_name=__aws_ecs_cluster_name, aws_ecs_repo_name=__aws_ecs_repo_name, docker_compose_location=__docker_compose_location, aws_creds=aws_creds, aws_config=aws_config, aws_creds_src=__aws_creds_src__, aws_config_src=__aws_config_src__, aws_default_region=__aws_default_region__, aws_cli_ecr_describe_repos=__aws_cli_ecr_describe_repos__, aws_ecs_compute_engine=__aws_ecs_compute_engine) #_content = pretty_json(__content) # this does not work, at this time. print(__content, file=fOut)
def main(aws_access_key, aws_secret_key, aws_region):
    """Tear down the demo stacks: ./waf_conf first, then ./WebInDeploy.

    Runs ``terraform destroy`` in each working directory, passing the AWS
    credentials and region through as Terraform variables.  Exits the process
    on completion: bare ``exit()`` if the waf_conf destroy fails, ``exit(1)``
    if the WebInDeploy destroy fails, ``exit(0)`` on success.
    """
    albDns = ''
    nlbDns = ''
    fwMgt = ''
    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}
    # Renamed from `vars` to avoid shadowing the builtin.
    tf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
    }

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    # BUG FIX: was `vars=vars`, which python-terraform translates into an
    # unrecognised `-vars` CLI option; the special-cased `var=` kwarg is what
    # emits the `-var key=value` pairs terraform expects.
    return_code1, stdout, stderr = tf.cmd('destroy', capture_output=True,
                                          var=tf_vars, **kwargs)
    # return_code1 =0
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        # BUG FIX: this stage destroys ./waf_conf, not WebInDeploy, so log the
        # correct stack name on failure.
        logger.info("Failed to destroy waf_conf ")
        exit()
    else:
        logger.info("Destroyed waf_conf Successfully")

    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy', capture_output=True,
                                          var=tf_vars, **kwargs)
    # return_code1 =0
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        # BUG FIX: the failure branch previously logged "WebInDeploy destroyed".
        logger.info("Failed to destroy WebInDeploy")
        print('Failed to Destroy WebInDeploy')
        exit(1)
    else:
        print('Destroyed WebInDeploy Successfully')
        exit(0)
import os

from python_terraform import Terraform, IsFlagged

# DO_PATH = os.path.abspath(os.path.join(os.curdir, "configs", "digitalocean"))
# Absolute paths to the per-provider Terraform configuration directories,
# resolved relative to the process's current working directory.
DO_PATH = os.path.abspath(os.path.join(os.curdir, "configs", "digitalocean"))
AWS_PATH = os.path.abspath(os.path.join(os.curdir, "configs", "aws"))

# Module-level Terraform handle (no working_dir: defaults to the current dir).
t = Terraform()


def get_instance_type(provider: str, mem: str, cpu: str):
    """Map a (cpu, mem) requirement onto a provider-specific instance slug.

    Builds a "<cpu>vcpu-<mem>gb" key and looks it up in the per-provider
    mapping table.  Returns the provider's slug (e.g. "s-1vcpu-1gb" for
    DigitalOcean) or None when the provider or the combination is unknown.
    """
    resource = str(cpu) + "vcpu-" + str(mem) + "gb"
    # Only DigitalOcean sizes are mapped so far; other providers fall through
    # to None.
    mappings = {"DigitalOcean": {}}
    mappings["DigitalOcean"] = {
        "1vcpu-1gb": "s-1vcpu-1gb",
        "1vcpu-2gb": "s-1vcpu-2gb",
        "3vcpu-1gb": "s-3vcpu-1gb",
        "2vcpu-2gb": "s-2vcpu-2gb",
        "1vcpu-3gb": "s-1vcpu-3gb",
        "2vcpu-4gb": "s-2vcpu-4gb",
        "4vcpu-8gb": "s-4vcpu-8gb",
    }
    if provider in mappings:
        if resource in mappings[provider]:
            return mappings[provider][resource]
    return None


# NOTE(review): truncated in this view -- the body of `father` is not visible
# here, so only the header is reproduced.
async def father(details=None):
def init(pathtoPlatform):
    """Run ``terraform init`` in *pathtoPlatform* and return its result tuple."""
    platform_tf = Terraform(pathtoPlatform)
    return platform_tf.init()
def terraform(terraform_dir):
    """Factory: build a Terraform wrapper bound to *terraform_dir*."""
    handle = Terraform(working_dir=terraform_dir)
    return handle
class Provisioner:
    """Provision and tear down the rabe-gitops base pipeline with Terraform.

    deploy() clones the base repository, applies the cloud-specific
    'base-pipeline' configuration and persists the GitHub token plus a freshly
    generated webhook secret in AWS SSM; delete() restores the token from SSM,
    removes both parameters and destroys the pipeline.
    """

    # Deployment context; keys read below include 'project', 'cloud',
    # 'aws.profile', 'aws.region', 'github.owner', 'github.repo',
    # 'github.token' -- TODO confirm full schema against callers.
    status = {}
    base_repo_url = 'https://github.com/rabe-gitops/base.git'
    base_local_path = 'base'
    # Shared python-terraform handle; the target path is passed per call.
    tf = Terraform()

    def __init__(self, status):
        self.status = status

    def deploy(self):
        """Clone the base repo, apply the base pipeline, store secrets in SSM."""
        Repo.clone_from(self.base_repo_url, self.base_local_path)
        # NOTE(review): `github` is created but never used afterwards.
        github = Github(base_url="https://api.github.com",
                        login_or_token=self.status['github.token'])
        self.tf.init(os.path.join(self.base_local_path,
                                  self.status['cloud'].lower(),
                                  'base-pipeline'),
                     capture_output=True)
        # Terraform reads these through TF_VAR_* environment variables.  The
        # GitHub token is pop()ped so it no longer lives in self.status after
        # this point.
        os.environ['TF_VAR_GITHUB_TOKEN'] = self.status.pop('github.token')
        os.environ['TF_VAR_WEBHOOK_SECRET'] = ''.join(
            random.choices(string.ascii_uppercase + string.digits, k=12))
        self.tf.apply(os.path.join(self.base_local_path,
                                   self.status['cloud'].lower(),
                                   'base-pipeline'),
                      input=False,
                      no_color=IsFlagged,
                      compact_warnings=IsFlagged,
                      capture_output=False,
                      auto_approve=IsFlagged,
                      skip_plan=True,
                      var={
                          'PROJECT': self.status['project'],
                          'AWS_PROFILE': self.status['aws.profile'],
                          'AWS_REGION': self.status['aws.region'],
                          'GITHUB_OWNER': self.status['github.owner'],
                          'GITHUB_REPOSITORY': self.status['github.repo'],
                          'GITHUB_BRANCH': 'master'
                      })
        # Persist the secrets in SSM so delete() can recover the token later.
        ssm_client = boto3.Session(
            profile_name=self.status['aws.profile']).client('ssm')
        ssm_client.put_parameter(
            Name='-'.join([self.status['project'].lower(), 'github', 'token']),
            Value=os.environ['TF_VAR_GITHUB_TOKEN'],
            Type='SecureString',
            Tags=[{
                'Key': 'Project',
                'Value': self.status['project']
            }, {
                'Key': 'Name',
                'Value': '-'.join([self.status['project'].lower(), 'github',
                                   'token'])
            }])
        ssm_client.put_parameter(
            Name='-'.join(
                [self.status['project'].lower(), 'webhook', 'secret']),
            Value=os.environ['TF_VAR_WEBHOOK_SECRET'],
            Type='SecureString',
            Tags=[{
                'Key': 'Project',
                'Value': self.status['project']
            }, {
                'Key': 'Name',
                'Value': '-'.join([self.status['project'].lower(), 'webhook',
                                   'secret'])
            }])
        # Scrub the secrets from this process's environment.
        del os.environ['TF_VAR_GITHUB_TOKEN']
        del os.environ['TF_VAR_WEBHOOK_SECRET']

    def delete(self):
        """Delete the SSM parameters and destroy the base pipeline."""
        ssm_client = boto3.Session(
            profile_name=self.status['aws.profile']).client('ssm')
        # Recover the GitHub token first so Terraform can authenticate the
        # destroy, then remove both stored parameters.
        os.environ['TF_VAR_GITHUB_TOKEN'] = ssm_client.get_parameter(
            Name='-'.join([self.status['project'].lower(), 'github', 'token']),
            WithDecryption=True)['Parameter']['Value']
        ssm_client.delete_parameter(
            Name='-'.join([self.status['project'].lower(), 'github', 'token']))
        ssm_client.delete_parameter(Name='-'.join(
            [self.status['project'].lower(), 'webhook', 'secret']))
        self.tf.destroy(os.path.join(self.base_local_path,
                                     self.status['cloud'].lower(),
                                     'base-pipeline'),
                        input=False,
                        no_color=IsFlagged,
                        compact_warnings=IsFlagged,
                        capture_output=False,
                        auto_approve=IsFlagged,
                        var={
                            'PROJECT': self.status['project'],
                            'AWS_PROFILE': self.status['aws.profile'],
                            'AWS_REGION': self.status['aws.region'],
                            'GITHUB_OWNER': self.status['github.owner'],
                            'GITHUB_REPOSITORY': self.status['github.repo'],
                            'GITHUB_BRANCH': 'master'
                        })
        del os.environ['TF_VAR_GITHUB_TOKEN']
def __init__(self, working_dir):
    """Bind this wrapper to a Terraform working directory.

    Stores the directory, derives the tfvars file path, builds the underlying
    python-terraform handle and runs the instance's init_tf() bootstrap.
    """
    logging.info("TF FOLDER %s ", working_dir)
    self.working_dir = working_dir
    # Full path to the variable file inside the working directory.
    self.var_file_path = os.path.join(working_dir, self.VAR_FILE)
    # NOTE(review): STATE_FILE / VAR_FILE are class attributes declared
    # outside this view -- confirm their values on the enclosing class.
    self.tf = Terraform(working_dir=working_dir,
                        state=self.STATE_FILE,
                        var_file=self.VAR_FILE)
    self.init_tf()
def main(username, password):
    """Destroy the Azure demo stacks: ./WebInDeploy first, then ./WebInBootstrap.

    Reads the resource-group names from the existing WebInDeploy state's
    outputs, feeds them into both variable sets, and runs ``terraform
    destroy`` in each directory.  Exits the process after either stage.
    """
    username = username
    password = password
    WebInDeploy_vars = {'Admin_Username': username, 'Admin_Password': password}
    WebInBootstrap_vars = {
        'Admin_Username': username,
        'Admin_Password': password
    }
    albDns = ''
    nlbDns = ''
    fwMgt = ''
    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    # Resource-group names come from the already-deployed state's outputs.
    rg_name = tf.output('RG_Name')
    attack_rg_name = tf.output('Attacker_RG_Name')
    logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(
        rg_name, attack_rg_name))
    WebInDeploy_vars.update({'RG_Name': rg_name})
    WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name})
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy', var=WebInDeploy_vars,
                                          capture_output=False, **kwargs)
    # return_code1 =0
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        logger.info("Failed to destroy build ")
        exit()
    else:
        logger.info("Destroyed WebInDeploy ")

    WebInBootstrap_vars.update({'RG_Name': rg_name})
    WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name})

    tf = Terraform(working_dir='./WebInBootstrap')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy', var=WebInBootstrap_vars,
                                          capture_output=False, **kwargs)
    # return_code1 =0
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        # BUG FIX: the failure branch previously logged "WebInBootstrap
        # destroyed" and recorded the result under a 'WebInDeploy' key.
        logger.info("Failed to destroy WebInBootstrap")
        deployment_status = {'WebInBootstrap': 'Fail'}
        exit()
    else:
        deployment_status = {'WebInBootstrap': 'Success'}
        exit()
def test_fmt(self, fmt_test_file):
    """`terraform fmt -diff` must exit cleanly in the test working dir."""
    runner = Terraform(working_dir=current_path, variables={"test_var": "test"})
    exit_code, _stdout, _stderr = runner.fmt(diff=True)
    assert exit_code == 0
#!python
from python_terraform import Terraform, IsFlagged

# One handle per IaC layer; the base layer must be applied before the
# users layer that builds on top of it.
layers = {
    'base': Terraform(working_dir='iac/layer-base'),
    'users': Terraform(working_dir='iac/layer-users'),
}
layers['base'].apply(auto_approve=IsFlagged, capture_output=False)
layers['users'].apply(auto_approve=IsFlagged, capture_output=False)

# The users layer exposes the environment URL for smoke testing.
url_pf = layers['users'].output('env_url')
print("Test with: " + url_pf)
def test_pre_load_state_data(self):
    """Terraform() should eagerly parse the state file named via `state=`."""
    state_dir = os.path.join(current_path, "test_tfstate_file")
    runner = Terraform(working_dir=state_dir, state="tfstate.test")
    root_path = runner.tfstate.modules[0]["path"]
    assert root_path == ["root"]
def test_state_default_backend(self):
    """read_state_file() without a name must load the default backend state."""
    state_dir = os.path.join(current_path, "test_tfstate_file3")
    runner = Terraform(working_dir=state_dir)
    runner.read_state_file()
    module_path = runner.tfstate.modules[0]["path"]
    assert module_path == ["default_backend"]
def test_options(self, cmd, args, options, fmt_test_file):
    """Parametrised: the named Terraform method succeeds and prints nothing."""
    runner = Terraform(working_dir=current_path)
    method = getattr(runner, cmd)
    exit_code, stdout, _stderr = method(*args, **options)
    assert exit_code == 0
    assert stdout == ""
def create_sandbox(sandboxdata):
    """Provision a sandbox project via the 'sandbox_creation' Terraform config.

    Builds the Terraform variable dict from *sandboxdata* and the Eagle
    config, initialises the remote-state backend, applies the configuration
    and returns the terraform_apply response (or a mock response when
    mock_mode is set).

    Raises:
        DacError: wrapping any exception hit while assembling the variables.
    """
    ec_config = EagleConfigHelper.config_dict
    terraform_source_path = get_terraform_path('sandbox_creation')
    terraform_state_bucket = ec_config['terraform_state_bucket']
    terraform_backend_prefix = get_sandbox_backend_prefix(
        sandboxdata.get("id"), ec_config['tb_discriminator'])
    tf_data = dict()
    try:
        sandbox_id = sandboxdata['id']
        logger.debug("sandbox_id is %s", sandbox_id)
        # Random suffix keeps names unique across re-creations.
        random_string = random_element(num_chars=6)
        tf_data['sandbox_id'] = sandbox_id
        deployment_folder_id = sandboxdata['deploymentFolderId']
        tf_data['deployment_folder_id'] = deployment_folder_id
        tf_data['sandbox_name'] = "{}-{}".format(sandboxdata["name"],
                                                 random_string)  # TODO rename
        tf_data['sandbox_folder_name'] = "{}-{}".format(
            folderize(sandboxdata["name"]), random_string)  # TODO rename
        region = ec_config['region']
        tf_data['region'] = region
        tf_data['billing_account'] = ec_config['billing_account']
        tb_discriminator = ec_config['tb_discriminator']
        tf_data['tb_discriminator'] = tb_discriminator  # added to ensure all resources can be deleted and recreated
        # TODO pass region_zone in - comes from UI?
        tf_data['region_zone'] = region + "-b"
        logger.debug("tf_data {}".format(tf_data))
        tf_data['sandbox_project_id'] = "sandbox-{}-{}".format(
            random_element(num_chars=6), tb_discriminator)
        # Optional team group becomes an IAM binding on the project.
        iam_accounts = list()
        team_cloud_identity_group = sandboxdata.get('teamCloudIdentityGroup', None)
        if team_cloud_identity_group != None:
            iam_accounts.append("group:{}".format(team_cloud_identity_group))
        tf_data["iam_accounts"] = iam_accounts
        # Resource labels applied to the sandbox project.
        labels = dict()
        labels['environment'] = "sandbox"
        labels['team'] = labellize(sandboxdata['teamName'])
        labels['created-by'] = labellize(sandboxdata.get('createdBy', 'labeltba'))
        labels['cost-code'] = labellize(sandboxdata['costCode'])
        labels['business-unit'] = labellize(sandboxdata['businessUnit'])
        labels['sandbox-id'] = sandbox_id
        tf_data['labels'] = labels
    except Exception as ex:
        logger.debug(traceback.format_exc())
        # NOTE(review): the result of this second format_exc() call is
        # discarded -- looks like a leftover.
        traceback.format_exc()
        raise DacError(ex, "Error occurred in deploy sandbox")
    if mock_mode:
        return mock_response()
    else:
        # Call terraform
        tf = Terraform(working_dir=terraform_source_path, variables=tf_data)
        terraform_init(terraform_backend_prefix, terraform_state_bucket, tf)
        response = terraform_apply(None, tf)
        logger.debug("response {}".format(response))
        return response
def apply_configuration(app_work_dir): logger.info('Running Terraform') # Prepare Terraform tf = Terraform( working_dir=app_work_dir, terraform_bin_path=os.path.join(os.environ.get('HOME'), 'terraform'), ) # Initialise Terraform configuration status, stdout, stderr = tf.init() if status: for line in stderr.split('\n'): logger.error(line) raise Exception('`terraform init` failed') for line in stdout.split('\n'): logger.debug(line) # Iterate Terraform apply attempts count = 0 retry = True while retry and count < 10: count += 1 retry = False # Attempt apply logger.info('Apply attempt #{}'.format(count)) status, stdout, stderr = tf.apply( var={ 'docker_host': os.environ.get('DOCKER_HOST'), 'registry_host': os.environ.get('REGISTRY_HOST'), }, skip_plan=True, # ^ `skip_plan` replaces auto-approve, intentionally or otherwise # https://github.com/beelit94/python-terraform/issues/84#issuecomment-648896385 ) # Record success and exit if not status: logger.info('Apply successful') for line in stdout.split('\n'): logger.debug(line) return # Log apply error logger.info('Apply failed') for line in stderr.split('\n'): logger.debug(line) # Attempt resolution if status: # Missing Docker image? missing_image = identify_missing_image(app_work_dir, stderr) if missing_image: logger.info('Identified missing Docker image - {}'.format( missing_image)) gateway.request_image_transfer.delay(missing_image).wait() retry = True continue # Accept defeat logger.error( 'Apply iteration failed to succeed in {} attempts'.format(count)) raise Exception('`terraform apply` failed too many times')
""" Configure terraform script to run desired build """ from os.path import dirname, join, abspath from python_terraform import Terraform WORKING_DIR = dirname(abspath(__file__)) ANSIBLE_DIR = join(dirname(abspath(dirname(abspath(__file__)))), 'ansible') print("BUILD DEBUG: Working in: {}".format(WORKING_DIR)) print("BUILD DEBUG: Ansible in: {}".format(ANSIBLE_DIR)) TF = Terraform(working_dir=WORKING_DIR) EXAMPLE_VARS = { "container_name" : "bodaddy", "image_name": "thedosh/ubuntu14", "tags": "vuln-ftp", # these tags are for what vuln to install, see ansible/playbook.yml for more tags "ansible_dir": ANSIBLE_DIR } from .api import create, destroy, get_running_machines
def remotestate():
    """Initialise and apply the remote-state configuration, then fetch outputs.

    Note: the collected outputs are read but not returned (mirrors the
    existing behaviour).
    """
    state_tf = Terraform(remotestate_dic)
    state_tf.init()
    # state_tf.plan(capture_output=False, var_file=tfvars)
    state_tf.apply(skip_plan=True, capture_output=False, var_file=tfvars)
    state_output = state_tf.output(capture_output=True)
def init_terraform(dir='.', backend_config=''):
    """Build a Terraform handle for *dir*, run `terraform init`, return it.

    The parameter name `dir` shadows the builtin but is kept for caller
    compatibility (it is part of the public keyword interface).
    """
    handle = Terraform(working_dir=dir)
    handle.init(capture_output=False, backend_config=backend_config)
    return handle
def create_solution(solutiondata):
    """Provision a solution via the 'solution_creation' Terraform config.

    Builds the Terraform variable dict from *solutiondata* and the Eagle
    config (environments, team members, shared-VPC host project, labels),
    initialises the remote-state backend, applies the configuration, grants
    the team access to the deployment folders, and returns the
    terraform_apply response (or a mock response when mock_mode is set).

    Raises:
        DacError: wrapping any exception hit while assembling the variables.
    """
    ec_config = EagleConfigHelper.config_dict
    terraform_source_path = get_terraform_path('solution_creation')
    terraform_state_bucket = ec_config['terraform_state_bucket']
    terraform_backend_prefix = get_solution_backend_prefix(
        solutiondata.get("id"), ec_config['tb_discriminator'])
    tf_data = dict()
    try:
        solution_id = solutiondata['id']
        logger.debug("solution_id is %s", solution_id)
        tf_data['solution_id'] = solution_id
        tf_data['cost_code'] = labellize(solutiondata['costCentre'])
        tf_data['business_unit'] = labellize(solutiondata['businessUnit'])
        deployment_folder_id = solutiondata['deploymentFolderId']
        tf_data['deployment_folder_id'] = deployment_folder_id
        tf_data['created_by'] = labellize(solutiondata.get('createdBy', 'labeltba'))
        # Normalise the environment entries in place before handing them over.
        environments = solutiondata.get("environments", list())
        for environment in environments:
            environment['name'] = sanitize(environment['name'])
            environment['shared_vpc_host_project'] = environment['sharedVPCProjectId']
        tf_data['environments'] = environments
        tf_data['solution_name'] = solutiondata["name"]
        # Team members become user:<email> IAM identities.
        team: dict = solutiondata['team']
        tf_data['team'] = labellize(team['name'])
        team_members = list()
        for team_member in team['teamMembers']:
            member_email = team_member['user']['email']
            team_members.append("user:{}".format(member_email))
        tf_data['team_members'] = [x for x in team_members]
        region = ec_config['region']
        tf_data['region'] = region
        tf_data['billing_account'] = ec_config['billing_account']
        shared_vpc_host_project = ec_config['shared_vpc_host_project']
        if shared_vpc_host_project != None:
            tf_data['shared_vpc_host_project'] = shared_vpc_host_project
        else:
            logger.info("Shared VPC Host Project not supplied - network will not be overridden")
        tb_discriminator = ec_config['tb_discriminator']
        tf_data['tb_discriminator'] = tb_discriminator  # added to ensure all resources can be deleted and recreated
        tf_data['random_element'] = random_element(num_chars=6)
        # TODO pass region_zone in - comes from UI?
        tf_data['region_zone'] = region + "-b"
        logger.debug("tf_data {}".format(tf_data))
    except Exception as ex:
        logger.debug(traceback.format_exc())
        # NOTE(review): the result of this second format_exc() call is
        # discarded -- looks like a leftover.
        traceback.format_exc()
        raise DacError(ex, "Error occurred in deploy solution")
    if mock_mode:
        return mock_response()
    else:
        # Call terraform
        tf = Terraform(working_dir=terraform_source_path, variables=tf_data)
        terraform_init(terraform_backend_prefix, terraform_state_bucket, tf)
        response = terraform_apply(None, tf)
        logger.debug("response {}".format(response))
        add_access_to_folders(bottom_level_folder_id=deployment_folder_id,
                              users=team_members,
                              top_level_folder_id=ec_config["activator_folder_id"])
        return response
def main(fwUsername, fwPasswd):
    """Deploy WebInDeploy, apply WAF rules, configure the firewall, check Jenkins.

    Pipeline: terraform-apply ./WebInDeploy, then ./waf_conf, poll the PAN-OS
    firewall until its dataplane is up, update its content pack, terraform-
    apply ./WebInFWConf, commit the firewall config, and finally poll the
    Jenkins server behind the ALB.  Stage results are written with
    write_status_file(); the process exits on any stage failure.
    """
    albDns = ''
    nlbDns = ''
    fwMgt = ''
    # Set run_plan to TRUE is you wish to run terraform plan before apply
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}
    # Class Terraform uses subprocess and setting capture_output to True will capture output
    # capture_output = kwargs.pop('capture_output', True)
    #
    # if capture_output is True:
    #     stderr = subprocess.PIPE
    #     stdout = subprocess.PIPE
    # else:
    #     stderr = sys.stderr
    #     stdout = sys.stdout
    #
    # Build Infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True, **kwargs)
    # return_code1 =0
    print('Got return code {}'.format(return_code1))
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        deployment_status = {'WebInDeploy': 'Fail'}
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status = {'WebInDeploy': 'Success'}
        write_status_file(deployment_status)
    # Addresses produced by the WebInDeploy stack.
    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    # fwUsername = "******"
    # fwPasswd = "PaloAlt0!123!!"
    fw_trust_ip = fwMgt
    #
    # Apply WAF Rules
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying WAF config to App LB")
    if run_plan:
        # NOTE(review): 'alb_arn' is populated with the NLB DNS name here and
        # in the apply below -- confirm this is intentional.
        tf.plan(capture_output=False, var={'alb_arn': nlbDns}, **kwargs)
    return_code3, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'alb_arn': nlbDns,
                                                'int-nlb-fqdn': nlbDns
                                            }, **kwargs)
    if return_code3 != 0:
        logger.info("waf_conf failed")
        deployment_status.update({'waf_conf': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'waf_conf': 'Success'})
        write_status_file(deployment_status)
    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #
    class FWNotUpException(Exception):
        pass
    err = 'no'
    api_key = ''
    api_key = getApiKey(fw_trust_ip, fwUsername, fwPasswd)

    # Poll until both the management plane and the dataplane report up.
    # NOTE(review): the 'cmd_error' branch neither sleeps nor breaks, so it
    # spins without delay until the status changes.
    while True:
        err = getFirewallStatus(fw_trust_ip, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
            # raise FWNotUpException('FW is not up!  Request Timeout')
        elif err == 'no':
            logger.info("FW is not up...yet")
            print("FW is not up...yet")
            time.sleep(60)
            continue
            # raise FWNotUpException('FW is not up!')
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("[INFO]: FW is up")
            break

    fw = firewall.Firewall(hostname=fw_trust_ip, api_username=fwUsername,
                           api_password=fwPasswd)
    logger.info("Updating firewall with latest content pack")
    updateHandle = updater.ContentUpdater(fw)
    updateHandle.download()
    logger.info("Waiting 3 minutes for content update to download")
    time.sleep(210)
    updateHandle.install()

    #
    # Configure Firewall
    #
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying addtional config to firewall")
    if run_plan:
        tf.plan(capture_output=False, var={
            'mgt-ipaddress-fw1': fwMgt,
            'int-nlb-fqdn': nlbDns
        })
    # NOTE(review): aws_access_key / aws_secret_key are not defined in this
    # function's scope -- presumably module-level globals; verify.
    return_code2, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'mgt-ipaddress-fw1': fwMgt,
                                                'nlb-dns': nlbDns,
                                                'aws_access_key': aws_access_key,
                                                'aws_secret_key': aws_secret_key
                                            }, **kwargs)
    # return_code2 = 0
    if return_code2 != 0:
        logger.info("WebFWConfy failed")
        deployment_status.update({'WebFWConfy': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'WebFWConf': 'Success'})
        write_status_file(deployment_status)
    logger.info("Commit changes to firewall")
    fw.commit()

    logger.info('Checking if Jenkins Server is ready')
    # tf = Terraform(working_dir='./WebInDeploy')
    # albDns = tf.output('ALB-DNS')
    count = 0
    max_tries = 3
    # Retry the health check up to max_tries before giving up.
    while True:
        if count < max_tries:
            res = getServerStatus(albDns)
            if res == 'server_down':
                count = count + 1
                time.sleep(2)
                continue
            elif res == 'server_up':
                break
        else:
            break
    logger.info('Jenkins Server is ready')
    logger.info('\n\n ### Deployment Complete ###')
    logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns))
def main(username, password, aws_access_key, aws_secret_key, aws_region,
         ec2_key_pair, bootstrap_bucket):
    """Deploy WebInDeploy, apply WAF config, configure the firewall, check Jenkins.

    Args mirror the CLI options: firewall admin credentials, AWS credentials
    and region, the EC2 key pair, and the bootstrap S3 bucket.  Stage results
    accumulate via update_status() and the final status_output dict is printed
    as JSON; the process exits 1 on any stage failure.
    """
    username = username
    password = password
    aws_access_key = aws_access_key
    aws_secret_key = aws_secret_key
    aws_region = aws_region
    ec2_key_pair = ec2_key_pair
    albDns = ''
    nlbDns = ''
    fwMgt = ''
    default_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region
    }
    WebInDeploy_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'bootstrap_s3bucket': bootstrap_bucket
    }
    waf_conf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'alb_arn': albDns,
        'nlb-dns': nlbDns
    }
    WebInFWConf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'mgt-ipaddress-fw1': fwMgt,
        'nlb-dns': nlbDns,
        'username': username,
        'password': password
    }

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess and setting capture_output to True will capture output
    capture_output = kwargs.pop('capture_output', False)
    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, then everything will essentially go to stdout and stderrf
        stderr = sys.stderr
        stdout = sys.stdout

    start_time = time.asctime()
    print(f'Starting Deployment at {start_time}\n')

    # Build Infrastructure
    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True, **kwargs)
    web_in_deploy_output = tf.output()
    logger.debug('Got Return code for deploy WebInDeploy {}'.format(return_code1))
    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_deploy_output', web_in_deploy_output)
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fw_trust_ip = fwMgt

    # Propagate freshly-created addresses into the later stages' variables.
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    WebInFWConf_vars['nlb-dns'] = nlbDns
    WebInDeploy_vars['alb_dns'] = albDns
    WebInDeploy_vars['nlb-dns'] = nlbDns

    #
    # Apply WAF Rules
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying WAF config to App LB")

    if run_plan:
        # BUG FIX: previously passed `var=vars` -- the *builtin* vars()
        # function, not a variable dict -- instead of waf_conf_vars.
        tf.plan(capture_output=capture_output, var=waf_conf_vars, **kwargs)

    return_code3, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=waf_conf_vars, **kwargs)
    waf_conf_out = tf.output()
    update_status('waf_conf_output', waf_conf_out)
    # update_status('waf_conf_stdout', stdout)
    # update_status('waf_conf_stderr', stderr)
    logger.debug('Got Return code to deploy waf_conf {}'.format(return_code3))
    if return_code3 != 0:
        logger.info("waf_conf failed")
        update_status('waf_conf_status', 'error')
        update_status('waf_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('waf_conf_status', 'success')

    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #
    api_key = getApiKey(fw_trust_ip, username, password)

    # Poll until both the management plane and the dataplane report up.
    while True:
        err = getFirewallStatus(fw_trust_ip, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
        elif err == 'no':
            logger.info("FW is not up...yet")
            # print("FW is not up...yet")
            time.sleep(60)
            continue
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("FW is up")
            break

    fw = firewall.Firewall(hostname=fw_trust_ip, api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")
    updateHandle = updater.ContentUpdater(fw)
    # NOTE(review): download() is called with `fw` here while the sibling
    # script calls it without arguments -- confirm against pandevice's
    # ContentUpdater.download signature.
    updateHandle.download(fw)
    logger.info("Waiting 3 minutes for content update to download")
    time.sleep(210)
    updateHandle.install()

    #
    # Configure Firewall
    #
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying addtional config to firewall")

    # update initial vars with generated fwMgt ip
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars, **kwargs)
    web_in_fw_conf_out = tf.output()
    update_status('web_in_fw_conf_output', web_in_fw_conf_out)
    # update_status('web_in_fw_conf_stdout', stdout)
    logger.debug('Got Return code for deploy WebInFwConf {}'.format(return_code2))
    if return_code2 != 0:
        logger.error("WebFWConfy failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")
    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #
    logger.info('Checking if Jenkins Server is ready')
    # FIXME - add outputs for all 3 dirs
    res = getServerStatus(albDns)
    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n ### Deployment Complete ###')
        logger.info('\n\n Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n ### Deployment Complete ###')
    # dump out status to stdout
    print(json.dumps(status_output))
def main(username, password, rg_name, azure_region):
    """Deploy the Azure demo: WebInBootstrap, then WebInDeploy, then WebInFWConf.

    Creates the bootstrap storage (and an Azure file share), builds the main
    infrastructure, waits for the PAN-OS firewall, applies the firewall
    configuration and commits it, then checks the Jenkins server.  Stage
    results accumulate via update_status() and the final status_output dict is
    printed as JSON; the process exits 1 on any stage failure.
    """
    username = username
    password = password
    WebInBootstrap_vars = {'RG_Name': rg_name, 'Azure_Region': azure_region}
    WebInDeploy_vars = {
        'Admin_Username': username,
        'Admin_Password': password,
        'Azure_Region': azure_region
    }
    WebInFWConf_vars = {'Admin_Username': username, 'Admin_Password': password}

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess and setting capture_output to True will capture output
    capture_output = kwargs.pop('capture_output', False)
    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, then everything will essentially go to stdout and stderrf
        stderr = sys.stderr
        stdout = sys.stdout

    start_time = time.asctime()
    print(f'Starting Deployment at {start_time}\n')

    # Create Bootstrap
    tf = Terraform(working_dir='./WebInBootstrap')
    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False)

    # BUG FIX: previously passed `vars=WebInBootstrap_vars`; python-terraform
    # only special-cases the `var=` kwarg into `-var key=value` pairs, so
    # `vars=` produced a bogus `-vars` CLI option.
    return_code1, stdout, stderr = tf.apply(var=WebInBootstrap_vars,
                                            capture_output=capture_output,
                                            skip_plan=True, **kwargs)
    resource_group = tf.output('Resource_Group')
    bootstrap_bucket = tf.output('Bootstrap_Bucket')
    storage_account_access_key = tf.output('Storage_Account_Access_Key')
    web_in_bootstrap_output = tf.output()
    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))
    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_bootstrap_output', web_in_bootstrap_output)
    if return_code1 != 0:
        logger.info("WebInBootstrap failed")
        # BUG FIX: the status key was misspelled 'web_in_bootstap_status',
        # diverging from the success branch's 'web_in_bootstrap_status'.
        update_status('web_in_bootstrap_status', 'error')
        update_status('web_in_bootstrap_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_bootstrap_status', 'success')

    # File share used to bootstrap the firewall.
    share_prefix = 'jenkins-demo'
    share_name = create_azure_fileshare(share_prefix, bootstrap_bucket,
                                        storage_account_access_key)
    WebInDeploy_vars.update(
        {'Storage_Account_Access_Key': storage_account_access_key})
    WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket})
    WebInDeploy_vars.update({'RG_Name': resource_group})
    WebInDeploy_vars.update({'Attack_RG_Name': resource_group})
    WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name})

    # Build Infrastructure
    tf = Terraform(working_dir='./WebInDeploy')
    # print("vars {}".format(WebInDeploy_vars))
    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True, **kwargs)
    web_in_deploy_output = tf.output()
    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))
    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_deploy_output', web_in_deploy_output)
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fwMgtIP = tf.output('MGT-IP-FW-1')

    logger.info("Got these values from output \n\n")
    logger.info("AppGateway address is {}".format(albDns))
    logger.info("Internal loadbalancer address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #
    api_key = getApiKey(fwMgtIP, username, password)

    # Poll until both the management plane and the dataplane report up.
    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
        elif err == 'no':
            logger.info("FW is not up...yet")
            # print("FW is not up...yet")
            time.sleep(60)
            continue
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("FW is up")
            break

    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions')
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP, api_username=username,
                           api_password=password)

    logger.info("Updating firewall with latest content pack")
    update_fw(fwMgtIP, api_key)

    #
    # Configure Firewall
    #
    WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP})
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying addtional config to firewall")

    # update initial vars with generated fwMgt ip
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars, **kwargs)
    web_in_fw_conf_out = tf.output()
    update_status('web_in_fw_conf_output', web_in_fw_conf_out)
    # update_status('web_in_fw_conf_stdout', stdout)
    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))
    if return_code2 != 0:
        logger.error("WebInFWConf failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")
    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #
    logger.info('Checking if Jenkins Server is ready')
    # FIXME - add outputs for all 3 dirs
    res = getServerStatus(albDns)
    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n ### Deployment Complete ###')
        logger.info(
            '\n\n Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n ### Deployment Complete ###')
    # dump out status to stdout
    print(json.dumps(status_output))
#def install(package): # subprocess.call([sys.executable, "-m", "pip", "install", package]) # #install('python_terraform') # #try: # from python_terraform import Terraform #except ImportError: # install('python_terraform') # from python_terraform import Terraform urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) working_dir = "../deploy" tf = Terraform(working_dir=working_dir) outputs = tf.output() fw1_mgmt = outputs['fw1_public_ip']['value'] fw2_mgmt = outputs['fw2_public_ip']['value'] parser = argparse.ArgumentParser() parser.add_argument("-p", "--password", help="Example Password", type=str) args = parser.parse_args() username = "******" password = args.password # Get API Key url = "https://%s/api/?type=keygen&user=%s&password=%s" % (fw1_mgmt, username, password) response = requests.get(url, verify=False)