예제 #1
0
 def test_plan(self, plan, variables, expected_ret):
     """Plan must fail when the required 'region' variable is missing.

     Initialises the working dir with the supplied variables, then expects
     ``tf.plan`` to raise ``TerraformCommandError`` whose stderr is exactly
     Terraform's "Missing required argument" message.
     (``expected_ret`` is not used by the body as written.)
     """
     tf = Terraform(working_dir=current_path, variables=variables)
     tf.init(plan)
     with pytest.raises(TerraformCommandError) as e:
         tf.plan(plan)
     assert (
         e.value.err ==
         """\nError: Missing required argument\n\nThe argument "region" is required, but was not set.\n\n"""
     )
예제 #2
0
def main():
    """Tear down the waf_conf and WebInDeploy Terraform stacks.

    Runs ``terraform destroy`` (auto-approved) in ./waf_conf and then in
    ./WebInDeploy, logging the outcome of each step.  Exits the process
    after the second destroy regardless of outcome.
    """
    albDns = ''
    nlbDns = ''
    fwMgt = ''

    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./waf_conf')

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        logger.info("Failed to destroy WebInDeploy ")
        exit()
    else:
        logger.info("Destroyed WebInDeploy ")

    tf = Terraform(working_dir='./WebInDeploy')

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # BUG FIX: the failure branch previously logged "WebInDeploy
        # destroyed"; report the failure instead, and log success in the
        # success branch.
        logger.info("Failed to destroy WebInDeploy")
        deployment_status = {'WebInDeploy': 'Fail'}
        exit()
    else:
        logger.info("WebInDeploy destroyed")
        deployment_status = {'WebInDeploy': 'Success'}
        exit()
예제 #3
0
def apply_tf(working_dir, vars, description):
    """
    Handles terraform operations and returns variables in outputs.tf as a dict.
    :param working_dir: Directory that contains the tf files
    :param vars: Additional variables passed in to override defaults equivalent to -var
    :param description: Description of the deployment for logging purposes
    :return:    return_code - 0 for success or other for failure
                outputs - Dictionary of the terraform outputs defined in the outputs.tf file

    """
    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess; capture_output=True would capture the
    # command's output instead of streaming it.  (The previous stdout/stderr
    # PIPE setup here was dead code: both names were immediately shadowed by
    # the tf.apply() return value below.)
    capture_output = kwargs.pop('capture_output', False)

    start_time = time.asctime()
    print('Starting Deployment at {}\n'.format(start_time))

    # Create Bootstrap

    tf = Terraform(working_dir=working_dir)

    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False)

    # BUG FIX: python-terraform expects the variable dict under the 'var'
    # keyword (it expands into -var CLI flags); 'vars=' is not recognised
    # and would be emitted as an invalid '-vars' option.
    return_code, stdout, stderr = tf.apply(var=vars,
                                           capture_output=capture_output,
                                           skip_plan=True,
                                           **kwargs)
    outputs = tf.output()

    logger.debug('Got Return code {} for deployment of  {}'.format(
        return_code, description))

    return (return_code, outputs)
예제 #4
0
def execute_terraform(working_dir):
    """Execute terraform code to setup cloud resources, including servers, networks and so on.

    Runs ``terraform init`` (only if .terraform is absent), then ``plan``,
    and asks the operator for interactive confirmation before ``apply``.

    Arguments:
        working_dir {string} -- The path of terraform working directory.

    Returns:
        string -- return_code, stdout, stderr.
    """

    tf = Terraform(working_dir=working_dir)
    hiden_dir = path.realpath(path.join(working_dir, ".terraform"))
    # Init terraform env.
    # Skip 'terraform init' when the .terraform directory already exists.
    if not path.isdir(hiden_dir):
        tf.init()

    # Run terraform plan.
    plan_return_code, plan_stdout, plan_stderr = tf.plan()

    # If there is no error, the stderr returned is unicode instead of a None type. So have to
    # Check the length of the stderr.
    if len(plan_stderr) == 0:
        print(plan_stdout)
        # NOTE(review): raw_input is Python 2 only -- under Python 3 this
        # raises NameError (the py3 equivalent is input()).
        input_str = raw_input("Do you want to perform these actions? Only 'yes' will be accepted to approve.\n  Enter a value:")
        if input_str == "yes":
            print("Start setting up cloud resources ...")
            return tf.apply(skip_plan=True)
        else:
            print("Apply cancelled.")
            sys.exit()
    else:
        print(plan_stderr)
        sys.exit()
예제 #5
0
def check_config():
    """Validate configurations.json against each machine's Terraform dir.

    For every machine (except the bookkeeping 'terraform' entry) this
    checks that a "variables" key exists, that every 'api' variable has a
    matching entry under "api_endpoints", and that ``terraform plan`` in
    the machine's directory does not complain about missing configuration
    files or missing required variables.

    :return: 0 when everything checks out, 1 on the first problem found.
    """
    defined_variables = []

    with open('configurations.json') as json_file:
        data = json.load(json_file)
    for machine in data["machines"]:
        if machine["name"] == "terraform":
            continue
        print(
            "\n",
            machine["name"],
        )
        if machine.get("variables") is None:
            print("MISSING VARIABLES KEY")
            return 1
        for _, value in machine["variables"].items():
            defined_variables += value

        if machine["variables"].get("api") is not None:
            missing_endpoints = []
            for api in machine["variables"]["api"]:
                if data.get("api_endpoints") is None or \
                  data["api_endpoints"].get(api) is None:
                    missing_endpoints.append(api)
            if missing_endpoints:
                print("MISSING ENDPOINTS FOR API VARIABLES: ",
                      missing_endpoints)
                return 1

        tf = Terraform(working_dir=machine["name"])
        code, err, log = tf.plan()
        if code:
            if log.find("No configuration files") != -1:
                print("NO TERRAFORM FILE FOUND, PLEASE CREATE ONE")
                return 1
            # BUG FIX: str.find returns -1 (truthy) when the substring is
            # absent and 0 (falsy) when it starts the string, so the bare
            # truth test misclassified both cases; compare against -1.
            if log.find("No value for required variable") != -1:
                variables = get_variables_from_log(log)
                missing_variables = []
                for var in variables:
                    if var not in defined_variables:
                        missing_variables.append(var)
                if missing_variables:
                    print(
                        "THERE ARE MISSING VARIABLES, WHICH ARE DEFINED IN %s, "
                        "PLEASE SPECIFY THEM IN config.json" % machine["name"])
                    print("MISSING VARIABLES: ", missing_variables)
                    return 1
            else:
                print("UNKNOWN ERROR")
                return 1
    return 0
예제 #6
0
def plan():
    # Run 'terraform init' and 'terraform plan' for ./terraform using the
    # AWS region configured under the 'profile pollexy' section of
    # ~/.aws/config; aborts with a hint if that profile is missing.
    # NOTE(review): this function uses Python 2 print statements -- it will
    # not parse under Python 3.
    tf = Terraform(working_dir='./terraform')
    parser = SafeConfigParser()
    config = os.path.expanduser('~/.aws/config')
    parser.read(config)
    if not parser.has_section('profile pollexy'):
        print "You need to run 'pollexy credentials configure'"
        return
    region = parser.get('profile pollexy', 'region')
    print 'Initializing environment . . . ' + region
    code, stdout, stderr = tf.init()
    print stderr
    print stdout

    print 'Planning environment . . . '
    # Pass the configured region through to terraform as -var aws_region=...
    code, stdout, stderr = tf.plan(var={'aws_region': region})
    # Only show stdout when plan produced no error output.
    if (stderr):
        print stderr
    else:
        print stdout
예제 #7
0
def terraform_plan(tf: Terraform):
    """Run ``terraform plan`` via *tf* and log its return code and output.

    :param tf: a python-terraform ``Terraform`` instance bound to the
        working directory to plan.
    """
    result = tf.plan(capture_output=True)
    rc, out, err = result
    logger.debug('Terraform plan return code is {}'.format(rc))
    logger.debug('Terraform plan stdout is {}'.format(out))
    logger.debug('Terraform plan stderr is {}'.format(err))
예제 #8
0
def main(username, password, aws_access_key, aws_secret_key, aws_region,
         ec2_key_pair, bootstrap_bucket):
    """Deploy the full AWS demo stack and configure the firewall.

    Steps:
      1. terraform apply ./WebInDeploy (infrastructure + Jenkins server)
      2. terraform apply ./waf_conf (WAF rules on the app load balancer)
      3. wait for the firewall to come up, update its content pack, then
         terraform apply ./WebInFWConf and commit the firewall config
      4. poll the Jenkins server and dump ``status_output`` as JSON

    Exits the process with status 1 on any failed apply.
    """
    albDns = ''
    nlbDns = ''
    fwMgt = ''

    default_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region
    }

    WebInDeploy_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'bootstrap_s3bucket': bootstrap_bucket
    }

    waf_conf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'alb_arn': albDns,
        'nlb-dns': nlbDns
    }

    WebInFWConf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'mgt-ipaddress-fw1': fwMgt,
        'nlb-dns': nlbDns,
        'username': username,
        'password': password
    }

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess and setting capture_output to True will capture output
    capture_output = kwargs.pop('capture_output', False)

    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, then everything will essentially go to stdout and stderr
        stderr = sys.stderr
        stdout = sys.stdout
        start_time = time.asctime()
        print(f'Starting Deployment at {start_time}\n')

    # Build Infrastructure

    tf = Terraform(working_dir='./WebInDeploy')

    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    web_in_deploy_output = tf.output()

    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))

    update_status('web_in_deploy_output', web_in_deploy_output)

    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fwMgtIP = fwMgt

    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    WebInFWConf_vars['nlb-dns'] = nlbDns

    # BUG FIX: the outputs fetched above must flow into waf_conf_vars --
    # that is the dict the waf_conf apply below consumes, and it otherwise
    # still holds the empty-string placeholders.  Previously these were
    # written into WebInDeploy_vars (under a key waf_conf doesn't use) and
    # never read again.
    waf_conf_vars['alb_arn'] = albDns
    waf_conf_vars['nlb-dns'] = nlbDns

    #
    # Apply WAF Rules
    #

    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying WAF config to App LB")

    if run_plan:
        # BUG FIX: previously passed the builtin 'vars' (a function) as the
        # variable dict and forwarded -auto-approve (invalid for plan).
        tf.plan(capture_output=capture_output, var=waf_conf_vars)

    return_code3, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=waf_conf_vars,
                                            **kwargs)

    waf_conf_out = tf.output()

    update_status('waf_conf_output', waf_conf_out)

    logger.debug('Got Return code to deploy waf_conf {}'.format(return_code3))

    if return_code3 != 0:
        logger.info("waf_conf failed")
        update_status('waf_conf_status', 'error')
        update_status('waf_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('waf_conf_status', 'success')

    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    # #

    api_key = getApiKey(fwMgtIP, username, password)

    # Poll the firewall until its dataplane reports up.
    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")

        elif err == 'no':
            logger.info("FW is not up...yet")
            time.sleep(60)
            continue

        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue

        elif err == 'yes':
            logger.info("FW is up")
            break

    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions'
    )
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP,
                           api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")

    update_fw(fwMgtIP, api_key)
    updateHandle = updater.ContentUpdater(fw)

    #
    # Configure Firewall
    #

    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt

    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    # update initial vars with generated fwMgt ip

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars,
                                            **kwargs)

    web_in_fw_conf_out = tf.output()

    update_status('web_in_fw_conf_output', web_in_fw_conf_out)

    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))

    if return_code2 != 0:
        logger.error("WebFWConfy failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")

    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #

    logger.info('Checking if Jenkins Server is ready')

    # FIXME - add outputs for all 3 dirs

    res = getServerStatus(albDns)

    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n   ### Deployment Complete ###')
        logger.info(
            '\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n   ### Deployment Complete ###')

    # dump out status to stdout
    print(json.dumps(status_output))
예제 #9
0
def main(fwUsername, fwPasswd):
    """Deploy WebInDeploy, apply WAF rules, then configure the firewall.

    Writes progress to a status file after every major step via
    ``write_status_file`` and exits the process on any failed apply.

    :param fwUsername: firewall admin username for API access.
    :param fwPasswd: firewall admin password.
    """

    albDns = ''
    nlbDns = ''
    fwMgt = ''

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess and setting capture_output to True will capture output
    # capture_output = kwargs.pop('capture_output', True)
    #
    # if capture_output is True:
    #     stderr = subprocess.PIPE
    #     stdout = subprocess.PIPE
    # else:
    #     stderr = sys.stderr
    #     stdout = sys.stdout

    #
    # Build Infrastructure
    #

    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            **kwargs)
    #return_code1 =0
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        deployment_status = {'WebInDeploy': 'Fail'}
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status = {'WebInDeploy': 'Success'}
        write_status_file(deployment_status)

    # Pull the load-balancer / firewall addresses from terraform outputs.
    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    # fwUsername = "******"
    # fwPasswd = "PaloAlt0!123!!"
    fw_trust_ip = fwMgt

    #
    # Apply WAF Rules
    #

    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying WAF config to App LB")

    if run_plan:
        tf.plan(capture_output=False, var={'alb_arn': nlbDns}, **kwargs)

    # NOTE(review): both 'alb_arn' and 'int-nlb-fqdn' are set to nlbDns
    # here -- confirm 'alb_arn' was not meant to receive albDns.
    return_code3, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'alb_arn': nlbDns,
                                                'int-nlb-fqdn': nlbDns
                                            },
                                            **kwargs)

    if return_code3 != 0:
        logger.info("waf_conf failed")
        deployment_status.update({'waf_conf': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'waf_conf': 'Success'})
        write_status_file(deployment_status)

    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #

    class FWNotUpException(Exception):
        pass

    err = 'no'
    api_key = ''
    api_key = getApiKey(fw_trust_ip, fwUsername, fwPasswd)

    # Poll the firewall until its dataplane reports up.
    while True:
        err = getFirewallStatus(fw_trust_ip, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
            #raise FWNotUpException('FW is not up!  Request Timeout')

        elif err == 'no':
            logger.info("FW is not up...yet")
            print("FW is not up...yet")
            time.sleep(60)
            continue
            #raise FWNotUpException('FW is not up!')
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("[INFO]: FW is up")
            break

    fw = firewall.Firewall(hostname=fw_trust_ip,
                           api_username=fwUsername,
                           api_password=fwPasswd)
    logger.info("Updating firewall with latest content pack")
    updateHandle = updater.ContentUpdater(fw)

    updateHandle.download()

    logger.info("Waiting 3 minutes for content update to download")
    time.sleep(210)
    updateHandle.install()

    #
    # Configure Firewall
    #

    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    if run_plan:
        tf.plan(capture_output=False,
                var={
                    'mgt-ipaddress-fw1': fwMgt,
                    'int-nlb-fqdn': nlbDns
                })

    # NOTE(review): aws_access_key / aws_secret_key are not defined anywhere
    # in this function (nor are they parameters) -- reaching this call as
    # written raises NameError.  They presumably need to be passed in or
    # read from configuration; confirm against the caller.
    return_code2, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'mgt-ipaddress-fw1': fwMgt,
                                                'nlb-dns': nlbDns,
                                                'aws_access_key':
                                                aws_access_key,
                                                'aws_secret_key':
                                                aws_secret_key
                                            },
                                            **kwargs)
    #return_code2 = 0
    if return_code2 != 0:
        logger.info("WebFWConfy failed")
        deployment_status.update({'WebFWConfy': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'WebFWConf': 'Success'})
        write_status_file(deployment_status)

    logger.info("Commit changes to firewall")

    fw.commit()

    logger.info('Checking if Jenkins Server is ready')

    #    tf = Terraform(working_dir='./WebInDeploy')
    #   albDns = tf.output('ALB-DNS')
    # Retry the Jenkins health check up to max_tries times.
    count = 0
    max_tries = 3
    while True:
        if count < max_tries:
            res = getServerStatus(albDns)
            if res == 'server_down':
                count = count + 1
                time.sleep(2)
                continue
            elif res == 'server_up':
                break
        else:
            break
    logger.info('Jenkins Server is ready')
    logger.info('\n\n   ### Deployment Complete ###')
    logger.info('\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
예제 #10
0
def main(username, password, rg_name, azure_region):
    """Deploy the Azure demo stack: bootstrap, infrastructure, firewall.

    Steps:
      1. terraform apply ./WebInBootstrap (resource group + storage)
      2. create an Azure file share, then terraform apply ./WebInDeploy
      3. wait for the firewall, update its content, terraform apply
         ./WebInFWConf and commit the firewall config
      4. poll the Jenkins server and dump ``status_output`` as JSON

    Exits the process with status 1 on any failed apply.
    """
    WebInBootstrap_vars = {'RG_Name': rg_name, 'Azure_Region': azure_region}

    WebInDeploy_vars = {
        'Admin_Username': username,
        'Admin_Password': password,
        'Azure_Region': azure_region
    }

    WebInFWConf_vars = {'Admin_Username': username, 'Admin_Password': password}

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess and setting capture_output to True will capture output
    capture_output = kwargs.pop('capture_output', False)

    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, then everything will essentially go to stdout and stderr
        stderr = sys.stderr
        stdout = sys.stdout
        start_time = time.asctime()
        print(f'Starting Deployment at {start_time}\n')

    # Create Bootstrap

    tf = Terraform(working_dir='./WebInBootstrap')

    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False)
    # BUG FIX: variables must be passed via the 'var' keyword (as every
    # other apply in this function does) so they become -var CLI flags;
    # 'vars=' is not understood by python-terraform.
    return_code1, stdout, stderr = tf.apply(var=WebInBootstrap_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    resource_group = tf.output('Resource_Group')
    bootstrap_bucket = tf.output('Bootstrap_Bucket')
    storage_account_access_key = tf.output('Storage_Account_Access_Key')
    web_in_bootstrap_output = tf.output()

    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))

    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_bootstrap_output', web_in_bootstrap_output)

    if return_code1 != 0:
        logger.info("WebInBootstrap failed")
        update_status('web_in_bootstap_status', 'error')
        update_status('web_in_bootstrap_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_bootstrap_status', 'success')

    share_prefix = 'jenkins-demo'

    share_name = create_azure_fileshare(share_prefix, bootstrap_bucket,
                                        storage_account_access_key)

    WebInDeploy_vars.update(
        {'Storage_Account_Access_Key': storage_account_access_key})
    WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket})
    WebInDeploy_vars.update({'RG_Name': resource_group})
    WebInDeploy_vars.update({'Attack_RG_Name': resource_group})
    WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name})

    # Build Infrastructure

    tf = Terraform(working_dir='./WebInDeploy')
    # print("vars {}".format(WebInDeploy_vars))
    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    web_in_deploy_output = tf.output()

    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))

    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_deploy_output', web_in_deploy_output)
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fwMgtIP = tf.output('MGT-IP-FW-1')

    logger.info("Got these values from output \n\n")
    logger.info("AppGateway address is {}".format(albDns))
    logger.info("Internal loadbalancer address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    # #

    api_key = getApiKey(fwMgtIP, username, password)

    # Poll the firewall until its dataplane reports up.
    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")

        elif err == 'no':
            logger.info("FW is not up...yet")
            # print("FW is not up...yet")
            time.sleep(60)
            continue

        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue

        elif err == 'yes':
            logger.info("FW is up")
            break

    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions'
    )
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP,
                           api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")

    update_fw(fwMgtIP, api_key)

    #
    # Configure Firewall
    #
    WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP})
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt

    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    # update initial vars with generated fwMgt ip

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars,
                                            **kwargs)

    web_in_fw_conf_out = tf.output()

    update_status('web_in_fw_conf_output', web_in_fw_conf_out)
    # update_status('web_in_fw_conf_stdout', stdout)

    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))

    if return_code2 != 0:
        logger.error("WebInFWConf failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")

    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #

    logger.info('Checking if Jenkins Server is ready')

    # FIXME - add outputs for all 3 dirs

    res = getServerStatus(albDns)

    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n   ### Deployment Complete ###')
        logger.info(
            '\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n   ### Deployment Complete ###')

    # dump out status to stdout
    print(json.dumps(status_output))
예제 #11
0
def main(username, password):
    """Destroy the WebInDeploy and WebInBootstrap Azure deployments.

    Reads the resource-group names from WebInDeploy's terraform outputs,
    then runs ``terraform destroy`` (auto-approved) in ./WebInDeploy
    followed by ./WebInBootstrap.  Exits the process after a failure and
    after the final destroy.
    """
    WebInDeploy_vars = {'Admin_Username': username, 'Admin_Password': password}

    WebInBootstrap_vars = {
        'Admin_Username': username,
        'Admin_Password': password
    }

    albDns = ''
    nlbDns = ''
    fwMgt = ''

    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    rg_name = tf.output('RG_Name')

    attack_rg_name = tf.output('Attacker_RG_Name')
    logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(
        rg_name, attack_rg_name))

    WebInDeploy_vars.update({'RG_Name': rg_name})
    WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name})

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          var=WebInDeploy_vars,
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        logger.info("Failed to destroy build ")
        exit()
    else:
        logger.info("Destroyed WebInDeploy ")

    WebInBootstrap_vars.update({'RG_Name': rg_name})
    WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name})

    tf = Terraform(working_dir='./WebInBootstrap')

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          var=WebInBootstrap_vars,
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # BUG FIX: the failure branch previously logged
        # "WebInBootstrap destroyed"; report the failure instead and log
        # success in the success branch.
        logger.info("Failed to destroy WebInBootstrap")
        deployment_status = {'WebInDeploy': 'Fail'}
        exit()
    else:
        logger.info("WebInBootstrap destroyed")
        deployment_status = {'WebInDeploy': 'Success'}
        exit()
예제 #12
0
def main(aws_access_key, aws_secret_key, aws_region):
    """Destroy the waf_conf and WebInDeploy AWS deployments.

    Runs ``terraform destroy`` (auto-approved) in ./waf_conf and then in
    ./WebInDeploy, passing the AWS credentials/region as -var values.
    Exits 1 if the WebInDeploy destroy fails, 0 otherwise.
    """
    albDns = ''
    nlbDns = ''
    fwMgt = ''

    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
    }

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    # BUG FIX: variables must be passed via the 'var' keyword so they become
    # -var CLI flags; 'vars=' is not understood by python-terraform.
    return_code1, stdout, stderr = tf.cmd('destroy',
                                          capture_output=True,
                                          var=vars,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        logger.info("Failed to destroy WebInDeploy ")
        exit()
    else:
        logger.info("Destroyed waf_conf Successfully")

    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          capture_output=True,
                                          var=vars,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # BUG FIX: the failure branch previously logged
        # "WebInDeploy destroyed"; report the failure instead.
        logger.info("Failed to destroy WebInDeploy")
        print('Failed to Destroy WebInDeploy')
        exit(1)
    else:
        print('Destroyed WebInDeploy Successfully')
        exit(0)
예제 #13
0
class Terraform:
    """Terraform automation wrapper.

    Combines the ``python-terraform`` CLI binding (``TF``) with
    ``terrascript``-generated configs for DigitalOcean / Vultr servers and
    docker-based nginx site deployment over ssh.
    """

    # Docker / nginx deployment defaults.
    DEFAULT_DOCKER_HOST = 'unix:///var/run/docker.sock'
    DEFAULT_DOCKER_ENTRYPOINT_PATH = '/docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = '/nginx.docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_IMAGE = 'nginx:stable-alpine'
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = '/usr/share/nginx/html'
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    # NOTE(review): looks like a scrubbed placeholder — confirm the real
    # default ssh user before relying on it.
    DEFAULT_SSH_USER = '******'
    DEFAULT_SSH_PORT = 22

    TERRAFORM_RESOURCE_FILE = 'file'

    # Marker subclasses teach terrascript about provider/resource types it
    # does not ship with (the class *name* becomes the terraform type).
    class null_resource(Resource):
        ...

    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        # Bind the python-terraform wrapper to the configured work dir.
        self.work_dir = TERRAFORM_WORK_DIR
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Yield a temp directory bound to a throwaway terraform workspace.

        The workspace is always deleted and 'default' restored, even when
        the body raises (previously an exception leaked the workspace).
        """
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()
        try:
            yield tmp_dir.name
        finally:
            self.app.set_workspace('default')
            self.app.cmd('workspace delete', workspace, force=IsFlagged)

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        # TODO: placeholder — patch ssh config before yield, restore after.
        # patch ssh config
        yield
        # clear ssh config

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Write *config* as a ``.tf.json`` file in *dir_path* and init.

        Returns the NamedTemporaryFile (``delete=False`` so terraform can
        still read the file after this call).
        """
        tmp_config_file = NamedTemporaryFile(mode='wt',
                                             suffix='.tf.json',
                                             dir=dir_path,
                                             delete=False)
        tmp_config_file.write(str(config))
        tmp_config_file.seek(0)

        self.app.init(
            dir_path,
            plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Render *config* in a scratch workspace and return the plan."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            plan = self.app.plan(tw_dir, no_color=IsFlagged)
        return plan

    def run_terraform_apply(self, config: Terrascript):
        """Apply *config*; return ``{output name: value}`` from terraform."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            print(config)
            self.app.apply(tw_dir, skip_plan=True, no_color=IsFlagged)
            output_result = self.app.output(json=IsFlagged, no_color=IsFlagged)
            print(output_result)
            output_var = {
                output_var_key: output_result[output_var_key]['value']
                for output_var_key in output_result
            }

        return output_var

    def run_terraform_destroy(self, config: Terrascript):
        """Destroy the infrastructure described by *config*."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            destroy_result = self.app.destroy(tw_dir)
        return destroy_result

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        """Build a terrascript config for one DigitalOcean droplet.

        Exposes outputs ``ip`` and ``server_id``.  When *public_key* is
        given it is registered and attached to the droplet.
        """
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            "server",
            image=config_data['os_code'],
            name=config_data['hostname'],
            region=config_data['region_code'],
            size=config_data['plan_code'],
            ssh_keys=config_data['ssh_keys']
            if config_data.get('ssh_keys') else [])
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            do_droplet_resource['ssh_keys'] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key

        do_output_ip = Output(
            'ip', value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output('server_id',
                              value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id

        return do_config

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        """Build a terrascript config for one Vultr server.

        Exposes outputs ``ip`` and ``server_id``.  When *public_key* is
        given it is registered and attached to the server.
        """
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        vultr_server = cls.vultr_server("server",
                                        plan_id=config_data['plan_code'],
                                        region_id=config_data['region_code'],
                                        os_id=config_data['os_code'],
                                        hostname=config_data['hostname'],
                                        ssh_key_ids=config_data['ssh_keys']
                                        if config_data.get('ssh_keys') else [])
        vultr_output_ip = Output('ip', value="${vultr_server.server.main_ip}")
        vultr_output_id = Output('server_id',
                                 value="${vultr_server.server.id}")

        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key('vultr_ssh_key',
                                              name='default_key',
                                              ssh_key=public_key)

            vultr_server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            vultr_config += vultr_ssh_key

        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id

        return vultr_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Build a remote-exec provisioner appending *public_key* to
        ``~/.ssh/authorized_keys`` on the target host.
        """
        return provisioner("remote-exec",
                           provisioner=provisioner(
                               "remote-exec",
                               inline=[
                                   'mkdir -p ~/.ssh',
                                   # Bug fix: the key must be echoed into the
                                   # file — previously the key text itself
                                   # was executed as a shell command.
                                   f"echo '{public_key}' >> ~/.ssh/authorized_keys"
                               ],
                           ))

    @classmethod
    def gen_ssh_conn_config(cls,
                            *,
                            ssh_user: str = DEFAULT_SSH_USER,
                            ssh_private_key: str,
                            ssh_host: str,
                            ssh_port: int = DEFAULT_SSH_PORT) -> dict:
        """Build a terraform ssh connection block (30s timeout)."""
        # see more in https://www.terraform.io/docs/provisioners/connection.html
        return {
            'type': 'ssh',
            'user': ssh_user,
            'private_key': ssh_private_key,
            'host': ssh_host,
            'port': ssh_port,
            'timeout': '30s'
        }

    @classmethod
    def gen_site_docker_deploy_config(cls,
                                      *,
                                      docker_host: str = DEFAULT_DOCKER_HOST,
                                      site_name: str = None,
                                      template_tar_bytes: bytes = None,
                                      script: str = None,
                                      ssh_user: str = DEFAULT_SSH_USER,
                                      ssh_private_key: str,
                                      ssh_host: str,
                                      ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config deploying an nginx container for *site_name*.

        Optional *template_tar_bytes* is uploaded (base64) into the
        container's html dir; *script* adds an entrypoint shell script.
        """
        config = Terrascript()
        docker_provider = provider.docker(host=docker_host,
                                          connection=cls.gen_ssh_conn_config(
                                              ssh_user=ssh_user,
                                              ssh_private_key=ssh_private_key,
                                              ssh_host=ssh_host,
                                              ssh_port=ssh_port))
        docker_image_resource = resource.docker_image(
            'nginx_image',
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            'nginx_container',
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={'internal': 80},
            upload=[])
        docker_name_resource = resource.random_pet(
            'docker_pet_name',
            length=1,
        )

        if template_tar_bytes:
            # Bug fix: a trailing comma previously made this a 1-tuple and
            # corrupted the upload path; the stray '$' path prefix is gone.
            template_tar_file = f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz"
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode('utf8')
            template_tar_path = f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            docker_container_resource['upload'].append({
                'content_base64': template_tar_file_content,
                'file': template_tar_path
            })

        if script:
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH)
            docker_container_resource['upload'].append({
                'content': entrypoint_sh_content,
                'file': cls.DEFAULT_DOCKER_ENTRYPOINT_PATH
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(self,
                    *,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config that runs ``ls -la`` on the target host over ssh."""
        exec_config = Terrascript()
        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        exec_resource = self.null_resource('remote-exec',
                                           provisioner=provisioner(
                                               "remote-exec",
                                               inline=['ls -la'],
                                               connection=ssh_conn))

        exec_config += exec_resource
        return exec_config

    def upload_file(self,
                    content: str,
                    *,
                    destination: str = DEFAULT_UPLOAD_PATH,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config uploading *content* to *destination* via a
        terraform ``file`` provisioner over ssh.
        """
        upload_config = Terrascript()

        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        file_resource = self.null_resource('upload_file_resource',
                                           provisioner=provisioner(
                                               self.TERRAFORM_RESOURCE_FILE,
                                               content=content,
                                               destination=destination,
                                               connection=ssh_conn))

        upload_config += file_resource
        return upload_config
예제 #14
0
def action_plan(t: Terraform, region: str) -> bool:
    """Run ``terraform plan`` for *region*, report its output, and return
    True unless the plan exited with ``EX_FAILED``.
    """
    result = t.plan(var=f"region={region}")
    report_tf_output(*result)
    exit_code = result[0]
    return exit_code != EX_FAILED
예제 #15
0
class Terraform:
    # Docker / nginx deployment defaults.
    DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"
    DEFAULT_DOCKER_ENTRYPOINT_PATH = "/docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = "/nginx.docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_IMAGE = "nginx:stable-alpine"
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = "/usr/share/nginx/html"
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    # NOTE(review): looks like a scrubbed placeholder — confirm the real
    # default ssh user before relying on it.
    DEFAULT_SSH_USER = "******"
    DEFAULT_SSH_PORT = 22

    # Terraform provisioner type used by upload_file().
    TERRAFORM_RESOURCE_FILE = "file"

    # Marker subclasses teach terrascript about provider/resource/data
    # types it does not ship with (the class *name* becomes the terraform
    # type in the generated config).
    class null_resource(Resource):
        ...

    # Tencent Cloud provider, data sources, and resources.
    class tencentcloud(Provider):
        ...

    class tencentcloud_availability_zones(Data):
        ...

    class tencentcloud_images(Data):
        ...

    class tencentcloud_instance_types(Data):
        ...

    class tencentcloud_security_group(Resource):
        ...

    class tencentcloud_security_group_lite_rule(Resource):
        ...

    class tencentcloud_instance(Resource):
        ...

    class tencentcloud_key_pair(Resource):
        ...

    # Alibaba Cloud provider and resources.
    class alicloud(Provider):
        ...

    class alicloud_vpc(Resource):
        ...

    class alicloud_key_pair(Resource):
        ...

    class alicloud_security_group(Resource):
        ...

    class alicloud_security_group_rule(Resource):
        ...

    class alicloud_instance(Resource):
        ...

    class alicloud_vswitch(Resource):
        ...

    class alicloud_zones(Data):
        ...

    # Vultr provider and resources.
    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        # Bind the python-terraform CLI wrapper (TF) to the configured
        # terraform working directory.
        self.work_dir = TERRAFORM_WORK_DIR
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Yield a temp directory bound to a throwaway terraform workspace.

        The workspace is always deleted and 'default' restored, even when
        the body raises (previously an exception leaked the workspace).
        """
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()
        try:
            yield tmp_dir.name
        finally:
            self.app.set_workspace("default")
            self.app.cmd("workspace delete", workspace, force=IsFlagged)

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        # TODO: placeholder — patch ssh config before yielding and restore
        # it afterwards; currently a no-op context manager.
        # patch ssh config
        yield
        # clear ssh config

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Write *config* as a ``.tf.json`` file into *dir_path* and init.

        Returns the NamedTemporaryFile (``delete=False`` so terraform can
        still read the file after this call returns).
        """
        tmp_config_file = NamedTemporaryFile(mode="wt",
                                             suffix=".tf.json",
                                             dir=dir_path,
                                             delete=False)
        logging.info(str(config))
        tmp_config_file.write(str(config))
        tmp_config_file.seek(0)

        self.app.init(
            dir_path
            # plugin_dir disabled: pinned plugins change too fast
            # dir_path, plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Render *config* into a scratch workspace and return the plan."""
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)
            plan_result = self.app.plan(workspace_dir, no_color=IsFlagged)
        return plan_result

    def run_terraform_apply(self, config: Terrascript):
        """Apply *config* in a scratch workspace.

        Returns a mapping of terraform output name to its value.
        """
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)

            self.app.apply(workspace_dir, skip_plan=True, no_color=IsFlagged)
            raw_outputs = self.app.output(json=IsFlagged, no_color=IsFlagged)

            # Flatten terraform's {"name": {"value": ...}} output shape.
            outputs = {}
            for name in raw_outputs:
                outputs[name] = raw_outputs[name]["value"]

        return outputs

    def run_terraform_destroy(self, config: Terrascript):
        """Destroy the infrastructure described by *config*."""
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)
            result = self.app.destroy(workspace_dir)
        return result

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        """Build a terrascript config for one DigitalOcean droplet.

        Exposes outputs ``ip`` and ``server_id``.  When *public_key* is
        given it is registered as an ssh key and attached to the droplet.
        """
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            "server",
            image=config_data["os_code"],
            name=config_data["hostname"],
            region=config_data["region_code"],
            size=config_data["plan_code"],
            ssh_keys=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            # Reference the key resource so terraform orders creation.
            do_droplet_resource["ssh_keys"] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key

        do_output_ip = Output(
            "ip", value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output("server_id",
                              value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id

        return do_config

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        """Build a terrascript config for one Vultr server.

        Exposes outputs ``ip`` and ``server_id``.  When *public_key* is
        given it is registered as an ssh key and attached to the server.
        """
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        vultr_server = cls.vultr_server(
            "server",
            plan_id=config_data["plan_code"],
            region_id=config_data["region_code"],
            os_id=config_data["os_code"],
            hostname=config_data["hostname"],
            ssh_key_ids=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        vultr_output_ip = Output("ip", value="${vultr_server.server.main_ip}")
        vultr_output_id = Output("server_id",
                                 value="${vultr_server.server.id}")

        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key("vultr_ssh_key",
                                              name="default_key",
                                              ssh_key=public_key)

            # Reference the key resource so terraform orders creation.
            vultr_server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            vultr_config += vultr_ssh_key

        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id

        return vultr_config

    @classmethod
    def gen_tencent_cloud_config(
        cls,
        config_data: dict,
        token: str,
        public_key_name: str = None,
        secret_id: str = None,
    ):
        """Build a terrascript config for one Tencent Cloud instance.

        Exposes outputs ``ip`` and ``server_id``.  *token* is the secret
        key; *public_key_name* (if given) selects an existing key pair.
        """
        tencent_cloud_config = Terrascript()
        # Pin the provider source/version via a required_providers block.
        tencent_terraform = terraform(
            **{
                "required_providers": {
                    "tencentcloud": {
                        "source": "terraform-providers/tencentcloud",
                        "version": "~> 1.40.3",
                    },
                }
            })

        tencent_cloud_provider = cls.tencentcloud(
            secret_id=secret_id,
            secret_key=token,
            region=config_data["region_code"],
        )
        tencent_zone = cls.tencentcloud_availability_zones("default")
        # NOTE(review): despite the name "all-open", the rules below only
        # accept 10.0.0.0/8 traffic — confirm the intended policy.
        tencent_security_group = cls.tencentcloud_security_group(
            "default", name="all-open", description="open all ports")
        tencent_security_group_rule = cls.tencentcloud_security_group_lite_rule(
            "rule",
            security_group_id="${tencentcloud_security_group.default.id}",
            ingress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
            egress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
        )
        tencent_cloud_server = cls.tencentcloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone=
            "${data.tencentcloud_availability_zones.default.zones.0.name}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            disable_monitor_service=True,
            disable_security_service=True,
            allocate_public_ip=True,
            internet_max_bandwidth_out=5,
            instance_charge_type="POSTPAID_BY_HOUR",
            internet_charge_type="TRAFFIC_POSTPAID_BY_HOUR",
            system_disk_type="CLOUD_SSD",
            count=1,
        )
        tencent_output_ip = Output(
            "ip", value="${tencentcloud_instance.server.0.public_ip}")
        tencent_output_id = Output(
            "server_id", value="${tencentcloud_instance.server.0.id}")

        if public_key_name:
            tencent_cloud_server["key_name"] = public_key_name

        tencent_cloud_config += tencent_terraform
        tencent_cloud_config += tencent_cloud_provider
        tencent_cloud_config += tencent_zone
        tencent_cloud_config += tencent_security_group
        tencent_cloud_config += tencent_security_group_rule
        tencent_cloud_config += tencent_cloud_server
        tencent_cloud_config += tencent_output_ip
        tencent_cloud_config += tencent_output_id

        return tencent_cloud_config

    @classmethod
    def gen_ali_cloud_config(
        cls,
        config_data: dict,
        token: str,
        ssh_key_name: str = None,
        access_key: str = None,
        security_groups: List[str] = [],  # NOTE(review): mutable default; param is currently unused below
    ):
        """Build a terrascript config for one Alibaba Cloud instance.

        Creates a VPC, vswitch, and open security group, then the instance.
        Exposes outputs ``ip`` and ``server_id``.  *token* is the secret
        key; *ssh_key_name* (if given) selects an existing key pair.
        """
        ali_cloud_config = Terrascript()
        ali_cloud_provider = cls.alicloud(
            access_key=access_key,
            secret_key=token,
            region=config_data["region_code"],
        )

        ali_zone = cls.alicloud_zones(
            "default",
            available_disk_category="cloud_efficiency",
            available_resource_creation="Instance",
        )
        ali_vpc = cls.alicloud_vpc(
            "vpc",
            cidr_block="172.16.0.0/12",
        )
        ali_vswitch = cls.alicloud_vswitch(
            "vswitch",
            vpc_id="${alicloud_vpc.vpc.id}",
            cidr_block="172.16.0.0/29",
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
        )
        ali_security_group = cls.alicloud_security_group(
            "group",
            name="all-open",
            vpc_id="${alicloud_vpc.vpc.id}",
            description="open all ports",
            inner_access_policy="Accept",
        )
        ali_internet_security_group_rule = cls.alicloud_security_group_rule(
            "internet",
            # nic_type="internet",
            security_group_id="${alicloud_security_group.group.id}",
            type="ingress",
            port_range="-1/-1",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
        )
        ali_intranet_security_group_rule = cls.alicloud_security_group_rule(
            "intranet",
            # nic_type="intranet",
            security_group_id="${alicloud_security_group.group.id}",
            port_range="-1/-1",
            type="egress",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
            priority=1,
        )
        ali_cloud_server = cls.alicloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
            # security_groups=security_groups,
            security_groups="${alicloud_security_group.group.*.id}",
            vswitch_id="${alicloud_vswitch.vswitch.id}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            security_enhancement_strategy="Deactive",
            instance_charge_type="PostPaid",
            internet_charge_type="PayByTraffic",
            internet_max_bandwidth_out=2,
        )
        ali_output_ip = Output("ip",
                               value="${alicloud_instance.server.public_ip}")
        ali_output_id = Output("server_id",
                               value="${alicloud_instance.server.id}")

        if ssh_key_name:
            ali_cloud_server["key_name"] = ssh_key_name

        ali_cloud_config += ali_cloud_provider
        ali_cloud_config += ali_zone
        ali_cloud_config += ali_vpc
        ali_cloud_config += ali_vswitch
        ali_cloud_config += ali_security_group
        ali_cloud_config += ali_internet_security_group_rule
        ali_cloud_config += ali_intranet_security_group_rule
        ali_cloud_config += ali_cloud_server
        ali_cloud_config += ali_output_ip
        ali_cloud_config += ali_output_id

        return ali_cloud_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Build a remote-exec provisioner appending *public_key* to
        ``~/.ssh/authorized_keys`` on the target host.
        """
        return provisioner(
            "remote-exec",
            provisioner=provisioner(
                "remote-exec",
                inline=[
                    "mkdir -p ~/.ssh",
                    # Bug fix: the key must be echoed into the file —
                    # previously the key text itself was executed as a
                    # shell command.
                    f"echo '{public_key}' >> ~/.ssh/authorized_keys"
                ],
            ),
        )

    @classmethod
    def gen_ssh_conn_config(
        cls,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ) -> dict:
        """Build a terraform ssh connection block (30s timeout).

        See https://www.terraform.io/docs/provisioners/connection.html
        """
        conn = {}
        conn["type"] = "ssh"
        conn["user"] = ssh_user
        conn["private_key"] = ssh_private_key
        conn["host"] = ssh_host
        conn["port"] = ssh_port
        conn["timeout"] = "30s"
        return conn

    @classmethod
    def gen_site_docker_deploy_config(
        cls,
        *,
        docker_host: str = DEFAULT_DOCKER_HOST,
        site_name: str = None,
        template_tar_bytes: bytes = None,
        script: str = None,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config deploying an nginx container for *site_name*.

        Optional *template_tar_bytes* is uploaded (base64) into the
        container's html dir; *script* adds an entrypoint shell script.
        """
        config = Terrascript()
        docker_provider = provider.docker(
            host=docker_host,
            connection=cls.gen_ssh_conn_config(
                ssh_user=ssh_user,
                ssh_private_key=ssh_private_key,
                ssh_host=ssh_host,
                ssh_port=ssh_port,
            ),
        )
        docker_image_resource = resource.docker_image(
            "nginx_image",
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            "nginx_container",
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={"internal": 80},
            upload=[],
        )
        docker_name_resource = resource.random_pet(
            "docker_pet_name",
            length=1,
        )

        if template_tar_bytes:
            # Bug fix: this was wrapped in a 1-tuple, which corrupted the
            # upload path below; the stray '$' path prefix is gone too.
            template_tar_file = (
                f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz")
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode("utf8")
            template_tar_path = (
                f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            )
            docker_container_resource["upload"].append({
                "content_base64": template_tar_file_content,
                "file": template_tar_path
            })

        if script:
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH,
            )
            docker_container_resource["upload"].append({
                "content": entrypoint_sh_content,
                "file": cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(
        self,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config that runs ``ls -la`` on the target host over ssh."""
        connection = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        remote = provisioner("remote-exec",
                             inline=["ls -la"],
                             connection=connection)

        config = Terrascript()
        config += self.null_resource("remote-exec", provisioner=remote)
        return config

    def upload_file(
        self,
        content: str,
        *,
        destination: str = DEFAULT_UPLOAD_PATH,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config uploading *content* to *destination* on the
        target host via a terraform ``file`` provisioner over ssh.
        """
        upload_config = Terrascript()

        ssh_conn = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        file_resource = self.null_resource(
            "upload_file_resource",
            provisioner=provisioner(
                self.TERRAFORM_RESOURCE_FILE,
                content=content,
                destination=destination,
                connection=ssh_conn,
            ),
        )

        upload_config += file_resource
        return upload_config