Ejemplo n.º 1
0
def main(username, password):
    """Destroy the Azure demo infrastructure and clean up terraform state.

    For each deployment directory that still has a terraform.tfstate file,
    read the resource-group name from the terraform outputs, delete the
    group via the az CLI, then remove the local state files.

    :param username: unused here; kept for interface compatibility
    :param password: unused here; kept for interface compatibility
    """
    #get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout)
    tfstate_files = ['terraform.tfstate', 'terraform.tfstate.backup']

    def _destroy_rg(working_dir, output_name):
        # Only act when a state file exists; otherwise nothing is deployed.
        if os.path.isfile(working_dir + '/terraform.tfstate'):
            tf = Terraform(working_dir=working_dir)
            rg_name = tf.output(output_name)
            az_cli('group delete --name ' + rg_name + ' --yes')

    #
    # Destroy Infrastructure
    #
    _destroy_rg('./WebInDeploy', 'RESOURCE_GROUP_NAME')
    delete_state_files('./WebInDeploy/', tfstate_files)

    _destroy_rg('./WebInBootstrap', 'Resource_Group')
    delete_state_files('./WebInBootstrap/', tfstate_files)

    #
    # WebInFWConf has no resource group of its own; just drop its state files.
    #
    delete_state_files('./WebInFWConf/', tfstate_files)
Ejemplo n.º 2
0
class TerraformProvider():
    """Wires a python_terraform controller to a remote-backend provider."""

    def __init__(self, configuration, terraform_workspace):
        """Build the controller for *terraform_workspace* and run init."""
        log.info("Preparing terraform deployment")
        log.debug("Using workspace: {}".format(terraform_workspace))

        self._backend_provider = get_backend_provider(
            configuration, terraform_workspace)
        tf_parameters = configuration["terraform"]["parameters"]
        self._controller = Terraform(working_dir=terraform_workspace,
                                     variables=tf_parameters)
        # force_copy auto-confirms any state-migration prompt during init.
        self._controller.init(capture_output=False, force_copy=IsFlagged)

    @stage("Terraform deploy")
    def deploy(self):
        """Apply the configuration and publish the outputs as an artifact."""
        log.info("Deploying terraform infrastructure")

        self._backend_provider.init_remote_backend()
        self._controller.apply(capture_output=False, skip_plan=True)
        artifact.create("terraform_output",
                        content=json.dumps(self._controller.output()))

    def destroy(self):
        """Tear the infrastructure down, then drop the remote backend."""
        log.info("Destroying terraform infrastructure")

        self._controller.destroy(capture_output=False)
        self._backend_provider.remove_remote_backend()
Ejemplo n.º 3
0
def terraform_apply(env_data, tf: Terraform):
    """Run `terraform apply` with retries and collect state and outputs.

    Retries the apply up to 5 times, sleeping 30 s between failed attempts.
    On success, fetches the state via `terraform show -json` and the outputs.

    :param env_data: path to a tfvars file passed as -var-file
    :param tf: python_terraform Terraform controller
    :return: dict with keys tf_return_code, tf_outputs and tf_state
    """
    max_attempts = 5
    return_code = 0
    for attempt in range(max_attempts):
        logger.debug("Try {}".format(attempt))
        return_code, stdout, stderr = tf.apply(skip_plan=True,
                                               var_file=env_data,
                                               capture_output=True)
        logger.debug('Terraform apply return code is {}'.format(return_code))
        logger.debug('Terraform apply stdout is {}'.format(stdout))
        logger.debug("Terraform apply stderr is {}".format(stderr))
        if return_code == 0:
            break
        # Bug fix: the original slept 30 s even after the final failed
        # attempt; only sleep when another retry will actually happen.
        if attempt < max_attempts - 1:
            time.sleep(30)

    if return_code == 0:
        # python_terraform returns (rc, stdout, stderr); the JSON state is
        # the stdout of `terraform show -json`.
        show_return_code, tf_state, show_err = tf.show(json=True)
        logger.debug(
            'Terraform show return code is {}'.format(show_return_code))
        logger.debug('Terraform show stdout is {}'.format(show_err))
        tf_outputs = tf.output()
        for output_value in tf_outputs:
            logger.debug('Terraform output value is {}'.format(output_value))
    else:
        # TODO get output for errors
        tf_state = {}
        tf_outputs = {}
        traceback.print_stack()
    return {
        "tf_return_code": return_code,
        "tf_outputs": tf_outputs,
        "tf_state": tf_state
    }
Ejemplo n.º 4
0
class Terraform:
    """Thin wrapper around python_terraform bound to the local './terraform' dir.

    Every operation raises CwsCommandError when the underlying terraform
    command exits with a non-zero return code.
    """

    def __init__(self, init):
        # Imported lazily so importing this module does not require
        # python_terraform to be installed.
        from python_terraform import Terraform as PythonTerraform

        self.terraform = PythonTerraform(working_dir='terraform')
        # Ensure the working directory exists before terraform runs in it.
        Path(self.working_dir).mkdir(exist_ok=True)
        if init:
            return_code, _, err = self.terraform.init(
                dir_or_plan=self.working_dir)
            if return_code != 0:
                raise CwsCommandError(err)

    @property
    def working_dir(self):
        # Delegates to the underlying controller ('terraform' by construction).
        return self.terraform.working_dir

    def init(self):
        """Run `terraform init`; raise CwsCommandError on failure."""
        return_code, _, err = self.terraform.init()
        if return_code != 0:
            raise CwsCommandError(err)

    def apply(self, workspace, targets):
        """Apply *targets* in *workspace*, skipping the separate plan stage."""
        self.select_workspace(workspace)
        return_code, _, err = self.terraform.apply(target=targets,
                                                   skip_plan=True,
                                                   input=False,
                                                   raise_on_error=False,
                                                   parallelism=1)
        if return_code != 0:
            raise CwsCommandError(err)

    def destroy(self, workspace, targets):
        """Destroy *targets* in *workspace*."""
        self.select_workspace(workspace)
        return_code, _, err = self.terraform.destroy(target=targets)
        if return_code != 0:
            raise CwsCommandError(err)

    def output(self):
        """Return outputs of the default workspace as {name: value}.

        NOTE(review): when terraform reports no outputs this returns the
        *string* "{}" rather than an empty dict — callers may depend on
        that; confirm before changing.
        """
        self.select_workspace("default")
        values = self.terraform.output(capture_output=True)
        return {key: value['value']
                for key, value in values.items()} if values else "{}"

    def workspace_list(self):
        """Return an iterator over workspace names from `workspace list`."""
        self.select_workspace("default")
        return_code, out, err = self.terraform.cmd('workspace', 'list')
        if return_code != 0:
            raise CwsCommandError(err)
        # Drops the first character of the output (presumably the '*'
        # current-workspace marker — confirm), strips spaces/tabs/CRs,
        # then splits into one name per line.
        values = out[1:].translate(str.maketrans('', '', ' \t\r')).split('\n')
        return filter(None, values)

    def select_workspace(self, workspace):
        """Select *workspace*, creating it on demand; init if needed."""
        return_code, out, err = self.terraform.workspace('select', workspace)
        # A failed select of a non-default workspace is taken to mean it
        # does not exist yet, so create it instead.
        if workspace != 'default' and return_code != 0:
            _, out, err = self.terraform.workspace('new',
                                                   workspace,
                                                   raise_on_error=True)
        if not (Path(self.working_dir) / '.terraform').exists():
            self.terraform.init(input=False, raise_on_error=True)
Ejemplo n.º 5
0
class TerraformBaseAction(Action):
    def __init__(self, config):
        """Creates a new BaseAction given a StackStorm config object (kwargs works too)
        Also stores the Terraform class from python_terraform in a class variable
        :param config: StackStorm configuration object for the pack
        :returns: a new BaseAction
        """
        super(TerraformBaseAction, self).__init__(config)
        self.terraform = Terraform()

    def check_result(self,
                     return_code,
                     stdout,
                     stderr,
                     return_output=False,
                     valid_return_codes=None):
        """Check the return code from the terraform action and return the output with
        the error message (if there is one)
        :param return_code: return code from the Terraform command that was run
        :param stdout: output from the Terraform command that was run
        :param stderr: error message (if any) from the Terraform command
        :param return_output: whether to return the results of `terraform output` command
        :param valid_return_codes: list of valid return codes
        :returns: success flag for the st2 action and output from the Terraform command
        """
        if valid_return_codes is None:
            valid_return_codes = [0]
        success = return_code in valid_return_codes
        if return_output:
            # Only fetch `terraform output` when the command itself succeeded.
            output = (self.terraform.output(state=self.terraform.state)
                      if success else None)
        else:
            output = TerraformBaseAction.concat_std_output(stdout, stderr)
        return success, output

    @staticmethod
    def concat_std_output(stdout, stderr):
        """Combines stdout and stderr from terraform execution, one or the other
        can be None
        :param stdout: results from stdout
        :param stderr: results from stderr
        :returns: string concatenation, or None when both streams are empty
        """
        pieces = [stream for stream in (stdout, stderr) if stream]
        return "\n".join(pieces) if pieces else None
Ejemplo n.º 6
0
def terraform_output(setup_terraform,
                     terraform_bin_path) -> Dict[str, Dict[str, str]]:
    """Fetch the terraform outputs, failing loudly when none are available."""
    controller = Terraform(working_dir=full_path,
                           terraform_bin_path=terraform_bin_path)
    result = controller.output()
    if result is None:
        raise Exception("Cannot retrieve the outputs")
    return result
Ejemplo n.º 7
0
def main(username, password):
    """Destroy the WebInDeploy and WebInBootstrap infrastructure.

    Reads the resource-group names from the WebInDeploy terraform outputs,
    deletes the groups through the az CLI and removes the local state files.

    :param username: unused here; kept for interface compatibility
    :param password: unused here; kept for interface compatibility
    """
    #get_default_cli().invoke(['login', "--use-device-code"], out_file=sys.stdout)
    #
    # Destroy Infrastructure
    #
    tfstate_file = 'terraform.tfstate'
    # Bug fix: rg_name1 was previously bound only inside the first if-block,
    # raising NameError when WebInDeploy's state file is absent but
    # WebInBootstrap's is present.  Default it to None and guard its use.
    rg_name1 = None

    fpath = './WebInDeploy/' + tfstate_file
    if os.path.isfile(fpath):
        tf = Terraform(working_dir='./WebInDeploy')
        rg_name = tf.output('RG_Name')
        rg_name1 = tf.output('Attacker_RG_Name')
        delete_rg_cmd = 'group delete --name ' + rg_name + ' --yes'
        az_cli(delete_rg_cmd)
        delete_file(fpath)

    fpath = './WebInBootstrap/' + tfstate_file
    if os.path.isfile(fpath):
        if rg_name1 is not None:
            delete_rg_cmd = 'group delete --name ' + rg_name1 + ' --yes'
            az_cli(delete_rg_cmd)
        delete_file(fpath)
Ejemplo n.º 8
0
 def test_override_default(self, folder, variables):
     """Variables given via apply(var=...) must override the defaults."""
     tf = Terraform(working_dir=current_path, variables=variables)
     tf.init(folder)
     return_code, stdout, stderr = tf.apply(
         folder,
         var={"test_var": "test2"},
         no_color=IsNotFlagged,
     )
     flattened = stdout.replace("\n", "")
     assert "\x1b[0m\x1b[1m\x1b[32mApply" in flattened
     assert "test2" in tf.output("test_output")
Ejemplo n.º 9
0
 def test_output(self, caplog: LogCaptureFixture, output_all: bool):
     """output() with no args returns everything; with a name, one value."""
     expected_value = "test"
     required_output = "test_output"
     with caplog.at_level(logging.INFO):
         tf = Terraform(working_dir=current_path,
                        variables={"test_var": expected_value})
         tf.init("var_to_output")
         tf.apply("var_to_output")
         query = tuple() if output_all else (required_output, )
         result = tf.output(*query)
     if not output_all:
         assert result == expected_value
     else:
         assert result[required_output]["value"] == expected_value
     # The value must also have been logged.
     assert expected_value in caplog.messages[-1]
Ejemplo n.º 10
0
def vm_docker_deploy_old(config):
    """Deploy a VM + docker test stack, collect its logs, then tear it down.

    :param config: job configuration dict; reads params/selection/base_dir/
        job_id and writes the collected terraform outputs into config["logs"].
    :return: the same config dict with "logs" filled in (None when no
        instance was selected).
    """
    # This script should deploy the instance and return the output/logs after the test has finished

    file_dir = os.path.dirname(os.path.realpath(__file__))
    provider = config["params"]["Provider"][0]
    ### Check that a selection was made
    if config["selection"]["instance"] is None:
        config["logs"] = None
        return config

    ### Setup terraform objects
    instance_wkdir = file_dir + "/instance_deploy/" + provider
    instance_tf = Terraform(working_dir=instance_wkdir)
    docker_tf = Terraform(file_dir + "/docker_deploy")

    # Per-job state lives under base_dir/tf_states/<job_id>.
    tfstate_path = config["base_dir"] + '/tf_states/' + str(config["job_id"])
    tfvars = config["base_dir"] + "/tfvars.tfvars"

    ## ALSO DIRECT TO A VARS.TF IN THE BASE_DIR
    instance_tf.init(
        backend_config={'path': tfstate_path + '/terraform.tfstate'})
    instance_tf.apply(var_file=tfvars,
                      lock=False,
                      var={'instance_type': config["selection"]["instance"]},
                      skip_plan=True)

    docker_tf.init(backend_config={
        'path': tfstate_path + '/docker_tfstate/terraform.tfstate'
    })
    docker_tf.apply(var_file=tfvars,
                    lock=False,
                    var={'tfstate_path': tfstate_path},
                    skip_plan=True)

    # NOTE(review): docker_tf.init is re-run with the identical backend
    # config before each operation below — presumably redundant; confirm
    # before simplifying.
    docker_tf.init(backend_config={
        'path': tfstate_path + '/docker_tfstate/terraform.tfstate'
    })
    logs = docker_tf.output()
    config["logs"] = logs
    docker_tf.init(backend_config={
        'path': tfstate_path + '/docker_tfstate/terraform.tfstate'
    })
    # Tear everything down again: docker stack first, then the instance.
    docker_tf.destroy(auto_approve=True)
    instance_tf.init(
        backend_config={'path': tfstate_path + '/terraform.tfstate'})
    instance_tf.destroy(auto_approve=True)

    return config
Ejemplo n.º 11
0
def apply_tf(working_dir, vars, description):
    """
    Handles terraform operations and returns variables in outputs.tf as a dict.
    :param working_dir: Directory that contains the tf files
    :param vars: Additional variables passed in to override defaults equivalent to -var
    :param description: Description of the deployment for logging purposes
    :return:    return_code - 0 for success or other for failure
                outputs - Dictionary of the terraform outputs defined in the outputs.tf file

    """
    # Set run_plan to True if you wish to run terraform plan before apply.
    run_plan = False
    kwargs = {"auto-approve": True}

    # 'capture_output' can never be present in the literal kwargs above, so
    # this always resolves to False and terraform streams to the console.
    # (The dead subprocess.PIPE / sys.stdout plumbing that used to sit here
    # was never read by anything and has been removed.)
    capture_output = kwargs.pop('capture_output', False)

    start_time = time.asctime()
    print('Starting Deployment at {}\n'.format(start_time))

    # Create Bootstrap

    tf = Terraform(working_dir=working_dir)

    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False)

    # NOTE(review): python_terraform normally takes 'var=' for -var values;
    # 'vars=' is kept as-is because changing it would alter the CLI call.
    return_code, stdout, stderr = tf.apply(vars=vars,
                                           capture_output=capture_output,
                                           skip_plan=True,
                                           **kwargs)
    outputs = tf.output()

    logger.debug('Got Return code {} for deployment of  {}'.format(
        return_code, description))

    return (return_code, outputs)
Ejemplo n.º 12
0
def create(pathToInf):
    """Apply the IaC rooted at *pathToInf* and return (outputs, return_code)."""
    # The working directory is the directory containing the .tf files.
    controller = Terraform(pathToInf)

    # Apply immediately, skipping the separate planning stage.
    return_code, out, err = controller.apply(skip_plan=True)

    # Collect the declared outputs and echo everything for debugging.
    results = controller.output()
    print(err)
    print(out)
    print(results)
    return results, return_code
Ejemplo n.º 13
0
def build_controller():
    """Init and apply the controller stack; return its terraform outputs.

    Exits the process with status 1 when the apply reports a failure.
    """
    # Both the S3 state bucket and the lock table share the same name.
    backend_configs = [
        'bucket=' + str(s3_bucket),
        'dynamodb_table=' + str(s3_bucket),
    ]

    tf_base = Terraform(workdir_ctrl)
    tf_base.init(backend_config=backend_configs)
    #tf_base.plan(capture_output=False, var_file=tfvars)
    apply_result = tf_base.apply(skip_plan=True,
                                 capture_output=False,
                                 var_file=tfvars)
    controller_outputs = tf_base.output(capture_output=True)
    if apply_result[0] == 1:
        print("Something went wrong!")
        sys.exit(1)
    print("All good!")
    return controller_outputs
Ejemplo n.º 14
0
def orchestration():
    """Apply the ./openstack IaC and return the terraform outputs."""
    # Working directory is the openstack directory.
    controller = Terraform("./openstack")

    # Apply straight away, skipping the separate planning stage.
    return_code, out, err = controller.apply(skip_plan=True)

    # Echo the raw results for debugging.
    print(return_code)
    print(out)
    print(err)

    # Fetch and return the outputs from the apply.
    return controller.output()
Ejemplo n.º 15
0
        print("confirmed password")

        wait_until_channel_endswith(ssh_channel, b'# ')
        ssh_channel.send(f'commit\n')
        print("sent commit")

        # longer timeout of 60s to cater to commit time
        wait_until_channel_endswith(ssh_channel, b'# ', 60)
        print("changed admin password")


urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Read the firewall management addresses and credentials from the local
# terraform state (working dir is the current directory).
working_dir = "./"
tf = Terraform(working_dir=working_dir)
outputs = tf.output()
fw1_mgmt = outputs['FW-1-Mgmt']['value']
fw2_mgmt = outputs['FW-2-Mgmt']['value']
# NOTE(review): username looks scrubbed by the snippet exporter ("******")
# — restore the real admin user name before running.
username = "******"
new_password = outputs['password_new']['value']
ssh_key_path = outputs['ssh_key_path']['value']

# Give the firewalls time to finish booting before the first SSH attempt.
time.sleep(600)

fw_init(fw1_mgmt, username, ssh_key_path, new_password)
fw_init(fw2_mgmt, username, ssh_key_path, new_password)

# Get new API Key
url = "https://%s/api/?type=keygen&user=%s&password=%s" % (fw1_mgmt, username,
                                                           new_password)
# verify=False: the firewall presents a self-signed cert at this point.
response = requests.get(url, verify=False)
Ejemplo n.º 16
0
def main(username, password, aws_access_key, aws_secret_key, aws_region,
         ec2_key_pair, bootstrap_bucket):
    """Deploy WebInDeploy, waf_conf and WebInFWConf terraform stacks on AWS.

    Builds the base infrastructure, applies WAF rules to the application
    load balancer, waits for the firewall to come up, pushes the firewall
    configuration and commits it, then checks the Jenkins server behind
    the ALB.  Progress is accumulated via update_status() and dumped as
    JSON on stdout; the process exits with code 1 when any stack fails.
    """
    # (The former no-op self-assignments of the parameters were removed.)
    albDns = ''
    nlbDns = ''
    fwMgt = ''

    default_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region
    }

    WebInDeploy_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'bootstrap_s3bucket': bootstrap_bucket
    }

    waf_conf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'alb_arn': albDns,
        'nlb-dns': nlbDns
    }

    WebInFWConf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'mgt-ipaddress-fw1': fwMgt,
        'nlb-dns': nlbDns,
        'username': username,
        'password': password
    }

    # Set run_plan to True if you wish to run terraform plan before apply.
    run_plan = False
    kwargs = {"auto-approve": True}

    # 'capture_output' is never present in the kwargs literal above, so this
    # always resolves to False and terraform streams to the console.
    capture_output = kwargs.pop('capture_output', False)

    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, everything goes to stdout and stderr
        stderr = sys.stderr
        stdout = sys.stdout
        start_time = time.asctime()
        print(f'Starting Deployment at {start_time}\n')

    # Build Infrastructure

    tf = Terraform(working_dir='./WebInDeploy')

    tf.cmd('init')
    if run_plan:
        # print('Calling tf.plan')
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    web_in_deploy_output = tf.output()

    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))

    # update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_deploy_output', web_in_deploy_output)

    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    # Pull the addresses generated by the first stack for the later stages.
    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fwMgtIP = fwMgt

    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    WebInFWConf_vars['nlb-dns'] = nlbDns

    WebInDeploy_vars['alb_dns'] = albDns
    WebInDeploy_vars['nlb-dns'] = nlbDns

    #
    # Apply WAF Rules
    #

    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying WAF config to App LB")

    if run_plan:
        # Bug fix: this previously passed the builtin `vars` function
        # (var=vars) instead of the WAF variable dict.
        tf.plan(capture_output=capture_output, var=waf_conf_vars, **kwargs)

    return_code3, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=waf_conf_vars,
                                            **kwargs)

    waf_conf_out = tf.output()

    update_status('waf_conf_output', waf_conf_out)
    # update_status('waf_conf_stdout', stdout)
    # update_status('waf_conf_stderr', stderr)

    logger.debug('Got Return code to deploy waf_conf {}'.format(return_code3))

    if return_code3 != 0:
        logger.info("waf_conf failed")
        update_status('waf_conf_status', 'error')
        update_status('waf_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('waf_conf_status', 'success')

    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    # #

    api_key = getApiKey(fwMgtIP, username, password)

    # Poll until both the management plane and the dataplane report up.
    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")

        elif err == 'no':
            logger.info("FW is not up...yet")
            # print("FW is not up...yet")
            time.sleep(60)
            continue

        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue

        elif err == 'yes':
            logger.info("FW is up")
            break

    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions'
    )
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP,
                           api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")

    update_fw(fwMgtIP, api_key)
    updateHandle = updater.ContentUpdater(fw)

    # updateHandle.download(fw)
    # logger.info("Waiting 3 minutes for content update to download")
    # time.sleep(210)
    # updateHandle.install()

    #
    # Configure Firewall
    #

    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    # update initial vars with generated fwMgt ip
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt

    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars,
                                            **kwargs)

    web_in_fw_conf_out = tf.output()

    update_status('web_in_fw_conf_output', web_in_fw_conf_out)
    # update_status('web_in_fw_conf_stdout', stdout)

    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))

    if return_code2 != 0:
        logger.error("WebFWConfy failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")

    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #

    logger.info('Checking if Jenkins Server is ready')

    # FIXME - add outputs for all 3 dirs

    res = getServerStatus(albDns)

    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n   ### Deployment Complete ###')
        logger.info(
            '\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n   ### Deployment Complete ###')

    # dump out status to stdout
    print(json.dumps(status_output))
Ejemplo n.º 17
0
def main(fwUsername, fwPasswd):
    """Deploy WebInDeploy + waf_conf + WebInFWConf and verify Jenkins.

    Applies the three terraform stacks in order, waits for the firewall
    management plane and dataplane to come up, installs the latest content
    pack, commits the firewall config and polls the Jenkins server behind
    the ALB.  Stage results are persisted through write_status_file().

    :param fwUsername: firewall admin user name
    :param fwPasswd: firewall admin password
    """

    albDns = ''
    nlbDns = ''
    fwMgt = ''

    # Set run_plan to TRUE if you wish to run terraform plan before apply
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess and setting capture_output to True will capture output
    # capture_output = kwargs.pop('capture_output', True)
    #
    # if capture_output is True:
    #     stderr = subprocess.PIPE
    #     stdout = subprocess.PIPE
    # else:
    #     stderr = sys.stderr
    #     stdout = sys.stdout

    #
    # Build Infrastructure
    #

    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            **kwargs)
    #return_code1 =0
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        deployment_status = {'WebInDeploy': 'Fail'}
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status = {'WebInDeploy': 'Success'}
        write_status_file(deployment_status)

    # Pull the addresses generated by the first stack for the later stages.
    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    # fwUsername = "******"
    # fwPasswd = "PaloAlt0!123!!"
    fw_trust_ip = fwMgt

    #
    # Apply WAF Rules
    #

    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying WAF config to App LB")

    if run_plan:
        tf.plan(capture_output=False, var={'alb_arn': nlbDns}, **kwargs)

    return_code3, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'alb_arn': nlbDns,
                                                'int-nlb-fqdn': nlbDns
                                            },
                                            **kwargs)

    if return_code3 != 0:
        logger.info("waf_conf failed")
        deployment_status.update({'waf_conf': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'waf_conf': 'Success'})
        write_status_file(deployment_status)

    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #

    # NOTE(review): FWNotUpException is only raised from commented-out code
    # below; kept for parity with the original flow.
    class FWNotUpException(Exception):
        pass

    err = 'no'
    api_key = ''
    api_key = getApiKey(fw_trust_ip, fwUsername, fwPasswd)

    # Poll until both the management plane and the dataplane report up.
    while True:
        err = getFirewallStatus(fw_trust_ip, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
            #raise FWNotUpException('FW is not up!  Request Timeout')

        elif err == 'no':
            logger.info("FW is not up...yet")
            print("FW is not up...yet")
            time.sleep(60)
            continue
            #raise FWNotUpException('FW is not up!')
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("[INFO]: FW is up")
            break

    fw = firewall.Firewall(hostname=fw_trust_ip,
                           api_username=fwUsername,
                           api_password=fwPasswd)
    logger.info("Updating firewall with latest content pack")
    updateHandle = updater.ContentUpdater(fw)

    updateHandle.download()

    logger.info("Waiting 3 minutes for content update to download")
    time.sleep(210)
    updateHandle.install()

    #
    # Configure Firewall
    #

    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    if run_plan:
        tf.plan(capture_output=False,
                var={
                    'mgt-ipaddress-fw1': fwMgt,
                    'int-nlb-fqdn': nlbDns
                })

    # NOTE(review): aws_access_key / aws_secret_key are not defined in this
    # function — presumably module-level globals; otherwise this raises
    # NameError.  Confirm against the full module.
    return_code2, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'mgt-ipaddress-fw1': fwMgt,
                                                'nlb-dns': nlbDns,
                                                'aws_access_key':
                                                aws_access_key,
                                                'aws_secret_key':
                                                aws_secret_key
                                            },
                                            **kwargs)
    #return_code2 = 0
    if return_code2 != 0:
        logger.info("WebFWConfy failed")
        deployment_status.update({'WebFWConfy': 'Fail'})
        write_status_file(deployment_status)
        exit()
    else:
        deployment_status.update({'WebFWConf': 'Success'})
        write_status_file(deployment_status)

    logger.info("Commit changes to firewall")

    fw.commit()

    logger.info('Checking if Jenkins Server is ready')

    #    tf = Terraform(working_dir='./WebInDeploy')
    #   albDns = tf.output('ALB-DNS')
    count = 0
    max_tries = 3
    # Retry the health check up to max_tries times before giving up on
    # seeing 'server_up'.
    while True:
        if count < max_tries:
            res = getServerStatus(albDns)
            if res == 'server_down':
                count = count + 1
                time.sleep(2)
                continue
            elif res == 'server_up':
                break
        else:
            break
    logger.info('Jenkins Server is ready')
    logger.info('\n\n   ### Deployment Complete ###')
    logger.info('\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
Ejemplo n.º 18
0
import pytest

import json
import os

from python_terraform import Terraform
# Load the terraform outputs once at import time; the fixtures below pull
# individual values out of this shared snapshot.
tf = Terraform(working_dir='infrastructure/')
stack = tf.output()


@pytest.fixture
def reader_lambda_runtime():
    """Expose the reader queue URL via QUEUE_URL for the test's duration."""
    os.environ['QUEUE_URL'] = stack['reader_queue_url']['value']
    yield
    os.environ.pop('QUEUE_URL')


@pytest.fixture
def writer_lambda_runtime():
    """Expose the table name via DYNAMODB_TABLE_NAME for the test's duration."""
    os.environ['DYNAMODB_TABLE_NAME'] = stack['dynamodb_table']['value']
    yield
    os.environ.pop('DYNAMODB_TABLE_NAME')


@pytest.fixture
def model_lambda_runtime():
    """Point the model lambda at its output queue and model artifact.

    Sets OUTPUT_QUEUE_URL and MODEL_PATH for the duration of the test.
    """
    os.environ["OUTPUT_QUEUE_URL"] = stack["writer_queue_url"]["value"]
    os.environ["MODEL_PATH"] = (
        "s3://kalemiller-model-artifacts/real-time-wine/model.joblib")
    yield
    del os.environ["OUTPUT_QUEUE_URL"]
    # Bug fix: MODEL_PATH previously leaked into subsequent tests because it
    # was never removed during teardown.
    del os.environ["MODEL_PATH"]
Ejemplo n.º 19
0
def main(username, password, rg_name, azure_region):
    """Deploy the three-stage Jenkins demo environment to Azure.

    Stage 1 (WebInBootstrap) creates the resource group and bootstrap
    storage, stage 2 (WebInDeploy) builds the infrastructure including the
    firewall, and stage 3 (WebInFWConf) pushes the firewall configuration.
    Progress is accumulated via update_status() and the collected
    status_output dict is dumped as JSON before every exit.

    :param username: admin username applied to all stages
    :param password: admin password applied to all stages
    :param rg_name: Azure resource-group name for the bootstrap stage
    :param azure_region: Azure region used by all stages
    """
    WebInBootstrap_vars = {'RG_Name': rg_name, 'Azure_Region': azure_region}

    WebInDeploy_vars = {
        'Admin_Username': username,
        'Admin_Password': password,
        'Azure_Region': azure_region
    }

    WebInFWConf_vars = {'Admin_Username': username, 'Admin_Password': password}

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess; setting capture_output to True will
    # capture the subprocess output instead of streaming it to the console.
    capture_output = kwargs.pop('capture_output', False)

    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, then everything will essentially go to
        # stdout and stderr
        stderr = sys.stderr
        stdout = sys.stdout
        start_time = time.asctime()
        print(f'Starting Deployment at {start_time}\n')

    #
    # Stage 1: create bootstrap resources
    #
    tf = Terraform(working_dir='./WebInBootstrap')

    tf.cmd('init')
    if run_plan:
        tf.plan(capture_output=False)
    return_code1, stdout, stderr = tf.apply(vars=WebInBootstrap_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    resource_group = tf.output('Resource_Group')
    bootstrap_bucket = tf.output('Bootstrap_Bucket')
    storage_account_access_key = tf.output('Storage_Account_Access_Key')
    web_in_bootstrap_output = tf.output()

    # Log message previously said "WebInDeploy" although this is the
    # bootstrap stage's return code.
    logger.debug(
        'Got Return code for deploy WebInBootstrap {}'.format(return_code1))

    # NOTE(review): this records the BOOTSTRAP stdout under the
    # 'web_in_deploy_stdout' key, which stage 2 overwrites below — probably
    # meant 'web_in_bootstrap_stdout'. Key kept to avoid breaking consumers.
    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_bootstrap_output', web_in_bootstrap_output)

    if return_code1 != 0:
        logger.info("WebInBootstrap failed")
        # Key was previously misspelled 'web_in_bootstap_status', so the
        # error path wrote a different key than the success path.
        update_status('web_in_bootstrap_status', 'error')
        update_status('web_in_bootstrap_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_bootstrap_status', 'success')

    share_prefix = 'jenkins-demo'

    # Bootstrap files are published on an Azure file share the firewall
    # reads at first boot.
    share_name = create_azure_fileshare(share_prefix, bootstrap_bucket,
                                        storage_account_access_key)

    WebInDeploy_vars.update(
        {'Storage_Account_Access_Key': storage_account_access_key})
    WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket})
    WebInDeploy_vars.update({'RG_Name': resource_group})
    WebInDeploy_vars.update({'Attack_RG_Name': resource_group})
    WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name})

    #
    # Stage 2: build infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    web_in_deploy_output = tf.output()

    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))

    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_deploy_output', web_in_deploy_output)
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fwMgtIP = tf.output('MGT-IP-FW-1')

    logger.info("Got these values from output \n\n")
    logger.info("AppGateway address is {}".format(albDns))
    logger.info("Internal loadbalancer address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Poll until the firewall management plane and dataplane are both up
    #
    api_key = getApiKey(fwMgtIP, username, password)

    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
            # Back off before retrying; the original retried immediately,
            # spinning in a tight loop on persistent command errors.
            time.sleep(30)
            continue

        elif err == 'no':
            logger.info("FW is not up...yet")
            time.sleep(60)
            continue

        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue

        elif err == 'yes':
            logger.info("FW is up")
            break

    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions'
    )
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP,
                           api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")

    update_fw(fwMgtIP, api_key)

    #
    # Stage 3: configure the firewall
    #
    WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP})
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    # update initial vars with the generated firewall management IP
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt

    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars,
                                            **kwargs)

    web_in_fw_conf_out = tf.output()

    update_status('web_in_fw_conf_output', web_in_fw_conf_out)

    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))

    if return_code2 != 0:
        logger.error("WebInFWConf failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")

    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #
    logger.info('Checking if Jenkins Server is ready')

    # FIXME - add outputs for all 3 dirs

    res = getServerStatus(albDns)

    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n   ### Deployment Complete ###')
        logger.info(
            '\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n   ### Deployment Complete ###')

    # dump out status to stdout
    print(json.dumps(status_output))
Ejemplo n.º 20
0
def main(username, password):
    """Destroy the WebInDeploy and WebInBootstrap terraform deployments.

    Reads the resource-group names back out of the WebInDeploy state, runs
    `terraform destroy` for WebInDeploy and then WebInBootstrap, and exits
    early if the first destroy fails.

    :param username: admin username passed through as a terraform variable
    :param password: admin password passed through as a terraform variable
    """
    WebInDeploy_vars = {'Admin_Username': username, 'Admin_Password': password}

    WebInBootstrap_vars = {
        'Admin_Username': username,
        'Admin_Password': password
    }

    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    #
    # Destroy WebInDeploy infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    rg_name = tf.output('RG_Name')

    attack_rg_name = tf.output('Attacker_RG_Name')
    logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(
        rg_name, attack_rg_name))

    WebInDeploy_vars.update({'RG_Name': rg_name})
    WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name})

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          var=WebInDeploy_vars,
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        logger.info("Failed to destroy build ")
        exit()
    else:
        logger.info("Destroyed WebInDeploy ")

    #
    # Destroy WebInBootstrap resources
    #
    WebInBootstrap_vars.update({'RG_Name': rg_name})
    WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name})

    tf = Terraform(working_dir='./WebInBootstrap')

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          var=WebInBootstrap_vars,
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # The original logged "WebInBootstrap destroyed" here even though a
        # non-zero return code means the destroy FAILED, and it tagged the
        # status under the unrelated 'WebInDeploy' key.
        logger.info("Failed to destroy WebInBootstrap ")
        deployment_status = {'WebInBootstrap': 'Fail'}

        exit()
    else:
        logger.info("Destroyed WebInBootstrap ")
        deployment_status = {'WebInBootstrap': 'Success'}
        exit()
Ejemplo n.º 21
0
#!python

from python_terraform import Terraform, IsFlagged

# Bring the layers up in order: base first, then users on top of it.
layers = [
    Terraform(working_dir='iac/layer-base'),
    Terraform(working_dir='iac/layer-users'),
]
for layer in layers:
    layer.apply(auto_approve=IsFlagged, capture_output=False)

# The users layer exports the environment URL once both applies finish.
env_url = layers[-1].output('env_url')

print("Test with: " + env_url)
Ejemplo n.º 22
0
def remotestate():
    """Provision the remote-state backend and capture its terraform outputs."""
    backend = Terraform(remotestate_dic)
    backend.init()
    backend.apply(skip_plan=True, capture_output=False, var_file=tfvars)
    state_output = backend.output(capture_output=True)
Ejemplo n.º 23
0
class Terraform:
    """Wrapper around python-terraform plus terrascript config builders.

    Generates Terrascript configurations for DigitalOcean droplets, Vultr
    servers and nginx-in-docker site deployments, and applies/destroys them
    inside throwaway terraform workspaces.
    """

    # Docker / nginx deployment defaults.
    DEFAULT_DOCKER_HOST = 'unix:///var/run/docker.sock'
    DEFAULT_DOCKER_ENTRYPOINT_PATH = '/docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = '/nginx.docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_IMAGE = 'nginx:stable-alpine'
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = '/usr/share/nginx/html'
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    # NOTE(review): '******' looks like a scrubbed placeholder rather than a
    # usable default SSH user — confirm the intended value.
    DEFAULT_SSH_USER = '******'
    DEFAULT_SSH_PORT = 22

    # Name of terraform's "file" provisioner, used by upload_file().
    TERRAFORM_RESOURCE_FILE = 'file'

    # trick for terrascript: declare resource/provider types that terrascript
    # has no built-in classes for, so they can be added to a Terrascript config
    class null_resource(Resource):
        ...

    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        # All terraform commands run inside this shared working directory.
        self.work_dir = TERRAFORM_WORK_DIR
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Yield a temp dir inside a fresh, time-stamped terraform workspace,
        then switch back to 'default' and delete the workspace."""
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()

        # NOTE(review): no try/finally — if the body raises, the workspace is
        # never deleted and terraform stays switched into it.
        yield tmp_dir.name
        self.app.set_workspace('default')
        self.app.cmd('workspace delete', workspace, force=IsFlagged)

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        # Placeholder context manager: intended to patch the ssh config
        # before the body runs and clear it afterwards (both unimplemented).
        # patch ssh config
        yield
        # clear ssh config

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Serialise *config* to a .tf.json file in *dir_path*, run
        `terraform init` there, and return the open NamedTemporaryFile."""
        tmp_config_file = NamedTemporaryFile(mode='wt',
                                             suffix='.tf.json',
                                             dir=dir_path,
                                             delete=False)
        tmp_config_file.write(str(config))
        tmp_config_file.seek(0)

        self.app.init(
            dir_path,
            plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Run `terraform plan` for *config* in a throwaway workspace."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            plan = self.app.plan(tw_dir, no_color=IsFlagged)
        return plan

    def run_terraform_apply(self, config: Terrascript):
        """Apply *config* in a throwaway workspace and return its outputs
        flattened to a {name: value} dict."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            print(config)
            self.app.apply(tw_dir, skip_plan=True, no_color=IsFlagged)
            output_result = self.app.output(json=IsFlagged, no_color=IsFlagged)
            print(output_result)
            # flatten terraform's {'name': {'value': ...}} output shape
            output_var = {
                output_var_key: output_result[output_var_key]['value']
                for output_var_key in output_result
            }

        return output_var

    def run_terraform_destroy(self, config: Terrascript):
        """Destroy the resources described by *config*."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            destroy_result = self.app.destroy(tw_dir)
        return destroy_result

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        """Build a Terrascript config for a single DigitalOcean droplet.

        *config_data* supplies os_code/hostname/region_code/plan_code and
        optional ssh_keys; *token* is the DO API token.  When *public_key*
        is given, an ssh-key resource is created and attached to the
        droplet.  The config exposes 'ip' and 'server_id' outputs.
        """
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            f"server",
            image=config_data['os_code'],
            name=config_data['hostname'],
            region=config_data['region_code'],
            size=config_data['plan_code'],
            ssh_keys=config_data['ssh_keys']
            if config_data.get('ssh_keys') else [])
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            do_droplet_resource['ssh_keys'] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key

        do_output_ip = Output(
            'ip', value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output('server_id',
                              value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id

        return do_config

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        """Build a Terrascript config for a single Vultr server, mirroring
        gen_digital_ocean_config(); exposes 'ip' and 'server_id' outputs."""
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        vultr_server = cls.vultr_server(f"server",
                                        plan_id=config_data['plan_code'],
                                        region_id=config_data['region_code'],
                                        os_id=config_data['os_code'],
                                        hostname=config_data['hostname'],
                                        ssh_key_ids=config_data['ssh_keys']
                                        if config_data.get('ssh_keys') else [])
        vultr_output_ip = Output('ip', value="${vultr_server.server.main_ip}")
        vultr_output_id = Output('server_id',
                                 value="${vultr_server.server.id}")

        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key('vultr_ssh_key',
                                              name='default_key',
                                              ssh_key=public_key)

            vultr_server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            vultr_config += vultr_ssh_key

        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id

        return vultr_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        # NOTE(review): this nests provisioner() inside provisioner(), and
        # the second inline command has no 'echo', so the key text would be
        # executed rather than appended to authorized_keys — looks broken;
        # confirm intent before relying on it.
        return provisioner("remote-exec",
                           provisioner=provisioner(
                               "remote-exec",
                               inline=[
                                   'mkdir -p ~/.ssh',
                                   f"{public_key} >> ~/.ssh/authorized_keys"
                               ],
                           ))

    @classmethod
    def gen_ssh_conn_config(cls,
                            *,
                            ssh_user: str = DEFAULT_SSH_USER,
                            ssh_private_key: str,
                            ssh_host: str,
                            ssh_port: int = DEFAULT_SSH_PORT) -> dict:
        """Return a terraform 'connection' block for an ssh connection."""

        # see more in https://www.terraform.io/docs/provisioners/connection.html
        return {
            'type': 'ssh',
            'user': ssh_user,
            'private_key': ssh_private_key,
            'host': ssh_host,
            'port': ssh_port,
            'timeout': '30s'
        }

    @classmethod
    def gen_site_docker_deploy_config(cls,
                                      *,
                                      docker_host: str = DEFAULT_DOCKER_HOST,
                                      site_name: str = None,
                                      template_tar_bytes: bytes = None,
                                      script: str = None,
                                      ssh_user: str = DEFAULT_SSH_USER,
                                      ssh_private_key: str,
                                      ssh_host: str,
                                      ssh_port: int = DEFAULT_SSH_PORT):
        """Build a Terrascript config that runs an nginx container over ssh.

        Optionally uploads a base64-encoded site tarball
        (*template_tar_bytes*) and an entrypoint script (*script*) into the
        container via docker 'upload' blocks.
        """
        config = Terrascript()
        docker_provider = provider.docker(host=docker_host,
                                          connection=cls.gen_ssh_conn_config(
                                              ssh_user=ssh_user,
                                              ssh_private_key=ssh_private_key,
                                              ssh_host=ssh_host,
                                              ssh_port=ssh_port))
        docker_image_resource = resource.docker_image(
            'nginx_image',
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            'nginx_container',
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={'internal': 80},
            upload=[])
        # random_pet gives the container/tarball names a unique suffix.
        docker_name_resource = resource.random_pet(
            'docker_pet_name',
            length=1,
        )

        if template_tar_bytes:
            # BUG(review): the trailing comma makes template_tar_file a
            # 1-tuple, so the f-string below embeds the tuple's repr in the
            # upload path; also '${template_tar_file}' interpolates the
            # Python variable and leaves a literal '$'. Drop the comma (and
            # review the placeholder syntax) to get a sane path.
            template_tar_file = f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz",
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode('utf8')
            template_tar_path = f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/${template_tar_file}"
            # self.upload_file(
            #     content='conf/myapp.conf',
            #     destination=f"{self.DEFAULT_UPLOAD_PATH}/${template_tar_file}",
            #     ssh_user=ssh_user,
            #     ssh_private_key=ssh_private_key,
            #     ssh_host=ssh_host,
            #     ssh_port=ssh_port
            # )
            docker_container_resource['upload'].append({
                'content_base64':
                template_tar_file_content,
                'file':
                template_tar_path
            })

        if script:
            # Render the nginx entrypoint template around the user script
            # and upload it as the container's entrypoint.
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH)
            docker_container_resource['upload'].append({
                'content':
                entrypoint_sh_content,
                'file':
                cls.DEFAULT_DOCKER_ENTRYPOINT_PATH
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(self,
                    *,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config whose null_resource runs 'ls -la' over ssh."""
        exec_config = Terrascript()
        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        exec_resource = self.null_resource('remote-exec',
                                           provisioner=provisioner(
                                               "remote-exec",
                                               inline=['ls -la'],
                                               connection=ssh_conn))

        exec_config += exec_resource
        return exec_config

    def upload_file(self,
                    content: str,
                    *,
                    destination: str = DEFAULT_UPLOAD_PATH,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config that uploads *content* to *destination* over ssh
        using terraform's 'file' provisioner."""
        upload_config = Terrascript()

        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        file_resource = self.null_resource('upload_file_resource',
                                           provisioner=provisioner(
                                               self.TERRAFORM_RESOURCE_FILE,
                                               content=content,
                                               destination=destination,
                                               connection=ssh_conn))

        upload_config += file_resource
        return upload_config
Ejemplo n.º 24
0
class Terraform:
    DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"
    DEFAULT_DOCKER_ENTRYPOINT_PATH = "/docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = "/nginx.docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_IMAGE = "nginx:stable-alpine"
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = "/usr/share/nginx/html"
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    DEFAULT_SSH_USER = "******"
    DEFAULT_SSH_PORT = 22

    TERRAFORM_RESOURCE_FILE = "file"

    # trick for terrascript
    class null_resource(Resource):
        ...

    class tencentcloud(Provider):
        ...

    class tencentcloud_availability_zones(Data):
        ...

    class tencentcloud_images(Data):
        ...

    class tencentcloud_instance_types(Data):
        ...

    class tencentcloud_security_group(Resource):
        ...

    class tencentcloud_security_group_lite_rule(Resource):
        ...

    class tencentcloud_instance(Resource):
        ...

    class tencentcloud_key_pair(Resource):
        ...

    class alicloud(Provider):
        ...

    class alicloud_vpc(Resource):
        ...

    class alicloud_key_pair(Resource):
        ...

    class alicloud_security_group(Resource):
        ...

    class alicloud_security_group_rule(Resource):
        ...

    class alicloud_instance(Resource):
        ...

    class alicloud_vswitch(Resource):
        ...

    class alicloud_zones(Data):
        ...

    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        self.work_dir = TERRAFORM_WORK_DIR
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()

        yield tmp_dir.name
        self.app.set_workspace("default")
        self.app.cmd("workspace delete", workspace, force=IsFlagged)

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        # patch ssh config
        yield
        # clear ssh config

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        tmp_config_file = NamedTemporaryFile(mode="wt",
                                             suffix=".tf.json",
                                             dir=dir_path,
                                             delete=False)
        logging.info(str(config))
        tmp_config_file.write(str(config))
        tmp_config_file.seek(0)

        self.app.init(
            dir_path
            # disable maual plugin because it changes toooo fast
            # dir_path, plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            plan = self.app.plan(tw_dir, no_color=IsFlagged)
        return plan

    def run_terraform_apply(self, config: Terrascript):
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)

            self.app.apply(tw_dir, skip_plan=True, no_color=IsFlagged)
            output_result = self.app.output(json=IsFlagged, no_color=IsFlagged)

            output_var = {
                output_var_key: output_result[output_var_key]["value"]
                for output_var_key in output_result
            }

        return output_var

    def run_terraform_destroy(self, config: Terrascript):
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            destroy_result = self.app.destroy(tw_dir)
        return destroy_result

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            "server",
            image=config_data["os_code"],
            name=config_data["hostname"],
            region=config_data["region_code"],
            size=config_data["plan_code"],
            ssh_keys=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            do_droplet_resource["ssh_keys"] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key

        do_output_ip = Output(
            "ip", value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output("server_id",
                              value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id

        return do_config

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        vultr_server = cls.vultr_server(
            "server",
            plan_id=config_data["plan_code"],
            region_id=config_data["region_code"],
            os_id=config_data["os_code"],
            hostname=config_data["hostname"],
            ssh_key_ids=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        vultr_output_ip = Output("ip", value="${vultr_server.server.main_ip}")
        vultr_output_id = Output("server_id",
                                 value="${vultr_server.server.id}")

        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key("vultr_ssh_key",
                                              name="default_key",
                                              ssh_key=public_key)

            vultr_server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            vultr_config += vultr_ssh_key

        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id

        return vultr_config

    @classmethod
    def gen_tencent_cloud_config(
        cls,
        config_data: dict,
        token: str,
        public_key_name: str = None,
        secret_id: str = None,
    ):
        tencent_cloud_config = Terrascript()
        tencent_terraform = terraform(
            **{
                "required_providers": {
                    "tencentcloud": {
                        "source": "terraform-providers/tencentcloud",
                        "version": "~> 1.40.3",
                    },
                }
            })

        tencent_cloud_provider = cls.tencentcloud(
            secret_id=secret_id,
            secret_key=token,
            region=config_data["region_code"],
        )
        tencent_zone = cls.tencentcloud_availability_zones("default")
        tencent_security_group = cls.tencentcloud_security_group(
            "default", name="all-open", description="open all ports")
        tencent_security_group_rule = cls.tencentcloud_security_group_lite_rule(
            "rule",
            security_group_id="${tencentcloud_security_group.default.id}",
            ingress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
            egress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
        )
        tencent_cloud_server = cls.tencentcloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone=
            "${data.tencentcloud_availability_zones.default.zones.0.name}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            disable_monitor_service=True,
            disable_security_service=True,
            allocate_public_ip=True,
            internet_max_bandwidth_out=5,
            instance_charge_type="POSTPAID_BY_HOUR",
            internet_charge_type="TRAFFIC_POSTPAID_BY_HOUR",
            system_disk_type="CLOUD_SSD",
            count=1,
        )
        tencent_output_ip = Output(
            "ip", value="${tencentcloud_instance.server.0.public_ip}")
        tencent_output_id = Output(
            "server_id", value="${tencentcloud_instance.server.0.id}")

        if public_key_name:
            tencent_cloud_server["key_name"] = public_key_name

        tencent_cloud_config += tencent_terraform
        tencent_cloud_config += tencent_cloud_provider
        tencent_cloud_config += tencent_zone
        tencent_cloud_config += tencent_security_group
        tencent_cloud_config += tencent_security_group_rule
        tencent_cloud_config += tencent_cloud_server
        tencent_cloud_config += tencent_output_ip
        tencent_cloud_config += tencent_output_id

        return tencent_cloud_config

    @classmethod
    def gen_ali_cloud_config(
        cls,
        config_data: dict,
        token: str,
        ssh_key_name: str = None,
        access_key: str = None,
        security_groups: List[str] = None,
    ):
        """Build a Terrascript configuration for one Alibaba Cloud ECS server.

        Creates a provider, an availability-zone data source, a VPC with a
        single vswitch, an all-open security group (one ingress and one
        egress rule), the instance itself, and two outputs (``ip`` and
        ``server_id``).

        :param config_data: must contain ``region_code``, ``hostname``,
            ``os_code`` (image id) and ``plan_code`` (instance type).
        :param token: Alibaba Cloud secret key.
        :param ssh_key_name: optional key-pair name attached to the instance.
        :param access_key: Alibaba Cloud access key.
        :param security_groups: currently unused — the instance is wired to
            the security group created here (see NOTE below).
        :return: the assembled :class:`Terrascript` configuration.
        """
        # Avoid the shared-mutable-default pitfall: the old signature used
        # ``security_groups: List[str] = []``. ``None`` sentinel keeps the
        # call-site behavior identical.
        if security_groups is None:
            security_groups = []

        ali_cloud_config = Terrascript()
        ali_cloud_provider = cls.alicloud(
            access_key=access_key,
            secret_key=token,
            region=config_data["region_code"],
        )

        ali_zone = cls.alicloud_zones(
            "default",
            available_disk_category="cloud_efficiency",
            available_resource_creation="Instance",
        )
        ali_vpc = cls.alicloud_vpc(
            "vpc",
            cidr_block="172.16.0.0/12",
        )
        ali_vswitch = cls.alicloud_vswitch(
            "vswitch",
            vpc_id="${alicloud_vpc.vpc.id}",
            cidr_block="172.16.0.0/29",
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
        )
        ali_security_group = cls.alicloud_security_group(
            "group",
            name="all-open",
            vpc_id="${alicloud_vpc.vpc.id}",
            description="open all ports",
            inner_access_policy="Accept",
        )
        # Ingress: accept all traffic from anywhere.
        ali_internet_security_group_rule = cls.alicloud_security_group_rule(
            "internet",
            security_group_id="${alicloud_security_group.group.id}",
            type="ingress",
            port_range="-1/-1",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
        )
        # Egress: accept all traffic to anywhere.
        ali_intranet_security_group_rule = cls.alicloud_security_group_rule(
            "intranet",
            security_group_id="${alicloud_security_group.group.id}",
            port_range="-1/-1",
            type="egress",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
            priority=1,
        )
        # NOTE(review): the ``security_groups`` parameter is ignored here and
        # the splat reference to the group created above is used instead —
        # confirm whether callers still rely on passing their own groups.
        ali_cloud_server = cls.alicloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
            security_groups="${alicloud_security_group.group.*.id}",
            vswitch_id="${alicloud_vswitch.vswitch.id}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            security_enhancement_strategy="Deactive",
            instance_charge_type="PostPaid",
            internet_charge_type="PayByTraffic",
            internet_max_bandwidth_out=2,
        )
        ali_output_ip = Output("ip",
                               value="${alicloud_instance.server.public_ip}")
        ali_output_id = Output("server_id",
                               value="${alicloud_instance.server.id}")

        # Only attach a key pair when one was supplied.
        if ssh_key_name:
            ali_cloud_server["key_name"] = ssh_key_name

        ali_cloud_config += ali_cloud_provider
        ali_cloud_config += ali_zone
        ali_cloud_config += ali_vpc
        ali_cloud_config += ali_vswitch
        ali_cloud_config += ali_security_group
        ali_cloud_config += ali_internet_security_group_rule
        ali_cloud_config += ali_intranet_security_group_rule
        ali_cloud_config += ali_cloud_server
        ali_cloud_config += ali_output_ip
        ali_cloud_config += ali_output_id

        return ali_cloud_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Return a ``remote-exec`` provisioner that appends *public_key* to
        the remote user's ``~/.ssh/authorized_keys``.

        :param public_key: the SSH public key material to authorize.
        """
        # Bug fixes vs. the previous version:
        # 1. The command was ``f"{public_key} >> ..."`` — the shell would try
        #    to *execute* the key material; ``echo`` is required to append it.
        # 2. A provisioner was nested inside another provisioner of the same
        #    type via a ``provisioner=`` kwarg, which renders an invalid
        #    nested ``provisioner`` block; a single provisioner suffices.
        return provisioner(
            "remote-exec",
            inline=[
                "mkdir -p ~/.ssh",
                f"echo '{public_key}' >> ~/.ssh/authorized_keys",
            ],
        )

    @classmethod
    def gen_ssh_conn_config(
        cls,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ) -> dict:
        """Assemble a Terraform SSH ``connection`` block as a plain dict.

        Reference: https://www.terraform.io/docs/provisioners/connection.html

        :param ssh_user: remote login user.
        :param ssh_private_key: private key material used to authenticate.
        :param ssh_host: address of the target host.
        :param ssh_port: SSH port on the target host.
        :return: mapping suitable for a provisioner ``connection`` argument.
        """
        connection = dict(
            type="ssh",
            user=ssh_user,
            private_key=ssh_private_key,
            host=ssh_host,
            port=ssh_port,
        )
        # Fixed handshake timeout used for every generated connection.
        connection["timeout"] = "30s"
        return connection

    @classmethod
    def gen_site_docker_deploy_config(
        cls,
        *,
        docker_host: str = DEFAULT_DOCKER_HOST,
        site_name: str = None,
        template_tar_bytes: bytes = None,
        script: str = None,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a Terrascript config that deploys an nginx site container
        over a remote (SSH-tunnelled) Docker daemon.

        :param docker_host: Docker daemon address for the docker provider.
        :param site_name: used to name the container and uploaded tarball.
        :param template_tar_bytes: optional gzipped site template; when given
            it is uploaded (base64-encoded) into the container's html path.
        :param script: when truthy, an entrypoint script rendered from the
            nginx entrypoint template is uploaded into the container.
        :param ssh_user: SSH login user for the connection block.
        :param ssh_private_key: SSH private key material.
        :param ssh_host: SSH host address.
        :param ssh_port: SSH port.
        :return: the assembled :class:`Terrascript` configuration.
        """
        config = Terrascript()
        docker_provider = provider.docker(
            host=docker_host,
            connection=cls.gen_ssh_conn_config(
                ssh_user=ssh_user,
                ssh_private_key=ssh_private_key,
                ssh_host=ssh_host,
                ssh_port=ssh_port,
            ),
        )
        docker_image_resource = resource.docker_image(
            "nginx_image",
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            "nginx_container",
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={"internal": 80},
            upload=[],
        )
        # Random pet name keeps container/tarball names unique per deploy.
        docker_name_resource = resource.random_pet(
            "docker_pet_name",
            length=1,
        )

        if template_tar_bytes:
            # Bug fixes vs. the previous version:
            # 1. A trailing comma made ``template_tar_file`` a 1-tuple
            #    instead of a string.
            # 2. ``${template_tar_file}`` inside the f-string emitted a
            #    literal ``$`` plus the interpolated tuple, corrupting the
            #    upload path.
            template_tar_file = (
                f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz")
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode("utf8")
            template_tar_path = (
                f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            )
            docker_container_resource["upload"].append({
                "content_base64": template_tar_file_content,
                "file": template_tar_path,
            })

        if script:
            # NOTE(review): ``script`` only gates this branch; its value is
            # not embedded in the rendered entrypoint — confirm intent.
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH,
            )
            docker_container_resource["upload"].append({
                "content": entrypoint_sh_content,
                "file": cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(
        self,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config whose single null_resource runs ``ls -la`` on the
        remote host over SSH (a connectivity smoke test).

        :param ssh_user: remote login user.
        :param ssh_private_key: private key material used to authenticate.
        :param ssh_host: address of the target host.
        :param ssh_port: SSH port on the target host.
        :return: the assembled :class:`Terrascript` configuration.
        """
        connection = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        config = Terrascript()
        config += self.null_resource(
            "remote-exec",
            provisioner=provisioner(
                "remote-exec",
                inline=["ls -la"],
                connection=connection,
            ),
        )
        return config

    def upload_file(
        self,
        content: str,
        *,
        destination: str = DEFAULT_UPLOAD_PATH,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config whose single null_resource uploads *content* to
        *destination* on the remote host via a file provisioner.

        :param content: text written to the remote file.
        :param destination: remote path to write to.
        :param ssh_user: remote login user.
        :param ssh_private_key: private key material used to authenticate.
        :param ssh_host: address of the target host.
        :param ssh_port: SSH port on the target host.
        :return: the assembled :class:`Terrascript` configuration.
        """
        connection = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        config = Terrascript()
        config += self.null_resource(
            "upload_file_resource",
            provisioner=provisioner(
                self.TERRAFORM_RESOURCE_FILE,
                content=content,
                destination=destination,
                connection=connection,
            ),
        )
        return config