Ejemplo n.º 1
0
def delete_bastion(plateform):
    """Destroy the bastion layer of the given platform.

    :param plateform: platform description dict; reads name, region,
        remote-bucket, prefix-bucket, gcp-project and
        infrastructure.bastion settings
    :raises Exception: if ``terraform destroy`` exits with a non-zero code
    """
    tf = Terraform(working_dir='terraform/layer-bastion')
    # Select the platform's workspace; its return code is not checked
    # because the destroy below reports its own failure.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)
    code, _, _ = tf.destroy(var={
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'instance_type': plateform['infrastructure']['bastion']['instance-type'],
        'instance_image': plateform['infrastructure']['bastion']['image'],
    },
                            capture_output=False,
                            no_color=IsNotFlagged,
                            skip_plan=IsNotFlagged,
                            auto_approve=True)

    if code != 0:
        # Fixed: the message previously said "layer-data" although this
        # function operates on layer-bastion.
        raise Exception("error in Terraform layer-bastion")
    def _get_plan_data(cls, tf_dir, tf_plan_path=None) -> (dict, dict):
        """Return ``(tf_plan, tf_data)`` parsed from the terraform files in *tf_dir*.

        When *tf_plan_path* is None a temporary plan file is generated with
        ``terraform plan`` (and removed afterwards); otherwise the existing
        plan file is read directly with ``terraform show -json``.

        :param tf_dir: directory containing the terraform configuration
        :param tf_plan_path: optional path to an already-generated plan file
        :raises CommandNotFound: if the ``terraform`` binary is not on PATH
        :raises RunCommandFailed: if a terraform or parsetf invocation fails
        :raises TerraformPlanFormatVersionNotSupported: if the plan's format
            version is not in ``cls.SUPPORTED_PLAN_FORMAT_VERSIONS``
        """

        # Check "terraform" command
        tf_cmd_path = shutil.which("terraform")
        if tf_cmd_path is None:
            raise CommandNotFound(cmd="terraform")

        # Using "terraform plan/show" to parse configuration
        logger.info("Parsing terraform files...")
        tf = Terraform()
        if tf_plan_path is None:
            # Random file name so concurrent runs do not collide.
            tf_plan_path = os.path.join(os.getcwd(), str(uuid4()))
            try:
                cmd_args = [
                    "plan", "-input=false", "-out", tf_plan_path, tf_dir
                ]
                tf.cmd(*cmd_args, raise_on_error=True)
                cmd_args = ["show", "-json", tf_plan_path]
                _, output, _ = tf.cmd(*cmd_args, raise_on_error=True)
            except TerraformCommandError as e:
                raise RunCommandFailed(cmd=e.cmd, reason=e.err or e.out)
            finally:
                # Always clean up the temporary plan file, even on failure.
                if os.path.exists(tf_plan_path):
                    os.remove(tf_plan_path)
        else:
            try:
                cmd_args = ["show", "-json", tf_plan_path]
                _, output, _ = tf.cmd(*cmd_args, raise_on_error=True)
            except TerraformCommandError as e:
                raise RunCommandFailed(cmd=e.cmd, reason=e.err or e.out)

        tf_plan = json.loads(output)
        # Reject plan format versions this parser does not understand.
        version = tf_plan[cls.FORMAT_VERSION]
        if version not in cls.SUPPORTED_PLAN_FORMAT_VERSIONS:
            raise TerraformPlanFormatVersionNotSupported(version=version)

        # Using "parsetf" to parse configuration
        try:
            output = parsetf.parse(tf_dir)

        except CalledProcessError as e:
            raise RunCommandFailed(cmd=e.cmd, reason=e.output)

        logger.info("Parse terraform files done")

        tf_data = json.loads(output)
        return tf_plan, tf_data
Ejemplo n.º 3
0
def apply_tf(working_dir, vars, description):
    """
    Run ``terraform init`` + ``terraform apply`` in *working_dir*.

    :param working_dir: Directory that contains the tf files
    :param vars: Additional variables passed in to override defaults, equivalent to -var
    :param description: Description of the deployment for logging purposes
    :return: tuple ``(return_code, outputs)`` — 0 for success or other for
             failure, and the dict of terraform outputs from outputs.tf
    """
    # Set run_plan to True if you wish to run terraform plan before apply.
    run_plan = False
    kwargs = {"auto-approve": True}

    # Output streams straight to the console; python-terraform only captures
    # it when capture_output is True.  (The previous stdout/stderr PIPE
    # plumbing here was dead code — nothing consumed it.)
    capture_output = False

    start_time = time.asctime()
    print('Starting Deployment at {}\n'.format(start_time))

    tf = Terraform(working_dir=working_dir)

    tf.cmd('init')
    if run_plan:
        tf.plan(capture_output=False)

    return_code, stdout, stderr = tf.apply(vars=vars,
                                           capture_output=capture_output,
                                           skip_plan=True,
                                           **kwargs)
    outputs = tf.output()

    logger.debug('Got Return code {} for deployment of  {}'.format(
        return_code, description))

    return (return_code, outputs)
Ejemplo n.º 4
0
class Terraform:
    """Wrapper around python-terraform bound to the local ``terraform`` dir.

    Non-zero exit codes from the underlying terraform commands are raised
    as ``CwsCommandError``.
    """

    def __init__(self, init):
        # Local import: this class deliberately shadows the library's own
        # ``Terraform`` name at module level.
        from python_terraform import Terraform as PythonTerraform

        self.terraform = PythonTerraform(working_dir='terraform')
        # Make sure the working directory exists before terraform runs.
        Path(self.working_dir).mkdir(exist_ok=True)
        if init:
            return_code, _, err = self.terraform.init(
                dir_or_plan=self.working_dir)
            if return_code != 0:
                raise CwsCommandError(err)

    @property
    def working_dir(self):
        """Working directory of the wrapped python-terraform instance."""
        return self.terraform.working_dir

    def init(self):
        """Run ``terraform init``; raise ``CwsCommandError`` on failure."""
        return_code, _, err = self.terraform.init()
        if return_code != 0:
            raise CwsCommandError(err)

    def apply(self, workspace, targets):
        """Apply the given resource *targets* in *workspace* (plan skipped)."""
        self.select_workspace(workspace)
        return_code, _, err = self.terraform.apply(target=targets,
                                                   skip_plan=True,
                                                   input=False,
                                                   raise_on_error=False,
                                                   parallelism=1)
        if return_code != 0:
            raise CwsCommandError(err)

    def destroy(self, workspace, targets):
        """Destroy the given resource *targets* in *workspace*."""
        self.select_workspace(workspace)
        return_code, _, err = self.terraform.destroy(target=targets)
        if return_code != 0:
            raise CwsCommandError(err)

    def output(self):
        """Return the default workspace's outputs as ``{name: value}``.

        NOTE(review): when there are no outputs this returns the *string*
        "{}" rather than an empty dict — confirm callers expect that.
        """
        self.select_workspace("default")
        values = self.terraform.output(capture_output=True)
        return {key: value['value']
                for key, value in values.items()} if values else "{}"

    def workspace_list(self):
        """Return an iterator over names from ``terraform workspace list``."""
        self.select_workspace("default")
        return_code, out, err = self.terraform.cmd('workspace', 'list')
        if return_code != 0:
            raise CwsCommandError(err)
        # Drop the first character (presumably the '*' marking the current
        # workspace — verify against terraform's output format), strip
        # spaces/tabs/CRs, then split into one name per line; filter(None,
        # ...) removes the resulting empty strings.
        values = out[1:].translate(str.maketrans('', '', ' \t\r')).split('\n')
        return filter(None, values)

    def select_workspace(self, workspace):
        """Select *workspace*, creating it first when selection fails."""
        return_code, out, err = self.terraform.workspace('select', workspace)
        if workspace != 'default' and return_code != 0:
            _, out, err = self.terraform.workspace('new',
                                                   workspace,
                                                   raise_on_error=True)
        # Initialise the working dir if terraform has never run in it.
        if not (Path(self.working_dir) / '.terraform').exists():
            self.terraform.init(input=False, raise_on_error=True)
Ejemplo n.º 5
0
class TerraformUtils:
    """Helper around python-terraform driving a tfvars-file-based config."""

    VAR_FILE = "terraform.tfvars.json"

    def __init__(self, working_dir):
        logging.info("TF FOLDER %s ", working_dir)
        self.working_dir = working_dir
        self.var_file_path = os.path.join(working_dir, self.VAR_FILE)
        self.tf = Terraform(working_dir=working_dir,
                            state="terraform.tfstate",
                            var_file=self.VAR_FILE)
        self.init_tf()

    def init_tf(self):
        """Initialise terraform using the locally installed plugin mirror."""
        self.tf.cmd("init -plugin-dir=/root/.terraform.d/plugins/",
                    raise_on_error=True)

    def apply(self, refresh=True):
        """Run ``terraform apply``; raise with full output on failure."""
        rc, out, err = self.tf.apply(no_color=IsFlagged,
                                     refresh=refresh,
                                     input=False,
                                     skip_plan=True)
        if rc == 0:
            return
        message = f'Terraform apply failed with return value {rc}, output {out} , error {err}'
        logging.error(message)
        raise Exception(message)

    def change_variables(self, variables, refresh=True):
        """Merge *variables* into the tfvars file, then re-apply."""
        with open(self.var_file_path, "r+") as handle:
            merged = json.load(handle)
            merged.update(variables)
            handle.seek(0)
            handle.truncate()
            json.dump(merged, handle)
        self.apply(refresh=refresh)

    def get_state(self):
        """Return the parsed terraform state object."""
        return self.tf.tfstate

    def set_new_vip(self, api_vip):
        """Point the cluster's API VIP at *api_vip* and re-apply."""
        self.change_variables(variables={"api_vip": api_vip}, refresh=True)

    def destroy(self):
        """Tear everything down without prompting."""
        self.tf.destroy(force=True, input=False, auto_approve=True)
Ejemplo n.º 6
0
def get_terraform_outputs() -> dict:
    """Return the WebInDeploy terraform outputs as a dict.

    Returns an empty dict when ``terraform output`` fails or its output
    cannot be parsed.  (Previously the non-zero-return-code path fell off
    the end of the function and implicitly returned None, contradicting
    the ``-> dict`` annotation.)
    """
    tf = Terraform(working_dir='./WebInDeploy')
    rc, out, err = tf.cmd('output', '-json')

    if rc != 0:
        return dict()
    try:
        return json.loads(out)
    except ValueError:
        print('Could not parse terraform outputs!')
        return dict()
Ejemplo n.º 7
0
def create_data(plateform, admin_password, app_password, unique_id):
    """Create the data layer (CloudSQL) for the given platform.

    :raises Exception: if ``terraform apply`` exits with a non-zero code
    """
    tf = Terraform(working_dir='terraform/layer-data')
    rc, _, _ = tf.cmd("workspace select " + plateform['name'],
                      capture_output=False,
                      no_color=IsNotFlagged,
                      skip_plan=IsNotFlagged)
    if rc == 1:
        # Selection failed: the workspace does not exist yet, create it.
        tf.cmd("workspace new " + plateform['name'],
               capture_output=False,
               no_color=IsNotFlagged,
               skip_plan=IsNotFlagged)

    sql_conf = plateform['infrastructure']['cloudsql']
    tf_vars = {
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'database_version': sql_conf['version'],
        'database_instance_type': sql_conf['instance-type'],
        'database_disk_size': sql_conf['disk-size'],
        'admin_password': admin_password.decode("utf-8"),
        'app_password': app_password.decode("utf-8"),
        "unique_id": unique_id,
        'env': plateform['type'],
    }
    rc, _, _ = tf.apply(var=tf_vars,
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=True,
                        auto_approve=True)

    if rc != 0:
        raise Exception("error in Terraform layer-data")
Ejemplo n.º 8
0
def delete_kubernetes(plateform):
    """Destroy the kubernetes (GKE) layer of the given platform.

    :raises Exception: if ``terraform destroy`` exits with a non-zero code
    """
    tf = Terraform(working_dir='terraform/layer-kubernetes')
    rc, _, _ = tf.cmd("workspace select " + plateform['name'],
                      capture_output=False,
                      no_color=IsNotFlagged,
                      skip_plan=IsNotFlagged)

    # All five whitelist variables were set to the same placeholder in the
    # original; a single constant reused five times is equivalent.
    placeholder_ip = '10.0.0.1/32'

    gke = plateform['infrastructure']['gke']
    rc, _, _ = tf.destroy(var={
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'k8s-version': gke['version'],
        'preemptible': gke['preemptible'],
        'instance-type': gke['instance-type'],
        'white-ip-1': placeholder_ip,
        'white-ip-2': placeholder_ip,
        'white-ip-3': placeholder_ip,
        'white-ip-4': placeholder_ip,
        'white-ip-5': placeholder_ip,
        'min_node': gke['min'],
        'max_node': gke['max'],
        'range_ip_master': plateform['infrastructure']['range-ip-master'],
    },
                            capture_output=False,
                            no_color=IsNotFlagged,
                            skip_plan=IsNotFlagged,
                            auto_approve=True)

    if rc != 0:
        raise Exception("error in Terraform layer-kubernetes")
Ejemplo n.º 9
0
def create_base(plateform):
    """Create the base network layer for the given platform.

    :raises Exception: if ``terraform apply`` exits with a non-zero code
    """
    tf = Terraform(working_dir='terraform/layer-base')
    rc, _, _ = tf.cmd("workspace select " + plateform['name'],
                      capture_output=False,
                      no_color=IsNotFlagged,
                      skip_plan=IsNotFlagged)
    if rc == 1:
        # Selection failed: the workspace does not exist yet, create it.
        tf.cmd("workspace new " + plateform['name'],
               capture_output=False,
               no_color=IsNotFlagged,
               skip_plan=IsNotFlagged)

    infra = plateform['infrastructure']
    tf_vars = {
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'range-ip': infra['range-ip'],
        'range-ip-pod': infra['range-ip-pod'],
        'range-ip-svc': infra['range-ip-svc'],
        'range-plateform': infra['range-plateform'],
        'allowed-ips': plateform['ips_whitelist'],
        'env': plateform['type'],
    }
    rc, _, _ = tf.apply(var=tf_vars,
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=True,
                        auto_approve=True)
    if rc != 0:
        raise Exception("error in Terraform layer-base")
Ejemplo n.º 10
0
class Component:
  """Base class for a terraform-managed platform component.

  Subclasses set ``blocname``/``component_name`` and override ``check``,
  ``define_var``, ``apply`` and ``destroy``.  If the platform description
  does not contain this component's bloc, ``__init__`` returns early and
  the component stays inert (``workspace`` remains empty).
  """

  blocname = ""
  component_name = ""
  workspace = ""

  def __init__(self, plateform):
    self.plateform = plateform
    self.get_constantes()
    if self.blocname not in plateform:
      return
    self.check()
    self.define_var()
    self.workspace = self.plateform_name

  def get_constantes(self):
    """Read common constants (state bucket, name, region) from the platform."""
    self.bucket_component_state = self.plateform['bucket-component-state']
    self.plateform_name = self.plateform['name']

    # Default region when the platform does not specify one.
    if 'region' not in self.plateform:
      self.plateform['region'] = 'eu-west-1'

    self.region = self.plateform['region']

  def get_workspace(self):
    """Return the terraform workspace this component targets."""
    return self.workspace

  def define_var(self):
    # Overridden by subclasses to build the component's terraform variables.
    self.var = {}

  def apply(self):
    pass

  def destroy(self):
    pass

  # to check:
  # - dependencies (example: rds needs network)
  # - yaml validation for component
  def check(self):
    pass

  def init(self, working_dir):
    """Run ``terraform init`` with the remote state backend configured."""
    self.tf = Terraform(working_dir)
    self.tf.cmd(
      "init -backend-config=bucket=" + self.bucket_component_state + " -backend-config=region=" + self.region,
      capture_output=True,
      no_color=IsNotFlagged
    )

  def _reset_local_state(self, working_dir):
    """Remove cached workspace/state files so init starts from a clean slate."""
    if os.path.exists(working_dir + "/.terraform/environment"):
      os.remove(working_dir + "/.terraform/environment")
    else:
      print("File environment not exist")

    if os.path.exists(working_dir + "/.terraform/terraform.tfstate"):
      os.remove(working_dir + "/.terraform/terraform.tfstate")
    else:
      print("File terraform.tfstate not exist")

  def create(self, working_dir, var_component, skip_plan=True, workspace_name=""):
    """Apply the component in the given workspace (created if missing).

    :raises Exception: when ``terraform apply`` fails
    """
    if len(workspace_name) == 0:
      workspace_name = self.get_workspace()

    self._reset_local_state(working_dir)
    self.init(working_dir=working_dir)

    # select workspace, creating it when the selection fails
    code, _, _ = self.tf.cmd("workspace select " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)
    if code == 1:
      self.tf.cmd("workspace new " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)

    # terraform apply
    # Fixed: previously called the undefined global ``tf`` instead of self.tf.
    code, _, _ = self.tf.apply(
      var=var_component,
      capture_output=False,
      no_color=IsNotFlagged,
      skip_plan=skip_plan,
      auto_approve=True)
    if code != 0:
      raise Exception("error in component: " + self.component_name)

  def delete(self, working_dir, var_component, skip_plan=True, workspace_name=""):
    """Destroy the component in the given workspace (no-op if it's missing).

    :raises Exception: when ``terraform destroy`` fails
    """
    if len(workspace_name) == 0:
      workspace_name = self.get_workspace()

    self._reset_local_state(working_dir)
    self.init(working_dir=working_dir)

    code, _, _ = self.tf.cmd("workspace select " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)
    if code == 1:
      print("workspace does not exist")
    else:
      code, _, _ = self.tf.destroy(
        var=var_component,
        capture_output=False,
        no_color=IsNotFlagged,
        skip_plan=IsNotFlagged,
        auto_approve=True)
      if code != 0:
        raise Exception("error in component: " + self.component_name)

  def output(self, var_name, working_dir, skip_plan=True, workspace_name=""):
    """Return the raw ``terraform output`` text for *var_name* ('' if the
    workspace does not exist).

    :raises Exception: when the output command fails
    """
    if len(workspace_name) == 0:
      workspace_name = self.get_workspace()

    print("search output : " + var_name)

    self.tf = Terraform(working_dir)

    out = ''

    code, _, _ = self.tf.cmd("workspace select " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)
    if code == 1:
      print("workspace does not exist")
    else:
      code, out, _ = self.tf.cmd(
        "output " + var_name,
        no_color=IsNotFlagged)
      if code != 0:
        raise Exception("error in component: " + self.component_name)

    return out
Ejemplo n.º 11
0
def main(username, password, aws_access_key, aws_secret_key, aws_region,
         ec2_key_pair, bootstrap_bucket):
    """Deploy WebInDeploy, apply WAF rules, then configure the firewall.

    Builds the infrastructure with terraform, waits for the firewall to
    come up, pushes a content update and the firewall config, and finally
    checks that the Jenkins server answers.  Progress is accumulated via
    ``update_status`` into the module-level ``status_output``, which is
    dumped as JSON on stdout; exits with code 1 on any terraform failure.
    """
    albDns = ''
    nlbDns = ''
    fwMgt = ''

    WebInDeploy_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'bootstrap_s3bucket': bootstrap_bucket
    }

    waf_conf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'alb_arn': albDns,
        'nlb-dns': nlbDns
    }

    WebInFWConf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
        'ServerKeyName': ec2_key_pair,
        'mgt-ipaddress-fw1': fwMgt,
        'nlb-dns': nlbDns,
        'username': username,
        'password': password
    }

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Output streams straight to the console; python-terraform only
    # captures it when capture_output is True.  (The previous
    # stdout/stderr PIPE plumbing was dead code — nothing consumed it.)
    capture_output = False

    start_time = time.asctime()
    print(f'Starting Deployment at {start_time}\n')

    # Build Infrastructure

    tf = Terraform(working_dir='./WebInDeploy')

    tf.cmd('init')
    if run_plan:
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    web_in_deploy_output = tf.output()

    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))

    update_status('web_in_deploy_output', web_in_deploy_output)

    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    # Collect addresses produced by the WebInDeploy run.
    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fwMgtIP = fwMgt

    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt
    WebInFWConf_vars['nlb-dns'] = nlbDns

    WebInDeploy_vars['alb_dns'] = albDns
    WebInDeploy_vars['nlb-dns'] = nlbDns

    #
    # Apply WAF Rules
    #

    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}
    logger.info("Applying WAF config to App LB")

    if run_plan:
        # Fixed: previously passed the builtin ``vars`` here instead of
        # the waf_conf variable set.
        tf.plan(capture_output=capture_output, var=waf_conf_vars, **kwargs)

    return_code3, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=waf_conf_vars,
                                            **kwargs)

    waf_conf_out = tf.output()

    update_status('waf_conf_output', waf_conf_out)

    logger.debug('Got Return code to deploy waf_conf {}'.format(return_code3))

    if return_code3 != 0:
        logger.info("waf_conf failed")
        update_status('waf_conf_status', 'error')
        update_status('waf_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('waf_conf_status', 'success')

    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #

    api_key = getApiKey(fwMgtIP, username, password)

    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            # NOTE(review): this branch retries immediately without sleeping,
            # which busy-loops if the error persists — confirm intended.
            logger.info("Command error from fw ")

        elif err == 'no':
            logger.info("FW is not up...yet")
            time.sleep(60)
            continue

        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue

        elif err == 'yes':
            logger.info("FW is up")
            break

    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions'
    )
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP,
                           api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")

    update_fw(fwMgtIP, api_key)
    updateHandle = updater.ContentUpdater(fw)

    #
    # Configure Firewall
    #

    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt

    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars,
                                            **kwargs)

    web_in_fw_conf_out = tf.output()

    update_status('web_in_fw_conf_output', web_in_fw_conf_out)

    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))

    if return_code2 != 0:
        logger.error("WebFWConfy failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")

    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #

    logger.info('Checking if Jenkins Server is ready')

    # FIXME - add outputs for all 3 dirs

    res = getServerStatus(albDns)

    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n   ### Deployment Complete ###')
        logger.info(
            '\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n   ### Deployment Complete ###')

    # dump out status to stdout
    print(json.dumps(status_output))
Ejemplo n.º 12
0
 def _tfcmd(self, cmd, **kwargs):
     """Run a terraform command in ``self.path`` with color output disabled."""
     return Terraform(working_dir=self.path).cmd(cmd, no_color=IsFlagged, **kwargs)
Ejemplo n.º 13
0
def main(aws_access_key, aws_secret_key, aws_region):
    """Destroy the waf_conf and WebInDeploy terraform stacks.

    Exits the process: ``exit()`` when destroying waf_conf fails, code 1
    when destroying WebInDeploy fails, code 0 on full success.
    """
    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    kwargs = {"auto-approve": True}

    # Renamed from ``vars`` to avoid shadowing the builtin.
    tf_vars = {
        'aws_access_key': aws_access_key,
        'aws_secret_key': aws_secret_key,
        'aws_region': aws_region,
    }

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    # Fixed: python-terraform expands the ``var`` option (a dict) into
    # -var arguments; ``vars=`` is not a recognised option.
    return_code1, stdout, stderr = tf.cmd('destroy',
                                          capture_output=True,
                                          var=tf_vars,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # Fixed: this failure concerns waf_conf, not WebInDeploy.
        logger.info("Failed to destroy waf_conf")
        exit()
    else:
        logger.info("Destroyed waf_conf Successfully")

    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          capture_output=True,
                                          var=tf_vars,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # Fixed: the failure branch previously logged "WebInDeploy destroyed".
        logger.info("Failed to destroy WebInDeploy")
        print('Failed to Destroy WebInDeploy')
        exit(1)
    else:
        print('Destroyed WebInDeploy Successfully')
        exit(0)
Ejemplo n.º 14
0
class Terraform:
    """Build terrascript configurations and run them with python-terraform.

    Supports provisioning DigitalOcean and Vultr servers and deploying
    nginx-in-docker sites over SSH. Every run happens inside a throwaway
    terraform workspace so state does not accumulate in 'default'.
    """

    DEFAULT_DOCKER_HOST = 'unix:///var/run/docker.sock'
    DEFAULT_DOCKER_ENTRYPOINT_PATH = '/docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = '/nginx.docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_IMAGE = 'nginx:stable-alpine'
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = '/usr/share/nginx/html'
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    DEFAULT_SSH_USER = '******'
    DEFAULT_SSH_PORT = 22

    # terraform provisioner type used by upload_file
    TERRAFORM_RESOURCE_FILE = 'file'

    # trick for terrascript: declare provider/resource types the installed
    # terrascript release does not ship definitions for
    class null_resource(Resource):
        ...

    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        # work_dir holds terraform plugins and is reused across runs
        self.work_dir = TERRAFORM_WORK_DIR
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Yield a temp directory bound to a fresh, uniquely named workspace.

        BUG FIX: the switch back to 'default' and the workspace deletion now
        run in a ``finally`` block, so an exception raised by the caller no
        longer leaks the workspace.
        """
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()
        try:
            yield tmp_dir.name
        finally:
            self.app.set_workspace('default')
            self.app.cmd('workspace delete', workspace, force=IsFlagged)

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        # TODO: patch ssh config before yielding and clear it afterwards;
        # currently a placeholder that does nothing.
        yield

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Serialize *config* to a .tf.json file in *dir_path* and init it.

        The file is created with delete=False so terraform can read it after
        this method returns; the open NamedTemporaryFile is returned.
        """
        tmp_config_file = NamedTemporaryFile(mode='wt',
                                             suffix='.tf.json',
                                             dir=dir_path,
                                             delete=False)
        tmp_config_file.write(str(config))
        tmp_config_file.seek(0)

        # use pre-downloaded plugins instead of fetching them per run
        self.app.init(
            dir_path,
            plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Run ``terraform plan`` for *config* in a throwaway workspace."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            plan = self.app.plan(tw_dir, no_color=IsFlagged)
        return plan

    def run_terraform_apply(self, config: Terrascript):
        """Apply *config* and return its terraform outputs as {name: value}."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            print(config)
            self.app.apply(tw_dir, skip_plan=True, no_color=IsFlagged)
            output_result = self.app.output(json=IsFlagged, no_color=IsFlagged)
            print(output_result)
            # flatten {'name': {'value': v, ...}} into {'name': v}
            output_var = {
                output_var_key: output_result[output_var_key]['value']
                for output_var_key in output_result
            }

        return output_var

    def run_terraform_destroy(self, config: Terrascript):
        """Destroy the resources described by *config*."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            destroy_result = self.app.destroy(tw_dir)
        return destroy_result

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        """Build a Terrascript config for one DigitalOcean droplet.

        Args:
            config_data: expects keys os_code, hostname, region_code,
                plan_code and optionally ssh_keys (list of key ids).
            token: DigitalOcean API token.
            public_key: optional SSH public key to register and attach.

        Returns:
            Terrascript config exposing outputs 'ip' and 'server_id'.
        """
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            "server",
            image=config_data['os_code'],
            name=config_data['hostname'],
            region=config_data['region_code'],
            size=config_data['plan_code'],
            ssh_keys=config_data['ssh_keys']
            if config_data.get('ssh_keys') else [])
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            do_droplet_resource['ssh_keys'] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key

        do_output_ip = Output(
            'ip', value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output('server_id',
                              value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id

        return do_config

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        """Build a Terrascript config for one Vultr server.

        Mirrors gen_digital_ocean_config: same config_data keys, same
        'ip' / 'server_id' outputs.
        """
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        vultr_server = cls.vultr_server("server",
                                        plan_id=config_data['plan_code'],
                                        region_id=config_data['region_code'],
                                        os_id=config_data['os_code'],
                                        hostname=config_data['hostname'],
                                        ssh_key_ids=config_data['ssh_keys']
                                        if config_data.get('ssh_keys') else [])
        vultr_output_ip = Output('ip', value="${vultr_server.server.main_ip}")
        vultr_output_id = Output('server_id',
                                 value="${vultr_server.server.id}")

        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key('vultr_ssh_key',
                                              name='default_key',
                                              ssh_key=public_key)

            vultr_server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            vultr_config += vultr_ssh_key

        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id

        return vultr_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Build a remote-exec provisioner appending *public_key* to
        ~/.ssh/authorized_keys.

        BUG FIX: the original inline command was
        ``f"{public_key} >> ~/.ssh/authorized_keys"`` which tries to
        *execute* the key as a program; it must be echoed instead.
        NOTE(review): the nested provisioner(provisioner=...) structure is
        kept as-is — confirm terrascript actually expects this shape.
        """
        return provisioner("remote-exec",
                           provisioner=provisioner(
                               "remote-exec",
                               inline=[
                                   'mkdir -p ~/.ssh',
                                   f"echo '{public_key}' >> ~/.ssh/authorized_keys"
                               ],
                           ))

    @classmethod
    def gen_ssh_conn_config(cls,
                            *,
                            ssh_user: str = DEFAULT_SSH_USER,
                            ssh_private_key: str,
                            ssh_host: str,
                            ssh_port: int = DEFAULT_SSH_PORT) -> dict:
        """Build a terraform SSH connection block (30s timeout).

        See https://www.terraform.io/docs/provisioners/connection.html
        """
        return {
            'type': 'ssh',
            'user': ssh_user,
            'private_key': ssh_private_key,
            'host': ssh_host,
            'port': ssh_port,
            'timeout': '30s'
        }

    @classmethod
    def gen_site_docker_deploy_config(cls,
                                      *,
                                      docker_host: str = DEFAULT_DOCKER_HOST,
                                      site_name: str = None,
                                      template_tar_bytes: bytes = None,
                                      script: str = None,
                                      ssh_user: str = DEFAULT_SSH_USER,
                                      ssh_private_key: str,
                                      ssh_host: str,
                                      ssh_port: int = DEFAULT_SSH_PORT):
        """Build a Terrascript config that runs an nginx container over a
        remote docker host (reached through SSH).

        Optionally uploads a site tarball (template_tar_bytes) and a
        rendered entrypoint script into the container.
        """
        config = Terrascript()
        docker_provider = provider.docker(host=docker_host,
                                          connection=cls.gen_ssh_conn_config(
                                              ssh_user=ssh_user,
                                              ssh_private_key=ssh_private_key,
                                              ssh_host=ssh_host,
                                              ssh_port=ssh_port))
        docker_image_resource = resource.docker_image(
            'nginx_image',
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            'nginx_container',
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={'internal': 80},
            upload=[])
        # random_pet gives the container/tar a unique, human-readable suffix
        docker_name_resource = resource.random_pet(
            'docker_pet_name',
            length=1,
        )

        if template_tar_bytes:
            # BUG FIX: a trailing comma made template_tar_file a 1-tuple and
            # the '$' inside the f-string leaked into the container path.
            template_tar_file = f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz"
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode('utf8')
            template_tar_path = f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            docker_container_resource['upload'].append({
                'content_base64':
                template_tar_file_content,
                'file':
                template_tar_path
            })

        if script:
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH)
            docker_container_resource['upload'].append({
                'content':
                entrypoint_sh_content,
                'file':
                cls.DEFAULT_DOCKER_ENTRYPOINT_PATH
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(self,
                    *,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config with a null_resource that runs 'ls -la' over SSH
        (smoke test for connectivity)."""
        exec_config = Terrascript()
        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        exec_resource = self.null_resource('remote-exec',
                                           provisioner=provisioner(
                                               "remote-exec",
                                               inline=['ls -la'],
                                               connection=ssh_conn))

        exec_config += exec_resource
        return exec_config

    def upload_file(self,
                    content: str,
                    *,
                    destination: str = DEFAULT_UPLOAD_PATH,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config that uploads *content* to *destination* over SSH
        using a 'file' provisioner on a null_resource."""
        upload_config = Terrascript()

        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        file_resource = self.null_resource('upload_file_resource',
                                           provisioner=provisioner(
                                               self.TERRAFORM_RESOURCE_FILE,
                                               content=content,
                                               destination=destination,
                                               connection=ssh_conn))

        upload_config += file_resource
        return upload_config
            try:
                with open(__terraform_main_tf, 'w') as fOut:
                    __content = get_terraform_file_contents(docker_compose_data, do_init=False, aws_ecs_cluster_name=__aws_ecs_cluster_name, aws_ecs_repo_name=__aws_ecs_repo_name, docker_compose_location=__docker_compose_location, aws_creds=aws_creds, aws_config=aws_config, aws_creds_src=__aws_creds_src__, aws_config_src=__aws_config_src__, aws_default_region=__aws_default_region__, aws_cli_ecr_describe_repos=__aws_cli_ecr_describe_repos__, aws_ecs_compute_engine=__aws_ecs_compute_engine)
                    #_content = pretty_json(__content)  # this does not work, at this time.
                    print(__content, file=fOut)
            except Exception as ex:
                was_exception = True
                extype, ex, tb = sys.exc_info()
                logger.exception('EXCEPTION -> {}'.format(__terraform_main_tf), ex)
            finally:
                if (was_exception):
                    logger.info('terraform file EXCEPTION!')
                else:
                    logger.info('terraform saved -> "{}"'.format(__terraform_main_tf))
            
            ret_code, s_out, err = tf.cmd('validate -json', capture_output=True)
            logger.info('terraform validate -> ret code -> {} err -> {}'.format(ret_code, err))

            was_exception = False
            try:
                __terraform_main_out_tf = os.sep.join([terraform_root, 'main_tf.json'])
                with open(__terraform_main_out_tf, 'w') as fOut:
                    print(s_out, file=fOut)
            except Exception as ex:
                was_exception = True
                extype, ex, tb = sys.exc_info()
                logger.exception('EXCEPTION -> {}'.format(__terraform_main_out_tf), ex)
            finally:
                if (was_exception):
                    logger.info('terraform file validation EXCEPTION!')
                else:
Ejemplo n.º 16
0
def main(username, password):
    """Destroy the WebInDeploy stack and then the WebInBootstrap stack.

    The resource-group names are read from the existing WebInDeploy
    terraform outputs and forwarded to both destroys. Exits 1 on the
    first failure, 0 when both stacks are destroyed.

    Args:
        username: admin username forwarded to terraform.
        password: admin password forwarded to terraform.
    """
    WebInDeploy_vars = {'Admin_Username': username, 'Admin_Password': password}

    WebInBootstrap_vars = {
        'Admin_Username': username,
        'Admin_Password': password
    }

    # Set run_plan to True if you wish to run terraform plan before destroy
    run_plan = False
    kwargs = {"auto-approve": True}

    #
    # Destroy Infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    rg_name = tf.output('RG_Name')
    attack_rg_name = tf.output('Attacker_RG_Name')
    logger.info('Got RG_Name {} and Attacker_RG_Name {}'.format(
        rg_name, attack_rg_name))

    WebInDeploy_vars.update({'RG_Name': rg_name})
    WebInDeploy_vars.update({'Attack_RG_Name': attack_rg_name})

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          var=WebInDeploy_vars,
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # BUG FIX: the original exited with status 0 here.
        logger.info("Failed to destroy WebInDeploy")
        exit(1)
    else:
        logger.info("Destroyed WebInDeploy ")

    WebInBootstrap_vars.update({'RG_Name': rg_name})
    WebInBootstrap_vars.update({'Attack_RG_Name': attack_rg_name})

    tf = Terraform(working_dir='./WebInBootstrap')

    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.cmd('destroy',
                                          var=WebInBootstrap_vars,
                                          capture_output=False,
                                          **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        # BUG FIX: failure branch logged "WebInBootstrap destroyed" and
        # exited 0.
        logger.info("Failed to destroy WebInBootstrap")
        exit(1)
    else:
        exit(0)
Ejemplo n.º 17
0
def create_kubernetes(plateform):
    """Create or update the GKE cluster described by *plateform*.

    Selects (or creates) the terraform workspace named after the platform,
    applies the layer-kubernetes configuration with up to four whitelisted
    IPs plus the caller's current public IP, then runs the post-apply
    shell script.

    Raises:
        Exception: if terraform apply returns a non-zero code.
    """
    tf = Terraform(working_dir='terraform/layer-kubernetes')
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)
    if code == 1:
        # workspace does not exist yet: create it
        tf.cmd("workspace new " + plateform['name'],
               capture_output=False,
               no_color=IsNotFlagged,
               skip_plan=IsNotFlagged)

    # Up to four whitelisted IPs, padded with a harmless placeholder; the
    # fifth slot is always the caller's current public IP.
    ips = ['10.0.0.1/32'] * 4
    whitelist = plateform['infrastructure']['gke']['ips_whitelist']
    for i, whitelisted in enumerate(whitelist[:4]):
        ips[i] = whitelisted + "/32"
    ip_1, ip_2, ip_3, ip_4 = ips
    ip_5 = get('https://api.ipify.org').text + "/32"

    code, _, _ = tf.apply(var={
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'k8s-version': plateform['infrastructure']['gke']['version'],
        'preemptible': plateform['infrastructure']['gke']['preemptible'],
        'instance-type': plateform['infrastructure']['gke']['instance-type'],
        'white-ip-1': ip_1,
        'white-ip-2': ip_2,
        'white-ip-3': ip_3,
        'white-ip-4': ip_4,
        'white-ip-5': ip_5,
        'min_node': plateform['infrastructure']['gke']['min'],
        'max_node': plateform['infrastructure']['gke']['max'],
        'range_ip_master': plateform['infrastructure']['range-ip-master']
    },
                          capture_output=False,
                          no_color=IsNotFlagged,
                          skip_plan=True,
                          auto_approve=True)

    # BUG FIX: fail before running the post-apply script so a broken apply
    # is not post-processed.
    if code != 0:
        raise Exception("error in Terraform layer-kubernetes")

    print("Post Apply script execution...")
    subprocess.call([
        "terraform/layer-kubernetes/apply_post.sh", plateform['name'],
        plateform['gcp-project']
    ])
Ejemplo n.º 18
0
class TerraformUtils:
    """Helper around a terraform working directory.

    Variables live in terraform.tfvars.json and state in
    terraform.tfstate; the class exposes init/apply/destroy plus a few
    helpers for reading declared variables and state resources.
    """

    VAR_FILE = "terraform.tfvars.json"
    STATE_FILE = "terraform.tfstate"

    def __init__(self, working_dir: str, terraform_init: bool = True):
        log.info("TF FOLDER %s ", working_dir)
        self.working_dir = working_dir
        self.var_file_path = os.path.join(working_dir, self.VAR_FILE)
        self.tf = Terraform(working_dir=working_dir,
                            state=self.STATE_FILE,
                            var_file=self.VAR_FILE)
        if terraform_init:
            self.init_tf()

    @retry(exceptions=TerraformCommandError, tries=10, delay=10)
    def init_tf(self) -> None:
        """Run `terraform init`, retrying on transient failures."""
        self.tf.cmd("init", raise_on_error=True, capture_output=True)

    def select_defined_variables(self, **kwargs):
        """Keep only the truthy kwargs whose name is a declared variable."""
        declared = self.get_variable_list()
        return {
            name: value
            for name, value in kwargs.items() if value and name in declared
        }

    def get_variable_list(self):
        """Collect every variable name declared across the *.tf files."""
        declarations = []
        for tf_path in pathlib.Path(self.working_dir).glob("*.tf"):
            with open(tf_path, "r") as handle:
                parsed = hcl2.load(handle)
                declarations.extend(parsed.get("variable", []))
        return [next(iter(entry)) for entry in declarations]

    def apply(self, refresh: bool = True) -> None:
        """Run `terraform apply`; raise when the return code is non-zero."""
        rc, out, err = self.tf.apply(no_color=IsFlagged,
                                     refresh=refresh,
                                     input=False,
                                     skip_plan=True)
        if rc == 0:
            return
        message = f"Terraform apply failed with return value {rc}, output {out} , error {err}"
        log.error(message)
        raise Exception(message)

    def set_and_apply(self, refresh: bool = True, **kwargs) -> None:
        """Persist the supported variables, re-init, then apply."""
        self.change_variables(self.select_defined_variables(**kwargs))
        self.init_tf()
        self.apply(refresh=refresh)

    def change_variables(self,
                         variables: Dict[str, str],
                         refresh: bool = True) -> None:
        """Merge *variables* into the tfvars file, then apply."""
        with open(self.var_file_path, "r+") as handle:
            merged = json.load(handle)
            merged.update(variables)
            handle.seek(0)
            handle.truncate()
            json.dump(merged, handle)
        self.apply(refresh=refresh)

    def get_state(self) -> Tfstate:
        """Re-read the state file and return the parsed tfstate."""
        self.tf.read_state_file(self.STATE_FILE)
        return self.tf.tfstate

    def get_resources(self, resource_type: str = None) -> List[Dict[str, Any]]:
        """Return the state's resources, optionally filtered by type."""
        return [
            entry for entry in self.get_state().resources
            if resource_type is None or entry["type"] == resource_type
        ]

    def set_new_vips(self, api_vip: str, ingress_vip: str) -> None:
        """Point the cluster at new API/ingress VIPs and re-apply."""
        self.change_variables(variables={
            "api_vip": api_vip,
            "ingress_vip": ingress_vip
        },
                              refresh=True)

    def destroy(self) -> None:
        """Tear everything down without prompting."""
        self.tf.destroy(force=True, input=False, auto_approve=True)
Ejemplo n.º 19
0
def main(fwUsername, fwPasswd, aws_access_key=None, aws_secret_key=None):
    """Deploy WebInDeploy, apply WAF rules, wait for the firewall to come
    up, push the firewall configuration, and wait for the Jenkins server.

    Args:
        fwUsername: firewall admin username.
        fwPasswd: firewall admin password.
        aws_access_key: AWS access key forwarded to WebInFWConf.
            BUG FIX: the original referenced this name without ever
            defining it (NameError at runtime); it is now an optional
            parameter so existing callers keep working.
        aws_secret_key: AWS secret key forwarded to WebInFWConf (same fix).
    """
    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    deployment_status = {}
    kwargs = {"auto-approve": True}

    #
    # Build Infrastructure
    #
    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        print('Calling tf.plan')
        tf.plan(capture_output=False)

    return_code1, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            **kwargs)
    print('Got return code {}'.format(return_code1))

    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        deployment_status = {'WebInDeploy': 'Fail'}
        write_status_file(deployment_status)
        exit(1)  # BUG FIX: bare exit() reported success on failure
    else:
        deployment_status = {'WebInDeploy': 'Success'}
        write_status_file(deployment_status)

    albDns = tf.output('ALB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    nlbDns = tf.output('NLB-DNS')
    fw_trust_ip = fwMgt

    #
    # Apply WAF Rules
    #
    tf = Terraform(working_dir='./waf_conf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying WAF config to App LB")

    if run_plan:
        tf.plan(capture_output=False, var={'alb_arn': nlbDns}, **kwargs)

    return_code3, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'alb_arn': nlbDns,
                                                'int-nlb-fqdn': nlbDns
                                            },
                                            **kwargs)

    if return_code3 != 0:
        logger.info("waf_conf failed")
        deployment_status.update({'waf_conf': 'Fail'})
        write_status_file(deployment_status)
        exit(1)
    else:
        deployment_status.update({'waf_conf': 'Success'})
        write_status_file(deployment_status)

    logger.info("Got these values from output of first run\n\n")
    logger.info("ALB address is {}".format(albDns))
    logger.info("nlb address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #

    class FWNotUpException(Exception):
        pass

    api_key = getApiKey(fw_trust_ip, fwUsername, fwPasswd)

    # Poll until the firewall reports fully up (mgmt + dataplane).
    while True:
        err = getFirewallStatus(fw_trust_ip, api_key)
        if err == 'cmd_error':
            logger.info("Command error from fw ")
            # raise FWNotUpException('FW is not up!  Request Timeout')
        elif err == 'no':
            logger.info("FW is not up...yet")
            print("FW is not up...yet")
            time.sleep(60)
            continue
        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue
        elif err == 'yes':
            logger.info("[INFO]: FW is up")
            break

    fw = firewall.Firewall(hostname=fw_trust_ip,
                           api_username=fwUsername,
                           api_password=fwPasswd)
    logger.info("Updating firewall with latest content pack")
    updateHandle = updater.ContentUpdater(fw)
    updateHandle.download()

    # give the content pack time to finish downloading before installing
    logger.info("Waiting 3 minutes for content update to download")
    time.sleep(210)
    updateHandle.install()

    #
    # Configure Firewall
    #
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    if run_plan:
        tf.plan(capture_output=False,
                var={
                    'mgt-ipaddress-fw1': fwMgt,
                    'int-nlb-fqdn': nlbDns
                })

    return_code2, stdout, stderr = tf.apply(capture_output=False,
                                            skip_plan=True,
                                            var={
                                                'mgt-ipaddress-fw1': fwMgt,
                                                'nlb-dns': nlbDns,
                                                'aws_access_key':
                                                aws_access_key,
                                                'aws_secret_key':
                                                aws_secret_key
                                            },
                                            **kwargs)
    if return_code2 != 0:
        # BUG FIX: the failure path used the key 'WebFWConfy' while the
        # success path used 'WebFWConf'; keep one consistent key.
        logger.info("WebFWConf failed")
        deployment_status.update({'WebFWConf': 'Fail'})
        write_status_file(deployment_status)
        exit(1)
    else:
        deployment_status.update({'WebFWConf': 'Success'})
        write_status_file(deployment_status)

    logger.info("Commit changes to firewall")
    fw.commit()

    logger.info('Checking if Jenkins Server is ready')

    # BUG FIX: the original while-True loop never incremented the counter
    # for unknown statuses, so it could spin forever; a bounded loop
    # retries up to max_tries and then proceeds.
    max_tries = 3
    for _ in range(max_tries):
        if getServerStatus(albDns) == 'server_up':
            break
        time.sleep(2)
    logger.info('Jenkins Server is ready')
    logger.info('\n\n   ### Deployment Complete ###')
    logger.info('\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
Ejemplo n.º 20
0
def main(username, password, rg_name, azure_region):
    """Deploy the Azure Jenkins demo environment in three terraform stages.

    Stages (each its own terraform working directory):
      1. ./WebInBootstrap -- resource group and bootstrap storage account
      2. ./WebInDeploy    -- infrastructure (firewall, load balancers, Jenkins)
      3. ./WebInFWConf    -- firewall configuration

    Args:
        username: admin username for the deployed VMs / firewall.
        password: admin password for the deployed VMs / firewall.
        rg_name: Azure resource-group name for the bootstrap stage.
        azure_region: Azure region to deploy into.

    Side effects: records progress in the module-level ``status_output`` via
    ``update_status`` and prints it as JSON; calls ``exit(1)`` on any stage
    failure.
    """
    WebInBootstrap_vars = {'RG_Name': rg_name, 'Azure_Region': azure_region}

    WebInDeploy_vars = {
        'Admin_Username': username,
        'Admin_Password': password,
        'Azure_Region': azure_region
    }

    WebInFWConf_vars = {'Admin_Username': username, 'Admin_Password': password}

    # Set run_plan to True if you wish to run terraform plan before apply
    run_plan = False
    kwargs = {"auto-approve": True}

    # Class Terraform uses subprocess; setting capture_output to True will
    # capture output instead of streaming to this process's stdout/stderr.
    # (kwargs never contains the key, so this is always False today.)
    capture_output = kwargs.pop('capture_output', False)

    if capture_output is True:
        stderr = subprocess.PIPE
        stdout = subprocess.PIPE
    else:
        # if capture output is False, then everything will essentially go to
        # stdout and stderr
        stderr = sys.stderr
        stdout = sys.stdout
        start_time = time.asctime()
        print(f'Starting Deployment at {start_time}\n')

    # -- Stage 1: Create Bootstrap -----------------------------------------

    tf = Terraform(working_dir='./WebInBootstrap')

    tf.cmd('init')
    if run_plan:
        tf.plan(capture_output=False)
    # Fix: python-terraform expects the variables dict under ``var`` (as the
    # other stages already do), not ``vars``.
    return_code1, stdout, stderr = tf.apply(var=WebInBootstrap_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    resource_group = tf.output('Resource_Group')
    bootstrap_bucket = tf.output('Bootstrap_Bucket')
    storage_account_access_key = tf.output('Storage_Account_Access_Key')
    web_in_bootstrap_output = tf.output()

    # Fix: this is the bootstrap stage, not WebInDeploy.
    logger.debug(
        'Got Return code for deploy WebInBootstrap {}'.format(return_code1))

    # Fix: store bootstrap stdout under a bootstrap key (it was previously
    # written to 'web_in_deploy_stdout' and later overwritten by stage 2).
    update_status('web_in_bootstrap_stdout', stdout)
    update_status('web_in_bootstrap_output', web_in_bootstrap_output)

    if return_code1 != 0:
        logger.info("WebInBootstrap failed")
        # Fix: key typo -- error path wrote 'web_in_bootstap_status' while
        # the success path wrote 'web_in_bootstrap_status'.
        update_status('web_in_bootstrap_status', 'error')
        update_status('web_in_bootstrap_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_bootstrap_status', 'success')

    share_prefix = 'jenkins-demo'

    share_name = create_azure_fileshare(share_prefix, bootstrap_bucket,
                                        storage_account_access_key)

    # Feed bootstrap outputs into the deploy stage's variables.
    WebInDeploy_vars.update(
        {'Storage_Account_Access_Key': storage_account_access_key})
    WebInDeploy_vars.update({'Bootstrap_Storage_Account': bootstrap_bucket})
    WebInDeploy_vars.update({'RG_Name': resource_group})
    WebInDeploy_vars.update({'Attack_RG_Name': resource_group})
    WebInDeploy_vars.update({'Storage_Account_Fileshare': share_name})

    # -- Stage 2: Build Infrastructure -------------------------------------

    tf = Terraform(working_dir='./WebInDeploy')
    tf.cmd('init')
    if run_plan:
        tf.plan(capture_output=False, var=WebInDeploy_vars)

    return_code1, stdout, stderr = tf.apply(var=WebInDeploy_vars,
                                            capture_output=capture_output,
                                            skip_plan=True,
                                            **kwargs)

    web_in_deploy_output = tf.output()

    logger.debug(
        'Got Return code for deploy WebInDeploy {}'.format(return_code1))

    update_status('web_in_deploy_stdout', stdout)
    update_status('web_in_deploy_output', web_in_deploy_output)
    if return_code1 != 0:
        logger.info("WebInDeploy failed")
        update_status('web_in_deploy_status', 'error')
        update_status('web_in_deploy_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_deploy_status', 'success')

    albDns = tf.output('ALB-DNS')
    nlbDns = tf.output('NLB-DNS')
    fwMgt = tf.output('MGT-IP-FW-1')
    # Same output value; avoid running 'terraform output' a second time.
    fwMgtIP = fwMgt

    logger.info("Got these values from output \n\n")
    logger.info("AppGateway address is {}".format(albDns))
    logger.info("Internal loadbalancer address is {}".format(nlbDns))
    logger.info("Firewall Mgt address is {}".format(fwMgt))

    #
    # Check firewall is up and running
    #

    api_key = getApiKey(fwMgtIP, username, password)

    while True:
        err = getFirewallStatus(fwMgtIP, api_key)
        if err == 'cmd_error':
            # NOTE(review): this branch loops again immediately with no
            # sleep, so repeated command errors busy-loop -- confirm intent.
            logger.info("Command error from fw ")

        elif err == 'no':
            logger.info("FW is not up...yet")
            time.sleep(60)
            continue

        elif err == 'almost':
            logger.info("MGT up waiting for dataplane")
            time.sleep(20)
            continue

        elif err == 'yes':
            logger.info("FW is up")
            break

    logger.debug(
        'Giving the FW another 10 seconds to fully come up to avoid race conditions'
    )
    time.sleep(10)
    fw = firewall.Firewall(hostname=fwMgtIP,
                           api_username=username,
                           api_password=password)
    logger.info("Updating firewall with latest content pack")

    update_fw(fwMgtIP, api_key)

    # -- Stage 3: Configure Firewall ---------------------------------------

    WebInFWConf_vars.update({'FW_Mgmt_IP': fwMgtIP})
    tf = Terraform(working_dir='./WebInFWConf')
    tf.cmd('init')
    kwargs = {"auto-approve": True}

    logger.info("Applying addtional config to firewall")

    # update initial vars with generated fwMgt ip
    WebInFWConf_vars['mgt-ipaddress-fw1'] = fwMgt

    if run_plan:
        tf.plan(capture_output=capture_output, var=WebInFWConf_vars)

    return_code2, stdout, stderr = tf.apply(capture_output=capture_output,
                                            skip_plan=True,
                                            var=WebInFWConf_vars,
                                            **kwargs)

    web_in_fw_conf_out = tf.output()

    update_status('web_in_fw_conf_output', web_in_fw_conf_out)

    logger.debug(
        'Got Return code for deploy WebInFwConf {}'.format(return_code2))

    if return_code2 != 0:
        logger.error("WebInFWConf failed")
        update_status('web_in_fw_conf_status', 'error')
        update_status('web_in_fw_conf_stderr', stderr)
        print(json.dumps(status_output))
        exit(1)
    else:
        update_status('web_in_fw_conf_status', 'success')

    logger.info("Commit changes to firewall")

    fw.commit()
    logger.info("waiting for commit")
    time.sleep(60)
    logger.info("waiting for commit")

    #
    # Check Jenkins
    #

    logger.info('Checking if Jenkins Server is ready')

    # FIXME - add outputs for all 3 dirs

    res = getServerStatus(albDns)

    if res == 'server_up':
        logger.info('Jenkins Server is ready')
        logger.info('\n\n   ### Deployment Complete ###')
        logger.info(
            '\n\n   Connect to Jenkins Server at http://{}'.format(albDns))
    else:
        logger.info('Jenkins Server is down')
        logger.info('\n\n   ### Deployment Complete ###')

    # dump out status to stdout
    print(json.dumps(status_output))
# Ejemplo n.º 21
# 0
class TerraformWorker:
    """Wrapper around python-terraform pinned to one config and workspace.

    Unknown attribute access is proxied to ``terraform <attr>`` through
    ``__getattr__``, so e.g. ``worker.plan()`` runs ``terraform plan``.
    Usable as a context manager; exiting closes the cloned configuration.
    """

    def __init__(self, config_path, workspace, isolate=True, logger=None):
        """Bind a terraform configuration and switch to *workspace*.

        Args:
            config_path: path to the terraform configuration.
            workspace: workspace name (URL-safe characters only).
            isolate: when True, work on a private clone of the config.
            logger: optional logger; defaults to this module's logger.
        """
        self.logger = logger or logging.getLogger(__name__)
        self.isolate = isolate
        self.config_path = config_path  # setter may clone the config
        self.tf = Terraform(working_dir=self.config_path)
        self.workspace = workspace  # setter creates/selects the workspace

    def __getattr__(self, item):
        """Proxy unknown attributes as terraform subcommands."""

        def wrapper(*args, **kwargs):
            kwargs.update({'no_color': IsFlagged})

            # Some subcommands need extra default kwargs (e.g. auto-approve).
            if item in KWARGS_MAPPING:
                kwargs.update(KWARGS_MAPPING[item])

            rc, stdout, stderr = self.tf.cmd(
                item, *args, **kwargs)

            return rc, stdout.strip(), stderr.strip()

        return wrapper

    @property
    def config_path(self):
        return self._config_path

    @config_path.setter
    def config_path(self, c):
        # When isolated, clone the configuration so concurrent workers do
        # not trample each other's state files.
        self._config = TerraformConfig(c)
        if self.isolate:
            self._config_path = self._config.clone()
        else:
            self._config_path = self._config.path

    @property
    def workspace(self):
        return self._workspace

    @workspace.setter
    def workspace(self, w):
        # Fix: raw string -- '\-' is an invalid escape in a normal string.
        expr = re.compile(r'^[a-z0-9\-_.~]{1,255}$', re.IGNORECASE)

        if not re.match(expr, w):
            raise TerrestrialFatalError(
                'Workspace name must contain only URL safe characters.')

        rc, stdout, stderr = self.tf.cmd(
            'workspace', 'new', w, '-no-color')

        if rc != 0:
            # "workspace new" fails when it exists; fall back to "select".
            if 'already exists' in stderr:
                rc, stdout, stderr = self.tf.cmd(
                    'workspace', 'select', w, '-no-color')

                if rc != 0:
                    # Fix: report ``w`` -- self._workspace does not exist yet
                    # on the first assignment, so self.workspace would raise
                    # AttributeError and mask the real error.
                    raise TerrestrialFatalError(
                        f'Failed to set workspace to {w}')
            else:
                # Fix: any other "workspace new" failure was previously
                # swallowed silently.
                raise TerrestrialFatalError(
                    f'Failed to create workspace {w}')

        self.logger.debug(
            f'Switched workspace to {w}')

        self._workspace = w

    def __enter__(self):
        return self

    def __exit__(self, exc_t, exc_v, traceback):
        # Release the (possibly cloned) configuration.
        self._config.close()
# Ejemplo n.º 22
# 0
class Terraform:
    """Terrascript-based provisioning helper for several cloud providers.

    Wraps python-terraform (``TF``) for workspace/plan/apply/destroy and
    generates Terrascript configurations for DigitalOcean, Vultr, Tencent
    Cloud and Alibaba Cloud, plus docker-over-SSH deployment helpers.
    """

    # Docker / nginx deployment defaults.
    DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"
    DEFAULT_DOCKER_ENTRYPOINT_PATH = "/docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = "/nginx.docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_IMAGE = "nginx:stable-alpine"
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = "/usr/share/nginx/html"
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    # NOTE(review): this value looks redacted/scrubbed ("******") -- confirm
    # the intended default SSH user.
    DEFAULT_SSH_USER = "******"
    DEFAULT_SSH_PORT = 22

    # Terraform provisioner type name used for file uploads.
    TERRAFORM_RESOURCE_FILE = "file"

    # trick for terrascript: declare provider/resource/data classes that the
    # installed terrascript version does not ship, so that they can be
    # instantiated by name and serialized into the generated config.
    class null_resource(Resource):
        ...

    # -- Tencent Cloud provider/data/resource declarations --
    class tencentcloud(Provider):
        ...

    class tencentcloud_availability_zones(Data):
        ...

    class tencentcloud_images(Data):
        ...

    class tencentcloud_instance_types(Data):
        ...

    class tencentcloud_security_group(Resource):
        ...

    class tencentcloud_security_group_lite_rule(Resource):
        ...

    class tencentcloud_instance(Resource):
        ...

    class tencentcloud_key_pair(Resource):
        ...

    # -- Alibaba Cloud provider/resource declarations --
    class alicloud(Provider):
        ...

    class alicloud_vpc(Resource):
        ...

    class alicloud_key_pair(Resource):
        ...

    class alicloud_security_group(Resource):
        ...

    class alicloud_security_group_rule(Resource):
        ...

    class alicloud_instance(Resource):
        ...

    class alicloud_vswitch(Resource):
        ...

    class alicloud_zones(Data):
        ...

    # -- Vultr provider/resource declarations --
    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        """Create a terraform wrapper rooted at the module-level work dir."""
        work_dir = TERRAFORM_WORK_DIR
        self.work_dir = work_dir
        self.app = TF(working_dir=work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Run work inside a throwaway terraform workspace.

        Creates a uniquely named workspace and a temporary directory,
        yields the directory path, then switches back to the "default"
        workspace and force-deletes the throwaway one.

        Fix: cleanup now runs in a ``finally`` block, so the workspace and
        the temporary directory are released even when the managed body
        raises (previously an exception skipped cleanup entirely).
        """
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()
        try:
            yield tmp_dir.name
        finally:
            self.app.set_workspace("default")
            self.app.cmd("workspace delete", workspace, force=IsFlagged)
            tmp_dir.cleanup()

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        """Placeholder context manager for patching the docker-over-SSH
        connection configuration; currently a no-op (neither the patch nor
        the cleanup step is implemented yet)."""
        # patch ssh config
        yield
        # clear ssh config

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Serialize *config* to a ``.tf.json`` file in *dir_path* and run
        ``terraform init`` against that directory.

        Returns the NamedTemporaryFile object (created with delete=False,
        so the file survives after close); the caller owns cleanup.
        """
        tmp_config_file = NamedTemporaryFile(mode="wt",
                                             suffix=".tf.json",
                                             dir=dir_path,
                                             delete=False)
        logging.info(str(config))
        tmp_config_file.write(str(config))
        # NOTE(review): the file is never read back here; this seek looks
        # vestigial -- confirm before removing.
        tmp_config_file.seek(0)

        self.app.init(
            dir_path
            # disable manual plugin dir because it changes too fast
            # dir_path, plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Render *config* in a throwaway workspace and return the result
        of ``terraform plan``."""
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)
            plan_result = self.app.plan(workspace_dir, no_color=IsFlagged)
        return plan_result

    def run_terraform_apply(self, config: Terrascript):
        """Apply *config* in a throwaway workspace and return a mapping of
        terraform output names to their values."""
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)

            self.app.apply(workspace_dir, skip_plan=True, no_color=IsFlagged)
            raw_outputs = self.app.output(json=IsFlagged, no_color=IsFlagged)

            # Unwrap {"name": {"value": ...}} into {"name": ...}.
            extracted = {}
            for output_name in raw_outputs:
                extracted[output_name] = raw_outputs[output_name]["value"]

        return extracted

    def run_terraform_destroy(self, config: Terrascript):
        """Destroy the resources described by *config*, working inside a
        throwaway workspace."""
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)
            outcome = self.app.destroy(workspace_dir)
        return outcome

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        """Build a Terrascript config for a single DigitalOcean droplet.

        When *public_key* is given, it is registered as a DO ssh key and
        attached to the droplet. Exposes ``ip`` (droplet ipv4 address) and
        ``server_id`` outputs.
        """
        cfg = Terrascript()
        do_provider = provider.digitalocean(token=token)
        droplet = resource.digitalocean_droplet(
            "server",
            image=config_data["os_code"],
            name=config_data["hostname"],
            region=config_data["region_code"],
            size=config_data["plan_code"],
            ssh_keys=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        if public_key:
            # Register the key and reference it from the droplet.
            key_resource = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            droplet["ssh_keys"] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            cfg += key_resource

        ip_output = Output(
            "ip", value="${digitalocean_droplet.server.ipv4_address}")
        id_output = Output("server_id",
                           value="${digitalocean_droplet.server.id}")
        for element in (do_provider, droplet, ip_output, id_output):
            cfg += element

        return cfg

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        """Build a Terrascript config for a single Vultr server.

        When *public_key* is given, it is registered and attached to the
        server. Exposes ``ip`` (main ip) and ``server_id`` outputs.
        """
        cfg = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        server = cls.vultr_server(
            "server",
            plan_id=config_data["plan_code"],
            region_id=config_data["region_code"],
            os_id=config_data["os_code"],
            hostname=config_data["hostname"],
            ssh_key_ids=config_data["ssh_keys"]
            if config_data.get("ssh_keys") else [],
        )
        ip_output = Output("ip", value="${vultr_server.server.main_ip}")
        id_output = Output("server_id",
                           value="${vultr_server.server.id}")

        if public_key:
            key_resource = cls.vultr_ssh_key("vultr_ssh_key",
                                             name="default_key",
                                             ssh_key=public_key)

            server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            cfg += key_resource

        for element in (vultr_provider, server, ip_output, id_output):
            cfg += element

        return cfg

    @classmethod
    def gen_tencent_cloud_config(
        cls,
        config_data: dict,
        token: str,
        public_key_name: str = None,
        secret_id: str = None,
    ):
        """Build a Terrascript config for one Tencent Cloud instance.

        Pins the tencentcloud provider version, creates an "all-open"
        security group with 10.0.0.0/8 ingress/egress rules, and exposes
        ``ip`` and ``server_id`` outputs.
        """
        cfg = Terrascript()
        tf_settings = terraform(
            **{
                "required_providers": {
                    "tencentcloud": {
                        "source": "terraform-providers/tencentcloud",
                        "version": "~> 1.40.3",
                    },
                }
            })

        cloud_provider = cls.tencentcloud(
            secret_id=secret_id,
            secret_key=token,
            region=config_data["region_code"],
        )
        zones = cls.tencentcloud_availability_zones("default")
        sec_group = cls.tencentcloud_security_group(
            "default", name="all-open", description="open all ports")
        sec_group_rule = cls.tencentcloud_security_group_lite_rule(
            "rule",
            security_group_id="${tencentcloud_security_group.default.id}",
            ingress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
            egress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
        )
        server = cls.tencentcloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone=
            "${data.tencentcloud_availability_zones.default.zones.0.name}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            disable_monitor_service=True,
            disable_security_service=True,
            allocate_public_ip=True,
            internet_max_bandwidth_out=5,
            instance_charge_type="POSTPAID_BY_HOUR",
            internet_charge_type="TRAFFIC_POSTPAID_BY_HOUR",
            system_disk_type="CLOUD_SSD",
            count=1,
        )
        ip_output = Output(
            "ip", value="${tencentcloud_instance.server.0.public_ip}")
        id_output = Output(
            "server_id", value="${tencentcloud_instance.server.0.id}")

        if public_key_name:
            server["key_name"] = public_key_name

        for element in (tf_settings, cloud_provider, zones, sec_group,
                        sec_group_rule, server, ip_output, id_output):
            cfg += element

        return cfg

    @classmethod
    def gen_ali_cloud_config(
        cls,
        config_data: dict,
        token: str,
        ssh_key_name: str = None,
        access_key: str = None,
        security_groups: List[str] = None,
    ):
        """Build a Terrascript config for one Alibaba Cloud ECS instance.

        Creates a VPC and vswitch, an "all-open" security group with
        ingress/egress rules, and the instance; exposes ``ip`` and
        ``server_id`` outputs.

        Args:
            config_data: mapping with ``region_code``, ``hostname``,
                ``os_code`` and ``plan_code`` keys.
            token: alicloud secret key.
            ssh_key_name: optional key-pair name to attach to the instance.
            access_key: alicloud access key id.
            security_groups: currently unused (the generated group is
                referenced instead). Fix: default changed from a shared
                mutable ``[]`` to ``None`` (mutable-default pitfall);
                normalized below, so behavior is unchanged.
        """
        security_groups = [] if security_groups is None else security_groups

        ali_cloud_config = Terrascript()
        ali_cloud_provider = cls.alicloud(
            access_key=access_key,
            secret_key=token,
            region=config_data["region_code"],
        )

        ali_zone = cls.alicloud_zones(
            "default",
            available_disk_category="cloud_efficiency",
            available_resource_creation="Instance",
        )
        ali_vpc = cls.alicloud_vpc(
            "vpc",
            cidr_block="172.16.0.0/12",
        )
        ali_vswitch = cls.alicloud_vswitch(
            "vswitch",
            vpc_id="${alicloud_vpc.vpc.id}",
            cidr_block="172.16.0.0/29",
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
        )
        ali_security_group = cls.alicloud_security_group(
            "group",
            name="all-open",
            vpc_id="${alicloud_vpc.vpc.id}",
            description="open all ports",
            inner_access_policy="Accept",
        )
        ali_internet_security_group_rule = cls.alicloud_security_group_rule(
            "internet",
            # nic_type="internet",
            security_group_id="${alicloud_security_group.group.id}",
            type="ingress",
            port_range="-1/-1",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
        )
        ali_intranet_security_group_rule = cls.alicloud_security_group_rule(
            "intranet",
            # nic_type="intranet",
            security_group_id="${alicloud_security_group.group.id}",
            port_range="-1/-1",
            type="egress",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
            priority=1,
        )
        ali_cloud_server = cls.alicloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
            # security_groups=security_groups,
            security_groups="${alicloud_security_group.group.*.id}",
            vswitch_id="${alicloud_vswitch.vswitch.id}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            security_enhancement_strategy="Deactive",
            instance_charge_type="PostPaid",
            internet_charge_type="PayByTraffic",
            internet_max_bandwidth_out=2,
        )
        ali_output_ip = Output("ip",
                               value="${alicloud_instance.server.public_ip}")
        ali_output_id = Output("server_id",
                               value="${alicloud_instance.server.id}")

        if ssh_key_name:
            ali_cloud_server["key_name"] = ssh_key_name

        ali_cloud_config += ali_cloud_provider
        ali_cloud_config += ali_zone
        ali_cloud_config += ali_vpc
        ali_cloud_config += ali_vswitch
        ali_cloud_config += ali_security_group
        ali_cloud_config += ali_internet_security_group_rule
        ali_cloud_config += ali_intranet_security_group_rule
        ali_cloud_config += ali_cloud_server
        ali_cloud_config += ali_output_ip
        ali_cloud_config += ali_output_id

        return ali_cloud_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Return a remote-exec provisioner that appends *public_key* to the
        remote user's ``~/.ssh/authorized_keys``.

        Fix: the original inline command was missing ``echo``, so the key
        text itself would have been executed as a shell command instead of
        being written to the file.
        """
        # NOTE(review): the nested provisioner-inside-provisioner wrapping
        # looks suspicious, but is kept as-is to preserve the returned
        # structure for existing callers.
        return provisioner(
            "remote-exec",
            provisioner=provisioner(
                "remote-exec",
                inline=[
                    "mkdir -p ~/.ssh",
                    f"echo '{public_key}' >> ~/.ssh/authorized_keys"
                ],
            ),
        )

    @classmethod
    def gen_ssh_conn_config(
        cls,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ) -> dict:
        """Return a terraform ``connection`` mapping for an SSH session.

        See https://www.terraform.io/docs/provisioners/connection.html
        for the full list of supported connection arguments.
        """
        conn = dict(
            type="ssh",
            user=ssh_user,
            private_key=ssh_private_key,
            host=ssh_host,
            port=ssh_port,
            timeout="30s",
        )
        return conn

    @classmethod
    def gen_site_docker_deploy_config(
        cls,
        *,
        docker_host: str = DEFAULT_DOCKER_HOST,
        site_name: str = None,
        template_tar_bytes: bytes = None,
        script: str = None,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a Terrascript config that runs an nginx site container via
        a docker provider connected over SSH.

        Optionally uploads a base64-encoded site tarball
        (*template_tar_bytes*) and a rendered entrypoint script (*script*)
        into the container.
        """
        config = Terrascript()
        docker_provider = provider.docker(
            host=docker_host,
            connection=cls.gen_ssh_conn_config(
                ssh_user=ssh_user,
                ssh_private_key=ssh_private_key,
                ssh_host=ssh_host,
                ssh_port=ssh_port,
            ),
        )
        docker_image_resource = resource.docker_image(
            "nginx_image",
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            "nginx_container",
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={"internal": 80},
            upload=[],
        )
        docker_name_resource = resource.random_pet(
            "docker_pet_name",
            length=1,
        )

        if template_tar_bytes:
            # Fix: the original bound a 1-tuple here (stray trailing comma)
            # and then interpolated it with "${template_tar_file}" inside an
            # f-string, which produced a literal "$" followed by the tuple
            # repr in the upload path.
            template_tar_file = (
                f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz")
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode("utf8")
            template_tar_path = (
                f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            )
            docker_container_resource["upload"].append({
                "content_base64": template_tar_file_content,
                "file": template_tar_path
            })

        if script:
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH,
            )
            docker_container_resource["upload"].append({
                "content": entrypoint_sh_content,
                "file": cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(
        self,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config whose null_resource runs ``ls -la`` over SSH."""
        connection = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        runner = self.null_resource(
            "remote-exec",
            provisioner=provisioner("remote-exec",
                                    inline=["ls -la"],
                                    connection=connection),
        )

        config = Terrascript()
        config += runner
        return config

    def upload_file(
        self,
        content: str,
        *,
        destination: str = DEFAULT_UPLOAD_PATH,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build a config that uploads *content* to *destination* using a
        terraform ``file`` provisioner over SSH."""
        connection = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        uploader = self.null_resource(
            "upload_file_resource",
            provisioner=provisioner(
                self.TERRAFORM_RESOURCE_FILE,
                content=content,
                destination=destination,
                connection=connection,
            ),
        )

        config = Terrascript()
        config += uploader
        return config