예제 #1
0
def setup_terraform(version, environment, aws_region, terraform_bin_path,
                    s3_bucket_name):
    """Fixture-style generator: apply a terraform stack, yield to the caller,
    then destroy the stack on teardown.

    Args:
        version: value passed to terraform as ``module_version``.
        environment: deployment environment name.
        aws_region: AWS region the stack is applied in.
        terraform_bin_path: path to the terraform binary to use.
        s3_bucket_name: bucket name forwarded to the module.

    Raises:
        Exception: if ``terraform apply`` or ``terraform destroy`` exits
            non-zero.
    """
    print(f"deploy test to region {aws_region}")

    # NOTE(review): `full_path` is not defined in this function or its
    # parameters — it must come from an enclosing scope. Confirm it points
    # at the terraform working directory.
    tf = Terraform(working_dir=full_path,
                   terraform_bin_path=terraform_bin_path)
    var_tf = {
        "module_version": version,
        # Read from the process environment; None when unset.
        "tiingo_api_key": os.environ.get("TIINGO_API_KEY"),
        "aws_region": aws_region,
        "environment": environment,
        "s3_bucket_name": s3_bucket_name,
    }

    tf.init()
    # skip_plan=True is python-terraform's spelling of `apply -auto-approve`.
    ret_code, out, err = tf.apply(skip_plan=True, var=var_tf)

    if ret_code != 0:
        print(err)
        # Best-effort cleanup of partially created resources before failing.
        ret_code, out, err = tf.destroy(var=var_tf)
        raise Exception("Error applying terraform. Error \n {}".format(err))

    yield

    ret_code, out, err = tf.destroy(var=var_tf)

    if ret_code != 0:
        print(err)
        # Fixed typo in the error message: "detroying" -> "destroying".
        raise Exception("Error destroying terraform. Error \n {}".format(err))
예제 #2
0
class TerraformProvider():
    """Drives a terraform deployment described by a configuration dict."""

    def __init__(self, configuration, terraform_workspace):
        log.info("Preparing terraform deployment")
        log.debug("Using workspace: {}".format(terraform_workspace))

        self._backend_provider = get_backend_provider(
            configuration, terraform_workspace)

        tf_parameters = configuration["terraform"]["parameters"]
        self._controller = Terraform(working_dir=terraform_workspace,
                                     variables=tf_parameters)

        # Stream init output straight to the console; force-copy existing state.
        self._controller.init(capture_output=False, force_copy=IsFlagged)

    @stage("Terraform deploy")
    def deploy(self):
        """Provision the infrastructure and publish its outputs as an artifact."""
        log.info("Deploying terraform infrastructure")

        self._backend_provider.init_remote_backend()
        # skip_plan=True maps to `terraform apply -auto-approve`.
        self._controller.apply(capture_output=False, skip_plan=True)
        artifact.create("terraform_output",
                        content=json.dumps(self._controller.output()))

    def destroy(self):
        """Tear the infrastructure down and remove the remote backend."""
        log.info("Destroying terraform infrastructure")

        self._controller.destroy(capture_output=False)
        self._backend_provider.remove_remote_backend()
예제 #3
0
class TerraformUtils:
    """python-terraform helper bound to a fixed tfvars/state file layout."""

    # File names used inside working_dir.
    VAR_FILE = "terraform.tfvars.json"
    STATE_FILE = "terraform.tfstate"

    def __init__(self, working_dir: str):
        logging.info("TF FOLDER %s ", working_dir)
        self.working_dir = working_dir
        self.var_file_path = os.path.join(working_dir, self.VAR_FILE)
        self.tf = Terraform(working_dir=working_dir,
                            state=self.STATE_FILE,
                            var_file=self.VAR_FILE)
        self.init_tf()

    def init_tf(self) -> None:
        """Run `terraform init` against the pre-installed plugin cache."""
        self.tf.cmd("init -plugin-dir=/root/.terraform.d/plugins/",
                    raise_on_error=True)

    def apply(self, refresh: bool = True) -> None:
        """Apply the stack (auto-approved); raise with full output on failure."""
        return_value, output, err = self.tf.apply(no_color=IsFlagged,
                                                  refresh=refresh,
                                                  input=False,
                                                  skip_plan=True)
        if return_value == 0:
            return
        message = f"Terraform apply failed with return value {return_value}, output {output} , error {err}"
        logging.error(message)
        raise Exception(message)

    def change_variables(self,
                         variables: Dict[str, str],
                         refresh: bool = True) -> None:
        """Merge *variables* into the tfvars JSON file and re-apply."""
        with open(self.var_file_path, "r+") as handle:
            merged = json.load(handle)
            merged.update(variables)
            # Rewrite the file in place with the merged content.
            handle.seek(0)
            handle.truncate()
            json.dump(merged, handle)
        self.apply(refresh=refresh)

    def get_state(self) -> str:
        """Re-read the state file and return the parsed state.

        NOTE(review): despite the `str` annotation this returns the object
        produced by `read_state_file` (a tfstate wrapper) — confirm callers.
        """
        self.tf.read_state_file(self.STATE_FILE)
        return self.tf.tfstate

    def set_new_vips(self, api_vip: str, ingress_vip: str) -> None:
        """Point the deployment at new API/ingress VIPs and apply."""
        new_vips = {"api_vip": api_vip, "ingress_vip": ingress_vip}
        self.change_variables(variables=new_vips, refresh=True)

    def destroy(self) -> None:
        """Destroy the stack non-interactively."""
        self.tf.destroy(force=True, input=False, auto_approve=True)
예제 #4
0
def vm_docker_deploy_old(config):
    """Deploy the selected instance plus a docker stack on top of it, collect
    the docker outputs into ``config["logs"]``, then destroy both layers.

    Returns the (mutated) *config* dict; ``logs`` is None when no instance
    was selected.
    """
    file_dir = os.path.dirname(os.path.realpath(__file__))
    provider = config["params"]["Provider"][0]

    # Bail out early when the caller made no instance selection.
    if config["selection"]["instance"] is None:
        config["logs"] = None
        return config

    # One Terraform handle per layer.
    instance_tf = Terraform(working_dir=f"{file_dir}/instance_deploy/{provider}")
    docker_tf = Terraform(f"{file_dir}/docker_deploy")

    tfstate_path = f"{config['base_dir']}/tf_states/{config['job_id']}"
    tfvars = f"{config['base_dir']}/tfvars.tfvars"

    instance_backend = {'path': f"{tfstate_path}/terraform.tfstate"}
    docker_backend = {'path': f"{tfstate_path}/docker_tfstate/terraform.tfstate"}

    ## ALSO DIRECT TO A VARS.TF IN THE BASE_DIR
    instance_tf.init(backend_config=instance_backend)
    instance_tf.apply(var_file=tfvars,
                      lock=False,
                      var={'instance_type': config["selection"]["instance"]},
                      skip_plan=True)

    docker_tf.init(backend_config=docker_backend)
    docker_tf.apply(var_file=tfvars,
                    lock=False,
                    var={'tfstate_path': tfstate_path},
                    skip_plan=True)

    # Re-init before each follow-up command so the backend path is honoured.
    docker_tf.init(backend_config=docker_backend)
    config["logs"] = docker_tf.output()

    docker_tf.init(backend_config=docker_backend)
    docker_tf.destroy(auto_approve=True)

    instance_tf.init(backend_config=instance_backend)
    instance_tf.destroy(auto_approve=True)

    return config
예제 #5
0
def destroy(pathToInf):
    """Run `terraform destroy` in *pathToInf* and return its exit code."""
    return_code, _, _ = Terraform(pathToInf).destroy()
    return return_code
예제 #6
0
def delete_bastion(plateform):
    """Destroy the bastion layer described by *plateform*.

    Raises:
        Exception: if `terraform destroy` exits non-zero.
    """
    tf = Terraform(working_dir='terraform/layer-bastion')

    # Switch to the plateform-specific workspace first.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)

    bastion_cfg = plateform['infrastructure']['bastion']
    destroy_vars = {
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'instance_type': bastion_cfg['instance-type'],
        'instance_image': bastion_cfg['image'],
    }
    code, _, _ = tf.destroy(var=destroy_vars,
                            capture_output=False,
                            no_color=IsNotFlagged,
                            skip_plan=IsNotFlagged,
                            auto_approve=True)

    if code != 0:
        # BUGFIX: previously reported "layer-data" (copy-paste from
        # delete_data); this function destroys layer-bastion.
        raise Exception("error in Terraform layer-bastion")
예제 #7
0
def delete_base(plateform):
    """Destroy the base network layer described by *plateform*."""
    tf = Terraform(working_dir='terraform/layer-base')

    # Switch to the plateform-specific workspace first.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)

    infra = plateform['infrastructure']
    destroy_vars = {
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'range-ip': infra['range-ip'],
        'range-ip-pod': infra['range-ip-pod'],
        'range-ip-svc': infra['range-ip-svc'],
        'range-plateform': infra['range-plateform'],
        'allowed-ips': plateform['ips_whitelist'],
        'env': plateform['type'],
    }
    code, _, _ = tf.destroy(var=destroy_vars,
                            capture_output=False,
                            no_color=IsNotFlagged,
                            skip_plan=IsNotFlagged,
                            auto_approve=True)
    if code != 0:
        raise Exception("error in Terraform layer-base")
예제 #8
0
 def test_destroy(self):
     """Destroying an empty stack succeeds and reports zero resources."""
     terraform_client = Terraform(working_dir=current_path,
                                  variables={"test_var": "test"})
     terraform_client.init("var_to_output")
     code, stdout, _ = terraform_client.destroy("var_to_output")
     assert code == 0
     assert "Destroy complete! Resources: 0 destroyed." in stdout
예제 #9
0
class Terraform:
    """Workspace-aware facade over python_terraform's client.

    All commands run in the local ``terraform`` directory (created on
    construction). Non-zero exit codes are surfaced as ``CwsCommandError``
    (declared elsewhere in this project).
    """

    def __init__(self, init):
        # Lazy import keeps python_terraform optional at module-import time.
        from python_terraform import Terraform as PythonTerraform

        self.terraform = PythonTerraform(working_dir='terraform')
        # Ensure the working directory exists before any command runs.
        Path(self.working_dir).mkdir(exist_ok=True)
        if init:
            return_code, _, err = self.terraform.init(
                dir_or_plan=self.working_dir)
            if return_code != 0:
                raise CwsCommandError(err)

    @property
    def working_dir(self):
        # Single source of truth: the underlying client's working directory.
        return self.terraform.working_dir

    def init(self):
        """Run `terraform init`; raise CwsCommandError on failure."""
        return_code, _, err = self.terraform.init()
        if return_code != 0:
            raise CwsCommandError(err)

    def apply(self, workspace, targets):
        """Apply *targets* inside *workspace* (auto-approved, serial)."""
        self.select_workspace(workspace)
        # skip_plan=True auto-approves; parallelism=1 keeps output ordered.
        return_code, _, err = self.terraform.apply(target=targets,
                                                   skip_plan=True,
                                                   input=False,
                                                   raise_on_error=False,
                                                   parallelism=1)
        if return_code != 0:
            raise CwsCommandError(err)

    def destroy(self, workspace, targets):
        """Destroy *targets* inside *workspace*; raise on failure."""
        self.select_workspace(workspace)
        return_code, _, err = self.terraform.destroy(target=targets)
        if return_code != 0:
            raise CwsCommandError(err)

    def output(self):
        """Return terraform outputs of the default workspace as a dict.

        NOTE(review): when there are no outputs this returns the *string*
        "{}" rather than an empty dict — confirm callers expect that.
        """
        self.select_workspace("default")
        values = self.terraform.output(capture_output=True)
        return {key: value['value']
                for key, value in values.items()} if values else "{}"

    def workspace_list(self):
        """Return an iterator of workspace names from `terraform workspace list`."""
        self.select_workspace("default")
        return_code, out, err = self.terraform.cmd('workspace', 'list')
        if return_code != 0:
            raise CwsCommandError(err)
        # Skip the first character (presumably the "*" current-workspace
        # marker — verify), strip spaces/tabs/CRs, split on newlines and
        # drop empty entries.
        values = out[1:].translate(str.maketrans('', '', ' \t\r')).split('\n')
        return filter(None, values)

    def select_workspace(self, workspace):
        """Select *workspace*, creating it when missing (except 'default')."""
        return_code, out, err = self.terraform.workspace('select', workspace)
        if workspace != 'default' and return_code != 0:
            # Select failed: assume the workspace doesn't exist yet.
            _, out, err = self.terraform.workspace('new',
                                                   workspace,
                                                   raise_on_error=True)
        # Lazily initialize the directory if it has never been initialized.
        if not (Path(self.working_dir) / '.terraform').exists():
            self.terraform.init(input=False, raise_on_error=True)
예제 #10
0
 def destroy_terraform(self, cluster):
     """Asynchronously run `terraform destroy` for *cluster*, streaming the
     process output to stdout; return True when the exit code is 0.
     """
     # Lazily create the working directory for this cluster on first use.
     if not self.working_path:
         self.working_path = create_terrafrom_working_dir(cluster_name=cluster)
     t = Terraform(working_dir=self.working_path)
     # synchronous=False makes python-terraform return the process handle
     # (Popen-like) instead of waiting for completion.
     p, _, _ = t.destroy('./', synchronous=False, no_color=IsNotFlagged, refresh=True)
     # Stream stdout live, then collect the remaining stderr on exit.
     for i in p.stdout:
         print(i.decode())
     _, err = p.communicate()
     print(err.decode())
     return p.returncode == 0
예제 #11
0
def delete_kubernetes(plateform):
    """Destroy the GKE layer described by *plateform*."""
    tf = Terraform(working_dir='terraform/layer-kubernetes')

    # Switch to the plateform-specific workspace first.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)

    # All five master-authorized-network entries are reset to the same
    # dummy CIDR before destroying.
    placeholder_ip = '10.0.0.1/32'

    gke = plateform['infrastructure']['gke']
    destroy_vars = {
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'k8s-version': gke['version'],
        'preemptible': gke['preemptible'],
        'instance-type': gke['instance-type'],
        'white-ip-1': placeholder_ip,
        'white-ip-2': placeholder_ip,
        'white-ip-3': placeholder_ip,
        'white-ip-4': placeholder_ip,
        'white-ip-5': placeholder_ip,
        'min_node': gke['min'],
        'max_node': gke['max'],
        'range_ip_master': plateform['infrastructure']['range-ip-master'],
    }
    code, _, _ = tf.destroy(var=destroy_vars,
                            capture_output=False,
                            no_color=IsNotFlagged,
                            skip_plan=IsNotFlagged,
                            auto_approve=True)

    if code != 0:
        raise Exception("error in Terraform layer-kubernetes")
예제 #12
0
def delete_terraform_stack(cluster_uuid, project_id, dir_path, state_path, proejct_deleted):
    """Destroy a cluster's terraform stack and clean up its state directory.

    Returns the (return_code, stdout, stderr) triple of the destroy run.
    Note: the last parameter name ("proejct_deleted") keeps its original
    typo for backward compatibility with keyword callers.
    """
    state_file = f"{state_path}/{cluster_uuid}.tfstate"
    tf_vars_file = f"{state_path}/vars.tf"
    tf = Terraform(dir_path)
    return_code, stdout, stderr = tf.get(capture_output=False)
    return_code, stdout, stderr = tf.init(capture_output=False)
    return_code, stdout, stderr = tf.destroy(var_file=tf_vars_file,
                                             auto_approve=IsFlagged,
                                             capture_output=False,
                                             state=state_file)

    # Drop the local state; also drop the project folder when the project
    # itself was deleted.
    shutil.rmtree(state_path)
    if proejct_deleted:
        shutil.rmtree(f"{dir_path}/{project_id}")

    return return_code, stdout, stderr
예제 #13
0
def destroy():
    """Destroy the pollexy environment using the region from ~/.aws/config.

    Fixes over the previous revision: removed a duplicated SafeConfigParser
    instantiation and converted Python 2 `print` statements to the print()
    function (the old form is a syntax error under Python 3).
    """
    tf = Terraform(working_dir='./terraform')
    parser = SafeConfigParser()
    config = os.path.expanduser('~/.aws/config')
    parser.read(config)
    if not parser.has_section('profile pollexy'):
        print("You need to run 'pollexy credentials configure'")
        return
    region = parser.get('profile pollexy', 'region')
    print('Destroying environment . . . ')
    code, stdout, stderr = tf.destroy(var={'aws_region': region})
    if (stderr):
        print(stderr)
    else:
        print(stdout)
예제 #14
0
def delete_data(plateform, admin_password, app_password, unique_id):
    """Destroy the data layer (Cloud SQL) described by *plateform*."""
    tf = Terraform(working_dir='terraform/layer-data')

    # Switch to the plateform-specific workspace first.
    code, _, _ = tf.cmd("workspace select " + plateform['name'],
                        capture_output=False,
                        no_color=IsNotFlagged,
                        skip_plan=IsNotFlagged)

    cloudsql = plateform['infrastructure']['cloudsql']
    destroy_vars = {
        'region': plateform['region'],
        'remote_bucket': plateform['remote-bucket'],
        'prefix_bucket': plateform['prefix-bucket'],
        'gcp-project': plateform['gcp-project'],
        'database_version': cloudsql['version'],
        'database_instance_type': cloudsql['instance-type'],
        'database_disk_size': cloudsql['disk-size'],
        'admin_password': admin_password,
        'app_password': app_password,
        "unique_id": unique_id,
        'env': plateform['type'],
    }
    code, _, _ = tf.destroy(var=destroy_vars,
                            capture_output=False,
                            no_color=IsNotFlagged,
                            skip_plan=IsNotFlagged,
                            auto_approve=True)

    if code != 0:
        raise Exception("error in Terraform layer-data")
예제 #15
0
class TerraformUtils:
    """python-terraform wrapper bound to a working directory with fixed
    variable/state file names; filters variables via HCL parsing of the
    directory's .tf files.
    """

    # File names used inside working_dir for variables and state.
    VAR_FILE = "terraform.tfvars.json"
    STATE_FILE = "terraform.tfstate"

    def __init__(self, working_dir: str, terraform_init: bool = True):
        log.info("TF FOLDER %s ", working_dir)
        self.working_dir = working_dir
        self.var_file_path = os.path.join(working_dir, self.VAR_FILE)
        self.tf = Terraform(working_dir=working_dir,
                            state=self.STATE_FILE,
                            var_file=self.VAR_FILE)

        if terraform_init:
            self.init_tf()

    @retry(exceptions=TerraformCommandError, tries=10, delay=10)
    def init_tf(self) -> None:
        """Run `terraform init`, retried up to 10 times (presumably to ride
        out flaky provider downloads — confirm)."""
        self.tf.cmd("init", raise_on_error=True, capture_output=True)

    def select_defined_variables(self, **kwargs):
        """Return only the kwargs that are truthy AND declared as variables
        in this working directory's .tf files."""
        supported_variables = self.get_variable_list()
        return {
            k: v
            for k, v in kwargs.items() if v and k in supported_variables
        }

    def get_variable_list(self):
        """Collect the names of all variables declared across *.tf files."""
        results = list()

        for tf_file in pathlib.Path(self.working_dir).glob("*.tf"):
            with open(tf_file, "r") as fp:
                terraform_file_dict = hcl2.load(fp)
                results += terraform_file_dict[
                    "variable"] if "variable" in terraform_file_dict else list(
                    )

        # hcl2 yields one single-key dict per variable block; keep the names.
        return list(map(lambda d: next(iter(d)), results))

    def apply(self, refresh: bool = True) -> None:
        """Apply the stack (auto-approved); raise with full output on failure."""
        return_value, output, err = self.tf.apply(no_color=IsFlagged,
                                                  refresh=refresh,
                                                  input=False,
                                                  skip_plan=True)
        if return_value != 0:
            message = f"Terraform apply failed with return value {return_value}, output {output} , error {err}"
            log.error(message)
            raise Exception(message)

    def set_and_apply(self, refresh: bool = True, **kwargs) -> None:
        """Filter kwargs to declared variables, persist them, re-init, apply.

        NOTE(review): change_variables() already calls apply(), so this path
        applies twice — confirm that is intentional.
        """
        defined_variables = self.select_defined_variables(**kwargs)
        self.change_variables(defined_variables)
        self.init_tf()
        self.apply(refresh=refresh)

    def change_variables(self,
                         variables: Dict[str, str],
                         refresh: bool = True) -> None:
        """Merge *variables* into the tfvars JSON file, then apply."""
        with open(self.var_file_path, "r+") as _file:
            tfvars = json.load(_file)
            tfvars.update(variables)
            # Rewrite the file in place with the merged content.
            _file.seek(0)
            _file.truncate()
            json.dump(tfvars, _file)
        self.apply(refresh=refresh)

    def get_state(self) -> Tfstate:
        """Re-read the state file and return the parsed Tfstate object."""
        self.tf.read_state_file(self.STATE_FILE)
        return self.tf.tfstate

    def get_resources(self, resource_type: str = None) -> List[Dict[str, Any]]:
        """Return state resources, optionally filtered by terraform type."""
        state = self.get_state()
        return [
            resource for resource in state.resources
            if resource_type is None or resource["type"] == resource_type
        ]

    def set_new_vips(self, api_vip: str, ingress_vip: str) -> None:
        """Update the API/ingress VIP variables and apply the change."""
        self.change_variables(variables={
            "api_vip": api_vip,
            "ingress_vip": ingress_vip
        },
                              refresh=True)

    def destroy(self) -> None:
        """Destroy the stack non-interactively."""
        self.tf.destroy(force=True, input=False, auto_approve=True)
예제 #16
0
    parser.add_argument("--lambdas")

    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    tf = Terraform(working_dir=args.workdir)
    tf.init()

    if args.apply:
        # Package every comma-separated lambda source file into
        # <workdir>/<name>.zip before terraform references them.
        for lf in args.lambdas.split(","):
            name = lf.split("/")[-1].split(".")[0]
            zip_lambda_function(lf, f"{args.workdir}/{name}.zip")

        # skip_plan=True is python-terraform's `apply -auto-approve`.
        tf.apply(
            no_color=IsFlagged,
            refresh=False,
            var_file=args.tfvars,
            skip_plan=True,
            capture_output=False,
        )

    elif args.destroy:
        tf.destroy(
            no_color=IsFlagged, var_file=args.tfvars, capture_output=False
        )
    else:
        raise ValueError("Action not specified.")
예제 #17
0
class Component:
  """Base class for a plateform component deployed through Terraform.

  Subclasses set `blocname`/`component_name`, override check()/define_var(),
  and call create()/delete()/output() with their component's working
  directory.
  """

  blocname = ""
  component_name = ""
  workspace = ""

  def __init__(self, plateform):
    self.plateform = plateform
    self.get_constantes()
    # The plateform description does not include this component: nothing
    # more to set up.
    if self.blocname not in plateform:
      return
    self.check()
    self.define_var()
    self.workspace = self.plateform_name

  def get_constantes(self):
    """Extract shared settings (state bucket, name, region) from plateform."""
    self.bucket_component_state = self.plateform['bucket-component-state']
    self.plateform_name = self.plateform['name']

    # Default region when the plateform does not specify one.
    if 'region' not in self.plateform:
      self.plateform['region']='eu-west-1'

    self.region = self.plateform['region']

  def get_workspace(self):
    return self.workspace

  def define_var(self):
    # Subclasses populate self.var with their terraform variables.
    self.var = {}

  def apply(self):
    pass

  def destroy(self):
    pass

  # to check:
  # - dependancies (exemple: rds need network)
  # - yaml validation for component
  def check(self):
    pass

  def init(self, working_dir):
    """Initialise terraform in working_dir against the shared state bucket."""
    self.tf = Terraform(working_dir)
    self.tf.cmd(
      "init -backend-config=bucket=" + self.bucket_component_state + " -backend-config=region=" + self.region,
      capture_output=True,
      no_color=IsNotFlagged
    )

  def create(self, working_dir, var_component, skip_plan=True, workspace_name=""):
    """Apply this component inside its workspace (created on demand).

    Raises:
        Exception: when `terraform apply` exits non-zero.
    """
    if len(workspace_name) == 0:
      workspace_name = self.get_workspace()

    # Drop stale local workspace/state markers so init starts clean.
    if os.path.exists(working_dir+"/.terraform/environment"):
      os.remove(working_dir+"/.terraform/environment")
    else:
      print("File environment not exist")

    if os.path.exists(working_dir+"/.terraform/terraform.tfstate"):
      os.remove(working_dir+"/.terraform/terraform.tfstate")
    else:
      print("File terraform.tfstate not exist")

    self.init(working_dir=working_dir)

    # select workspace, creating it when the select fails
    code, _, _ = self.tf.cmd("workspace select " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)
    if code == 1:
      self.tf.cmd("workspace new " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)

    # terraform apply
    # BUGFIX: was `tf.apply(...)`, which raised NameError — the Terraform
    # handle lives on self.tf (assigned by init() above).
    code, _, _ = self.tf.apply(
      var=var_component,
      capture_output=False,
      no_color=IsNotFlagged,
      skip_plan=skip_plan,
      auto_approve=True)
    if code != 0:
      raise Exception("error in component: " + self.component_name)

  def delete(self, working_dir, var_component, skip_plan=True, workspace_name=""):
    """Destroy this component inside its workspace, if the workspace exists.

    Raises:
        Exception: when `terraform destroy` exits non-zero.
    """
    if len(workspace_name) == 0:
      workspace_name = self.get_workspace()

    # Drop stale local workspace/state markers so init starts clean.
    if os.path.exists(working_dir+"/.terraform/environment"):
      os.remove(working_dir+"/.terraform/environment")
    else:
      print("File environment not exist")

    if os.path.exists(working_dir+"/.terraform/terraform.tfstate"):
      os.remove(working_dir+"/.terraform/terraform.tfstate")
    else:
      print("File terraform.tfstate not exist")

    self.init(working_dir=working_dir)

    code, _, _ = self.tf.cmd("workspace select " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)
    if code == 1:
      print("workspace does not exist")
    else:
      code, _, _ = self.tf.destroy(
        var=var_component,
        capture_output=False,
        no_color=IsNotFlagged,
        skip_plan=IsNotFlagged,
        auto_approve=True)
      if code != 0:
        raise Exception("error in component: " + self.component_name)

  def output(self, var_name, working_dir, skip_plan=True, workspace_name=""):
    """Return the raw `terraform output` text for *var_name* (empty string
    when the workspace does not exist).

    Raises:
        Exception: when the output command exits non-zero.
    """
    if len(workspace_name) == 0:
      workspace_name = self.get_workspace()

    print("search output : " + var_name)

    self.tf = Terraform(working_dir)

    out = ''

    code, _, _ = self.tf.cmd("workspace select " + workspace_name, capture_output=False, no_color=IsNotFlagged, skip_plan=IsNotFlagged)
    if code == 1:
      print("workspace does not exist")
    else:
      code, out, _ = self.tf.cmd(
        "output " + var_name,
        no_color=IsNotFlagged)
      if code != 0:
        raise Exception("error in component: " + self.component_name)

    return out
예제 #18
0
class Terraform:
    """Builds terrascript configurations (DigitalOcean, Vultr, docker-over-SSH)
    and drives python-terraform plan/apply/destroy runs inside throwaway
    workspaces."""

    DEFAULT_DOCKER_HOST = 'unix:///var/run/docker.sock'
    DEFAULT_DOCKER_ENTRYPOINT_PATH = '/docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = '/nginx.docker-entrypoint.sh'
    DEFAULT_NGINX_DOCKER_IMAGE = 'nginx:stable-alpine'
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = '/usr/share/nginx/html'
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    DEFAULT_SSH_USER = '******'
    DEFAULT_SSH_PORT = 22

    TERRAFORM_RESOURCE_FILE = 'file'

    # trick for terrascript: declare resource/provider types it lacks
    class null_resource(Resource):
        ...

    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        self.work_dir = TERRAFORM_WORK_DIR
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Create a unique workspace plus a temp dir, yield the dir path,
        then switch back to 'default' and delete the workspace."""
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()

        yield tmp_dir.name
        self.app.set_workspace('default')
        self.app.cmd('workspace delete', workspace, force=IsFlagged)

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        # patch ssh config
        yield
        # clear ssh config

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Write *config* as a .tf.json file into dir_path and run init
        against the local plugin cache; return the open temp file."""
        tmp_config_file = NamedTemporaryFile(mode='wt',
                                             suffix='.tf.json',
                                             dir=dir_path,
                                             delete=False)
        tmp_config_file.write(str(config))
        tmp_config_file.seek(0)

        self.app.init(
            dir_path,
            plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Plan *config* inside a throwaway workspace and return the result."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            plan = self.app.plan(tw_dir, no_color=IsFlagged)
        return plan

    def run_terraform_apply(self, config: Terrascript):
        """Apply *config* in a throwaway workspace; return outputs as a
        plain {name: value} dict."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            print(config)
            self.app.apply(tw_dir, skip_plan=True, no_color=IsFlagged)
            output_result = self.app.output(json=IsFlagged, no_color=IsFlagged)
            print(output_result)
            output_var = {
                output_var_key: output_result[output_var_key]['value']
                for output_var_key in output_result
            }

        return output_var

    def run_terraform_destroy(self, config: Terrascript):
        """Destroy the resources described by *config* and return the result."""
        with self.terraform_workspace() as tw_dir:
            self.write_terraform_config(config, tw_dir)
            destroy_result = self.app.destroy(tw_dir)
        return destroy_result

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        """Build a terrascript config for a single DigitalOcean droplet,
        optionally registering *public_key* as an SSH key."""
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        do_droplet_resource = resource.digitalocean_droplet(
            f"server",
            image=config_data['os_code'],
            name=config_data['hostname'],
            region=config_data['region_code'],
            size=config_data['plan_code'],
            ssh_keys=config_data['ssh_keys']
            if config_data.get('ssh_keys') else [])
        if public_key:
            digitalocean_ssh_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )

            do_droplet_resource['ssh_keys'] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += digitalocean_ssh_key

        do_output_ip = Output(
            'ip', value="${digitalocean_droplet.server.ipv4_address}")
        do_output_id = Output('server_id',
                              value="${digitalocean_droplet.server.id}")
        do_config += do_provider
        do_config += do_droplet_resource
        do_config += do_output_ip
        do_config += do_output_id

        return do_config

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        """Build a terrascript config for a single Vultr server, optionally
        registering *public_key* as an SSH key."""
        vultr_config = Terrascript()
        vultr_provider = cls.vultr(api_key=token,
                                   rate_limit=700,
                                   retry_limit=3)

        vultr_server = cls.vultr_server(f"server",
                                        plan_id=config_data['plan_code'],
                                        region_id=config_data['region_code'],
                                        os_id=config_data['os_code'],
                                        hostname=config_data['hostname'],
                                        ssh_key_ids=config_data['ssh_keys']
                                        if config_data.get('ssh_keys') else [])
        vultr_output_ip = Output('ip', value="${vultr_server.server.main_ip}")
        vultr_output_id = Output('server_id',
                                 value="${vultr_server.server.id}")

        if public_key:
            vultr_ssh_key = cls.vultr_ssh_key('vultr_ssh_key',
                                              name='default_key',
                                              ssh_key=public_key)

            vultr_server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            vultr_config += vultr_ssh_key

        vultr_config += vultr_provider
        vultr_config += vultr_server
        vultr_config += vultr_output_ip
        vultr_config += vultr_output_id

        return vultr_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Build a remote-exec provisioner that appends *public_key* to the
        remote authorized_keys file."""
        return provisioner("remote-exec",
                           provisioner=provisioner(
                               "remote-exec",
                               inline=[
                                   'mkdir -p ~/.ssh',
                                   f"{public_key} >> ~/.ssh/authorized_keys"
                               ],
                           ))

    @classmethod
    def gen_ssh_conn_config(cls,
                            *,
                            ssh_user: str = DEFAULT_SSH_USER,
                            ssh_private_key: str,
                            ssh_host: str,
                            ssh_port: int = DEFAULT_SSH_PORT) -> dict:
        """Return a terraform `connection` block for SSH provisioners."""
        # see more in https://www.terraform.io/docs/provisioners/connection.html
        return {
            'type': 'ssh',
            'user': ssh_user,
            'private_key': ssh_private_key,
            'host': ssh_host,
            'port': ssh_port,
            'timeout': '30s'
        }

    @classmethod
    def gen_site_docker_deploy_config(cls,
                                      *,
                                      docker_host: str = DEFAULT_DOCKER_HOST,
                                      site_name: str = None,
                                      template_tar_bytes: bytes = None,
                                      script: str = None,
                                      ssh_user: str = DEFAULT_SSH_USER,
                                      ssh_private_key: str,
                                      ssh_host: str,
                                      ssh_port: int = DEFAULT_SSH_PORT):
        """Build a terrascript config that runs an nginx container over a
        remote docker host, optionally uploading a site tarball and an
        entrypoint script into the container."""
        config = Terrascript()
        docker_provider = provider.docker(host=docker_host,
                                          connection=cls.gen_ssh_conn_config(
                                              ssh_user=ssh_user,
                                              ssh_private_key=ssh_private_key,
                                              ssh_host=ssh_host,
                                              ssh_port=ssh_port))
        docker_image_resource = resource.docker_image(
            'nginx_image',
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        docker_container_resource = resource.docker_container(
            'nginx_container',
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={'internal': 80},
            upload=[])
        docker_name_resource = resource.random_pet(
            'docker_pet_name',
            length=1,
        )

        if template_tar_bytes:
            # BUGFIX: the original assignment ended with a stray trailing
            # comma, which made template_tar_file a 1-tuple and corrupted the
            # upload path below.
            template_tar_file = f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz"
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode('utf8')
            # NOTE(review): the "$" before {template_tar_file} is emitted
            # literally into the path — confirm it is intentional.
            template_tar_path = f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/${template_tar_file}"
            docker_container_resource['upload'].append({
                'content_base64':
                template_tar_file_content,
                'file':
                template_tar_path
            })

        if script:
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH)
            docker_container_resource['upload'].append({
                'content':
                entrypoint_sh_content,
                'file':
                cls.DEFAULT_DOCKER_ENTRYPOINT_PATH
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(self,
                    *,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config with a null_resource that runs `ls -la` over SSH."""
        exec_config = Terrascript()
        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        exec_resource = self.null_resource('remote-exec',
                                           provisioner=provisioner(
                                               "remote-exec",
                                               inline=['ls -la'],
                                               connection=ssh_conn))

        exec_config += exec_resource
        return exec_config

    def upload_file(self,
                    content: str,
                    *,
                    destination: str = DEFAULT_UPLOAD_PATH,
                    ssh_user: str = DEFAULT_SSH_USER,
                    ssh_private_key: str,
                    ssh_host: str,
                    ssh_port: int = DEFAULT_SSH_PORT):
        """Build a config with a null_resource file provisioner that uploads
        *content* to *destination* over SSH."""
        upload_config = Terrascript()

        ssh_conn = self.gen_ssh_conn_config(ssh_user=ssh_user,
                                            ssh_private_key=ssh_private_key,
                                            ssh_host=ssh_host,
                                            ssh_port=ssh_port)
        file_resource = self.null_resource('upload_file_resource',
                                           provisioner=provisioner(
                                               self.TERRAFORM_RESOURCE_FILE,
                                               content=content,
                                               destination=destination,
                                               connection=ssh_conn))

        upload_config += file_resource
        return upload_config
예제 #19
0
#!python
# Teardown script: destroys the users layer, then the base layer.

from python_terraform import Terraform, IsFlagged

# One client per layer; the users layer is built on top of the base layer.
tf_base = Terraform(working_dir='iac/layer-base')
tf_users = Terraform(working_dir='iac/layer-users')

# Destroy in reverse dependency order: users first, then base.
# force=IsFlagged presumably maps to terraform's legacy `-force`
# (pre-0.12 auto-approve) — confirm against the installed version.
tf_users.destroy(force=IsFlagged, capture_output=False)
tf_base.destroy(force=IsFlagged, capture_output=False)
# Example #20 (score: 0)
class Terraform:
    """Infrastructure helper built on python-terraform and terrascript.

    Generates terraform configs for several cloud providers (DigitalOcean,
    Vultr, Tencent Cloud, Alibaba Cloud), deploys sites as nginx docker
    containers, and runs plan/apply/destroy in throwaway workspaces.
    """

    # Defaults for the docker/nginx site deployment path.
    DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"
    DEFAULT_DOCKER_ENTRYPOINT_PATH = "/docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH = "/nginx.docker-entrypoint.sh"
    DEFAULT_NGINX_DOCKER_IMAGE = "nginx:stable-alpine"
    DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH = "/usr/share/nginx/html"
    # Remote upload target; PROJECT_NAME is expected at module scope.
    DEFAULT_UPLOAD_PATH = f"$HOME/.{PROJECT_NAME}/"
    # NOTE(review): this value appears redacted ("******") in the scraped
    # source — confirm the real default SSH user.
    DEFAULT_SSH_USER = "******"
    DEFAULT_SSH_PORT = 22

    # terraform "file" provisioner type name, used by upload_file().
    TERRAFORM_RESOURCE_FILE = "file"

    # trick for terrascript: terrascript derives a block's terraform type
    # name from the subclass name, so every provider/resource/data type used
    # below is declared as an empty subclass.
    class null_resource(Resource):
        ...

    class tencentcloud(Provider):
        ...

    class tencentcloud_availability_zones(Data):
        ...

    class tencentcloud_images(Data):
        ...

    class tencentcloud_instance_types(Data):
        ...

    class tencentcloud_security_group(Resource):
        ...

    class tencentcloud_security_group_lite_rule(Resource):
        ...

    class tencentcloud_instance(Resource):
        ...

    class tencentcloud_key_pair(Resource):
        ...

    class alicloud(Provider):
        ...

    class alicloud_vpc(Resource):
        ...

    class alicloud_key_pair(Resource):
        ...

    class alicloud_security_group(Resource):
        ...

    class alicloud_security_group_rule(Resource):
        ...

    class alicloud_instance(Resource):
        ...

    class alicloud_vswitch(Resource):
        ...

    class alicloud_zones(Data):
        ...

    class vultr(Provider):
        ...

    class vultr_server(Resource):
        ...

    class vultr_ssh_key(Resource):
        ...

    def __init__(self):
        # All terraform commands run from the shared working directory.
        self.work_dir = TERRAFORM_WORK_DIR
        # ``TF`` is presumably the python-terraform client class (aliased at
        # import time to avoid clashing with this class's name).
        self.app = TF(working_dir=self.work_dir)

    @contextlib.contextmanager
    def terraform_workspace(self):
        """Yield a temp directory bound to a throwaway terraform workspace.

        Creates a uniquely-named workspace plus a TemporaryDirectory, and
        guarantees both are cleaned up even when the caller's block raises
        (the original leaked them on any exception — no try/finally).
        """
        workspace = f"terraform_workspace_{int(time.time())}"
        self.app.create_workspace(workspace)
        tmp_dir = TemporaryDirectory()
        try:
            yield tmp_dir.name
        finally:
            # Switch back to default first: terraform refuses to delete the
            # currently selected workspace.
            self.app.set_workspace("default")
            self.app.cmd("workspace delete", workspace, force=IsFlagged)
            tmp_dir.cleanup()

    @contextlib.contextmanager
    def patch_terraform_docker_ssh_conn(self):
        """Context manager intended to patch the local SSH configuration for
        terraform's docker provider and restore it on exit.

        Currently a no-op stub — both steps are unimplemented (TODO).
        """
        # patch ssh config
        yield
        # clear ssh config

    def write_terraform_config(self, config: Terrascript, dir_path: str):
        """Serialize *config* to a ``.tf.json`` file in *dir_path* and init.

        The file is created with ``delete=False`` so terraform can keep
        reading it after this call; the open handle is returned to the
        caller.
        """
        tmp_config_file = NamedTemporaryFile(mode="wt",
                                             suffix=".tf.json",
                                             dir=dir_path,
                                             delete=False)
        logging.info(str(config))
        tmp_config_file.write(str(config))
        # Explicitly flush so the config is on disk before ``terraform init``
        # reads it (the original relied on seek() implicitly flushing).
        tmp_config_file.flush()
        tmp_config_file.seek(0)

        self.app.init(
            dir_path
            # disable manual plugin because it changes toooo fast
            # dir_path, plugin_dir=f"{self.work_dir}/plugins",
        )

        return tmp_config_file

    def run_terraform_plan(self, config: Terrascript):
        """Write *config* into a throwaway workspace and run ``terraform plan``."""
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)
            return self.app.plan(workspace_dir, no_color=IsFlagged)

    def run_terraform_apply(self, config: Terrascript):
        """Apply *config* in a throwaway workspace.

        Returns the terraform output variables flattened into a plain
        ``{name: value}`` dict.
        """
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)

            self.app.apply(workspace_dir, skip_plan=True, no_color=IsFlagged)
            raw_output = self.app.output(json=IsFlagged, no_color=IsFlagged)

            # terraform wraps each output as {"value": ..., ...}; keep only
            # the values.
            flattened = {}
            for name in raw_output:
                flattened[name] = raw_output[name]["value"]

        return flattened

    def run_terraform_destroy(self, config: Terrascript):
        """Destroy the infrastructure described by *config* and return the result."""
        with self.terraform_workspace() as workspace_dir:
            self.write_terraform_config(config, workspace_dir)
            return self.app.destroy(workspace_dir)

    @classmethod
    def gen_digital_ocean_config(cls,
                                 config_data: dict,
                                 token: str,
                                 public_key: str = None):
        """Build the Terrascript config for a DigitalOcean droplet.

        Emits the provider, the droplet resource, an optional default SSH
        key, and ``ip`` / ``server_id`` outputs.
        """
        do_config = Terrascript()
        do_provider = provider.digitalocean(token=token)
        droplet = resource.digitalocean_droplet(
            "server",
            image=config_data["os_code"],
            name=config_data["hostname"],
            region=config_data["region_code"],
            size=config_data["plan_code"],
            ssh_keys=config_data["ssh_keys"] if config_data.get("ssh_keys") else [],
        )

        if public_key:
            # Register the given key and attach its id to the droplet.
            default_key = resource.digitalocean_ssh_key(
                "digitalocean_ssh_key",
                name="default",
                public_key=public_key,
            )
            droplet["ssh_keys"] += [
                "${digitalocean_ssh_key.digitalocean_ssh_key.id}"
            ]
            do_config += default_key

        do_config += do_provider
        do_config += droplet
        do_config += Output(
            "ip", value="${digitalocean_droplet.server.ipv4_address}")
        do_config += Output("server_id",
                            value="${digitalocean_droplet.server.id}")

        return do_config

    @classmethod
    def gen_vultr_config(cls,
                         config_data: dict,
                         token: str,
                         public_key: str = None):
        """Build the Terrascript config for a Vultr server.

        Emits the provider, the server resource, an optional default SSH
        key, and ``ip`` / ``server_id`` outputs.
        """
        config = Terrascript()
        api_provider = cls.vultr(api_key=token,
                                 rate_limit=700,
                                 retry_limit=3)

        server = cls.vultr_server(
            "server",
            plan_id=config_data["plan_code"],
            region_id=config_data["region_code"],
            os_id=config_data["os_code"],
            hostname=config_data["hostname"],
            ssh_key_ids=config_data["ssh_keys"] if config_data.get("ssh_keys") else [],
        )

        if public_key:
            # Register the given key and attach its id to the server.
            default_key = cls.vultr_ssh_key("vultr_ssh_key",
                                            name="default_key",
                                            ssh_key=public_key)
            server["ssh_key_ids"] += [
                "${vultr_ssh_key.vultr_ssh_key.id}"
            ]
            config += default_key

        config += api_provider
        config += server
        config += Output("ip", value="${vultr_server.server.main_ip}")
        config += Output("server_id", value="${vultr_server.server.id}")

        return config

    @classmethod
    def gen_tencent_cloud_config(
        cls,
        config_data: dict,
        token: str,
        public_key_name: str = None,
        secret_id: str = None,
    ):
        """Build the Terrascript config for a Tencent Cloud CVM instance.

        Emits a terraform ``required_providers`` pin, the provider, an
        availability-zone data source, a security group with lite rules,
        the instance itself, and ``ip`` / ``server_id`` outputs.

        Args:
            config_data: dict with ``region_code``, ``os_code``,
                ``plan_code`` and ``hostname`` keys.
            token: tencentcloud secret key.
            public_key_name: optional key-pair name attached to the instance.
            secret_id: tencentcloud secret id.
        """
        tencent_cloud_config = Terrascript()
        # Pin the provider source and version via a terraform{} block.
        tencent_terraform = terraform(
            **{
                "required_providers": {
                    "tencentcloud": {
                        "source": "terraform-providers/tencentcloud",
                        "version": "~> 1.40.3",
                    },
                }
            })

        tencent_cloud_provider = cls.tencentcloud(
            secret_id=secret_id,
            secret_key=token,
            region=config_data["region_code"],
        )
        tencent_zone = cls.tencentcloud_availability_zones("default")
        # NOTE(review): the group is named "all-open" but the rules below
        # only cover the 10.0.0.0/8 private range — confirm intent.
        tencent_security_group = cls.tencentcloud_security_group(
            "default", name="all-open", description="open all ports")
        tencent_security_group_rule = cls.tencentcloud_security_group_lite_rule(
            "rule",
            security_group_id="${tencentcloud_security_group.default.id}",
            ingress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
            egress=[
                "ACCEPT#10.0.0.0/8#ALL#ALL",
            ],
        )
        # Pay-as-you-go instance in the first availability zone, with a
        # public IP and monitor/security agents disabled.
        tencent_cloud_server = cls.tencentcloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone=
            "${data.tencentcloud_availability_zones.default.zones.0.name}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            disable_monitor_service=True,
            disable_security_service=True,
            allocate_public_ip=True,
            internet_max_bandwidth_out=5,
            instance_charge_type="POSTPAID_BY_HOUR",
            internet_charge_type="TRAFFIC_POSTPAID_BY_HOUR",
            system_disk_type="CLOUD_SSD",
            count=1,
        )
        tencent_output_ip = Output(
            "ip", value="${tencentcloud_instance.server.0.public_ip}")
        tencent_output_id = Output(
            "server_id", value="${tencentcloud_instance.server.0.id}")

        if public_key_name:
            tencent_cloud_server["key_name"] = public_key_name

        tencent_cloud_config += tencent_terraform
        tencent_cloud_config += tencent_cloud_provider
        tencent_cloud_config += tencent_zone
        tencent_cloud_config += tencent_security_group
        tencent_cloud_config += tencent_security_group_rule
        tencent_cloud_config += tencent_cloud_server
        tencent_cloud_config += tencent_output_ip
        tencent_cloud_config += tencent_output_id

        return tencent_cloud_config

    @classmethod
    def gen_ali_cloud_config(
        cls,
        config_data: dict,
        token: str,
        ssh_key_name: str = None,
        access_key: str = None,
        security_groups: List[str] = None,
    ):
        """Build the Terrascript config for an Alibaba Cloud ECS instance.

        Creates a VPC, a vswitch, a security group with open ingress/egress
        rules, the instance, and ``ip`` / ``server_id`` outputs.

        Args:
            config_data: dict with ``region_code``, ``os_code``,
                ``plan_code`` and ``hostname`` keys.
            token: alicloud secret key.
            ssh_key_name: optional key-pair name attached to the instance.
            access_key: alicloud access key id.
            security_groups: currently unused (the instance is wired to the
                security group created here); kept for API compatibility.
        """
        # Fix of the mutable-default-argument pitfall: a ``[]`` default is
        # created once and shared between all calls.
        if security_groups is None:
            security_groups = []

        ali_cloud_config = Terrascript()
        ali_cloud_provider = cls.alicloud(
            access_key=access_key,
            secret_key=token,
            region=config_data["region_code"],
        )

        ali_zone = cls.alicloud_zones(
            "default",
            available_disk_category="cloud_efficiency",
            available_resource_creation="Instance",
        )
        ali_vpc = cls.alicloud_vpc(
            "vpc",
            cidr_block="172.16.0.0/12",
        )
        ali_vswitch = cls.alicloud_vswitch(
            "vswitch",
            vpc_id="${alicloud_vpc.vpc.id}",
            cidr_block="172.16.0.0/29",
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
        )
        ali_security_group = cls.alicloud_security_group(
            "group",
            name="all-open",
            vpc_id="${alicloud_vpc.vpc.id}",
            description="open all ports",
            inner_access_policy="Accept",
        )
        # Allow all inbound traffic from anywhere.
        ali_internet_security_group_rule = cls.alicloud_security_group_rule(
            "internet",
            # nic_type="internet",
            security_group_id="${alicloud_security_group.group.id}",
            type="ingress",
            port_range="-1/-1",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
        )
        # Allow all outbound traffic.
        ali_intranet_security_group_rule = cls.alicloud_security_group_rule(
            "intranet",
            # nic_type="intranet",
            security_group_id="${alicloud_security_group.group.id}",
            port_range="-1/-1",
            type="egress",
            cidr_ip="0.0.0.0/0",
            ip_protocol="all",
            policy="accept",
            priority=1,
        )
        # Pay-as-you-go instance attached to the vswitch/security group above.
        ali_cloud_server = cls.alicloud_instance(
            "server",
            instance_name=config_data["hostname"],
            availability_zone="${data.alicloud_zones.default.zones.0.id}",
            # security_groups=security_groups,
            security_groups="${alicloud_security_group.group.*.id}",
            vswitch_id="${alicloud_vswitch.vswitch.id}",
            image_id=config_data["os_code"],
            instance_type=config_data["plan_code"],
            security_enhancement_strategy="Deactive",
            instance_charge_type="PostPaid",
            internet_charge_type="PayByTraffic",
            internet_max_bandwidth_out=2,
        )
        ali_output_ip = Output("ip",
                               value="${alicloud_instance.server.public_ip}")
        ali_output_id = Output("server_id",
                               value="${alicloud_instance.server.id}")

        if ssh_key_name:
            ali_cloud_server["key_name"] = ssh_key_name

        ali_cloud_config += ali_cloud_provider
        ali_cloud_config += ali_zone
        ali_cloud_config += ali_vpc
        ali_cloud_config += ali_vswitch
        ali_cloud_config += ali_security_group
        ali_cloud_config += ali_internet_security_group_rule
        ali_cloud_config += ali_intranet_security_group_rule
        ali_cloud_config += ali_cloud_server
        ali_cloud_config += ali_output_ip
        ali_cloud_config += ali_output_id

        return ali_cloud_config

    @classmethod
    def add_ssh_key_config(cls, public_key: str):
        """Return a remote-exec provisioner that appends *public_key* to the
        remote user's ``authorized_keys``.

        Fixes two bugs in the original: the inline command was missing
        ``echo`` (the key itself would have been executed as a shell
        command), and the provisioner was uselessly nested inside a second
        provisioner via a bogus ``provisioner=`` kwarg.
        """
        return provisioner(
            "remote-exec",
            inline=[
                "mkdir -p ~/.ssh",
                # Quote the key so the shell does not interpret its content.
                f"echo '{public_key}' >> ~/.ssh/authorized_keys",
            ],
        )

    @classmethod
    def gen_ssh_conn_config(
        cls,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ) -> dict:
        """Return a terraform ``connection`` block for SSH provisioners.

        See https://www.terraform.io/docs/provisioners/connection.html
        """
        conn = dict(
            type="ssh",
            user=ssh_user,
            private_key=ssh_private_key,
            host=ssh_host,
            port=ssh_port,
        )
        # Fail fast instead of hanging on unreachable hosts.
        conn["timeout"] = "30s"
        return conn

    @classmethod
    def gen_site_docker_deploy_config(
        cls,
        *,
        docker_host: str = DEFAULT_DOCKER_HOST,
        site_name: str = None,
        template_tar_bytes: bytes = None,
        script: str = None,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Build the Terrascript config that deploys a site as an nginx
        docker container on a remote docker host reached over SSH.

        Optionally uploads a base64-encoded site tarball
        (``template_tar_bytes``) into the container's html directory and a
        rendered entrypoint script when ``script`` is given.
        """
        config = Terrascript()
        docker_provider = provider.docker(
            host=docker_host,
            connection=cls.gen_ssh_conn_config(
                ssh_user=ssh_user,
                ssh_private_key=ssh_private_key,
                ssh_host=ssh_host,
                ssh_port=ssh_port,
            ),
        )
        docker_image_resource = resource.docker_image(
            "nginx_image",
            name=cls.DEFAULT_NGINX_DOCKER_IMAGE,
        )
        # ``${{...}}`` escapes the braces so terraform (not Python) resolves
        # the random_pet interpolation.
        docker_container_resource = resource.docker_container(
            "nginx_container",
            name=f"{site_name}-container-${{random_pet.docker_pet_name.id}}",
            image="${docker_image.nginx_image.latest}",
            restart="always",
            start=True,
            ports={"internal": 80},
            upload=[],
        )
        docker_name_resource = resource.random_pet(
            "docker_pet_name",
            length=1,
        )

        if template_tar_bytes:
            # BUG FIX: the original parenthesized this with a trailing comma,
            # accidentally creating a 1-tuple instead of a string...
            template_tar_file = (
                f"{site_name}-tar-${{random_pet.docker_pet_name.id}}.tar.gz")
            template_tar_file_content = base64.b64encode(
                template_tar_bytes).decode("utf8")
            # ...and then interpolated that tuple after a stray "$",
            # producing a garbage container path.
            template_tar_path = (
                f"{cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH}/{template_tar_file}"
            )
            docker_container_resource["upload"].append({
                "content_base64": template_tar_file_content,
                "file": template_tar_path,
            })

        if script:
            # Render the nginx entrypoint template with the container paths.
            entrypoint_sh_content = TemplateRender().render(
                cls.DEFAULT_NGINX_DOCKER_ENTRYPOINT_PATH,
                init_script_path=cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
                html_path=cls.DEFAULT_NGINX_DOCKER_CONTAINER_HTML_PATH,
            )
            docker_container_resource["upload"].append({
                "content": entrypoint_sh_content,
                "file": cls.DEFAULT_DOCKER_ENTRYPOINT_PATH,
            })

        config += docker_provider
        config += docker_image_resource
        config += docker_container_resource
        config += docker_name_resource

        return config

    def remote_exec(
        self,
        *,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Return a Terrascript config whose null_resource runs ``ls -la``
        on the remote host over SSH (an SSH connectivity smoke test).
        """
        connection = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        runner = provisioner("remote-exec",
                             inline=["ls -la"],
                             connection=connection)

        exec_config = Terrascript()
        exec_config += self.null_resource("remote-exec", provisioner=runner)
        return exec_config

    def upload_file(
        self,
        content: str,
        *,
        destination: str = DEFAULT_UPLOAD_PATH,
        ssh_user: str = DEFAULT_SSH_USER,
        ssh_private_key: str,
        ssh_host: str,
        ssh_port: int = DEFAULT_SSH_PORT,
    ):
        """Return a Terrascript config that uploads *content* to
        *destination* on the remote host via a ``file`` provisioner over SSH.
        """
        connection = self.gen_ssh_conn_config(
            ssh_user=ssh_user,
            ssh_private_key=ssh_private_key,
            ssh_host=ssh_host,
            ssh_port=ssh_port,
        )
        uploader = provisioner(
            self.TERRAFORM_RESOURCE_FILE,
            content=content,
            destination=destination,
            connection=connection,
        )

        upload_config = Terrascript()
        upload_config += self.null_resource("upload_file_resource",
                                            provisioner=uploader)
        return upload_config