def generate_terraform_vars_and_update_machine_conf():
    """
    Generates the terraform.tfvars file and updates the machine
    configurations, choosing the variable-generation path based on
    whether the installer uses the new (OCP >= 4.5) folder structure.
    """
    ocp_version = get_ocp_version()
    # OCP >= 4.5 installer lays out the vSphere UPI terraform files in a
    # folder structure; compute the flag once instead of re-checking the
    # version condition (the original code duplicated this check inside
    # the >= 4.5 branch, where it was always true)
    folder_structure = Version.coerce(ocp_version) >= Version.coerce("4.5")
    if folder_structure:
        # export AWS_REGION
        set_aws_region()
        # generate terraform variable file
        generate_terraform_vars_with_folder()
        # update the machine configurations
        update_machine_conf(folder_structure)
        # haproxy modification applies unconditionally on the >= 4.5 path
        modify_haproxyservice()
    else:
        # generate terraform variable file
        generate_terraform_vars_with_out_folder()
        # update the machine configurations
        update_machine_conf(folder_structure)
def destroy_cluster(self, log_level="DEBUG"):
    """
    Destroy OCP cluster specific to vSphere UPI

    Args:
        log_level (str): log level openshift-installer (default: DEBUG)

    """
    # NOTE(review): log_level is accepted for interface compatibility but
    # is not referenced anywhere in this implementation
    previous_dir = os.getcwd()

    # Download terraform binary based on terraform version
    # in terraform.log
    terraform_log_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                      config.ENV_DATA.get('TF_LOG_FILE'))

    # check for terraform.log, this check is for partially
    # deployed clusters
    try:
        with open(terraform_log_path, 'r') as fd:
            logger.debug(
                f"Reading terraform version from {terraform_log_path}")
            # the version is taken as the last whitespace-separated token
            # of the log's first line
            version_line = fd.readline()
            terraform_version = version_line.split()[-1]
    except FileNotFoundError:
        # no terraform.log (e.g. partially deployed cluster) -> fall back
        # to the configured default terraform version
        logger.debug(f"{terraform_log_path} file not found")
        terraform_version = config.DEPLOYMENT['terraform_version']
    terraform_installer = get_terraform(version=terraform_version)
    config.ENV_DATA['terraform_installer'] = terraform_installer

    # getting OCP version here since we run destroy job as
    # separate job in jenkins
    ocp_version = get_ocp_version()
    self.folder_structure = False
    if Version.coerce(ocp_version) >= Version.coerce('4.5'):
        # OCP >= 4.5 uses the new installer folder structure
        set_aws_region()
        self.folder_structure = True
        config.ENV_DATA['folder_structure'] = self.folder_structure

    # delete the extra disks
    self.delete_disks()

    # check whether cluster has scale-up nodes
    scale_up_terraform_data_dir = os.path.join(
        self.cluster_path,
        constants.TERRAFORM_DATA_DIR,
        constants.SCALEUP_TERRAFORM_DATA_DIR)
    scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                          "scale_up_terraform.tfvars")
    if os.path.exists(scale_up_terraform_var):
        # scale-up nodes must be destroyed from their own terraform dir
        # before the main cluster teardown
        os.chdir(scale_up_terraform_data_dir)
        self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                   scale_up_terraform_var)
        os.chdir(previous_dir)

    terraform_data_dir = os.path.join(self.cluster_path,
                                      constants.TERRAFORM_DATA_DIR)
    upi_repo_path = os.path.join(
        constants.EXTERNAL_DIR, 'installer',
    )
    tfvars = os.path.join(config.ENV_DATA.get('cluster_path'),
                          constants.TERRAFORM_DATA_DIR,
                          constants.TERRAFORM_VARS)
    clone_openshift_installer()
    # NOTE(review): the existence check uses "<VSPHERE_MAIN>.backup" but
    # the rename produces "<VSPHERE_MAIN>.json.backup" — presumably
    # intentional (restore-marker vs. fresh backup), but worth confirming
    if (os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
            and os.path.exists(f"{constants.VSPHERE_MAIN}.json")):
        os.rename(f"{constants.VSPHERE_MAIN}.json",
                  f"{constants.VSPHERE_MAIN}.json.backup")

    # terraform initialization and destroy cluster
    terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
    os.chdir(terraform_data_dir)
    if Version.coerce(ocp_version) >= Version.coerce('4.6'):
        terraform.initialize()
    else:
        # older releases initialize with plugin upgrade enabled
        terraform.initialize(upgrade=True)
    # refresh is skipped for the new folder structure
    terraform.destroy(tfvars, refresh=(not self.folder_structure))
    os.chdir(previous_dir)

    # post destroy checks
    self.post_destroy_checks()
def destroy_cluster(self, log_level="DEBUG"):
    """
    Destroy OCP cluster specific to vSphere UPI

    Args:
        log_level (str): log level openshift-installer (default: DEBUG)

    """
    # NOTE(review): log_level is accepted for interface compatibility but
    # is not referenced anywhere in this implementation
    previous_dir = os.getcwd()

    # Download terraform binary based on terraform version
    # in terraform.log
    terraform_log_path = os.path.join(config.ENV_DATA.get("cluster_path"),
                                      config.ENV_DATA.get("TF_LOG_FILE"))

    # check for terraform.log, this check is for partially
    # deployed clusters
    try:
        with open(terraform_log_path, "r") as fd:
            logger.debug(
                f"Reading terraform version from {terraform_log_path}")
            # version is the last whitespace-separated token of the
            # log's first line
            version_line = fd.readline()
            terraform_version = version_line.split()[-1]
    except FileNotFoundError:
        # no terraform.log (partially deployed cluster) -> use the
        # configured default terraform version
        logger.debug(f"{terraform_log_path} file not found")
        terraform_version = config.DEPLOYMENT["terraform_version"]
    terraform_installer = get_terraform(version=terraform_version)
    config.ENV_DATA["terraform_installer"] = terraform_installer

    # getting OCP version here since we run destroy job as
    # separate job in jenkins
    ocp_version = get_ocp_version()
    self.folder_structure = False
    if Version.coerce(ocp_version) >= Version.coerce("4.5"):
        # OCP >= 4.5 uses the new installer folder structure
        set_aws_region()
        self.folder_structure = True
        config.ENV_DATA["folder_structure"] = self.folder_structure

    # delete the extra disks
    self.delete_disks()

    # check whether cluster has scale-up nodes
    scale_up_terraform_data_dir = os.path.join(
        self.cluster_path,
        constants.TERRAFORM_DATA_DIR,
        constants.SCALEUP_TERRAFORM_DATA_DIR,
    )
    scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                          "scale_up_terraform.tfvars")
    if os.path.exists(scale_up_terraform_var):
        # scale-up nodes are destroyed from their own terraform dir
        # before the main cluster teardown
        os.chdir(scale_up_terraform_data_dir)
        self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                   scale_up_terraform_var)
        os.chdir(previous_dir)

    terraform_data_dir = os.path.join(self.cluster_path,
                                      constants.TERRAFORM_DATA_DIR)
    upi_repo_path = os.path.join(
        constants.EXTERNAL_DIR, "installer",
    )
    tfvars = os.path.join(
        config.ENV_DATA.get("cluster_path"),
        constants.TERRAFORM_DATA_DIR,
        constants.TERRAFORM_VARS,
    )
    clone_openshift_installer()
    # NOTE(review): the existence check uses "<VSPHERE_MAIN>.backup" but
    # the rename produces "<VSPHERE_MAIN>.json.backup" — presumably
    # intentional, but worth confirming against the deploy path
    if os.path.exists(
            f"{constants.VSPHERE_MAIN}.backup") and os.path.exists(
            f"{constants.VSPHERE_MAIN}.json"):
        os.rename(
            f"{constants.VSPHERE_MAIN}.json",
            f"{constants.VSPHERE_MAIN}.json.backup",
        )

    # terraform initialization and destroy cluster
    terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
    os.chdir(terraform_data_dir)
    if Version.coerce(ocp_version) >= Version.coerce("4.6"):
        # Download terraform ignition provider. For OCP upgrade clusters,
        # ignition provider doesn't exist, so downloading in destroy job
        # as well
        terraform_plugins_path = ".terraform/plugins/linux_amd64/"
        terraform_ignition_provider_path = os.path.join(
            terraform_data_dir,
            terraform_plugins_path,
            "terraform-provider-ignition",
        )

        # check the upgrade history of cluster and checkout to the
        # original installer release. This is due to the issue of not
        # supporting terraform state of OCP 4.5 in installer
        # release of 4.6 branch. More details in
        # https://github.com/red-hat-storage/ocs-ci/issues/2941
        is_cluster_upgraded = False
        try:
            upgrade_history = get_ocp_upgrade_history()
            if len(upgrade_history) > 1:
                is_cluster_upgraded = True
                # oldest entry is the originally installed version;
                # first 3 chars (e.g. "4.5") select the release branch
                original_installed_ocp_version = upgrade_history[-1]
                installer_release_branch = (
                    f"release-{original_installed_ocp_version[0:3]}")
                clone_repo(
                    constants.VSPHERE_INSTALLER_REPO,
                    upi_repo_path,
                    installer_release_branch,
                )
        except Exception as ex:
            # best-effort: failure to inspect upgrade history must not
            # abort the destroy; log and continue
            logger.error(ex)
        if not (os.path.exists(terraform_ignition_provider_path)
                or is_cluster_upgraded):
            get_terraform_ignition_provider(terraform_data_dir)
        terraform.initialize()
    else:
        # older releases initialize with plugin upgrade enabled
        terraform.initialize(upgrade=True)
    # refresh is skipped for the new folder structure
    terraform.destroy(tfvars, refresh=(not self.folder_structure))
    os.chdir(previous_dir)

    # post destroy checks
    self.post_destroy_checks()
def destroy_cluster(self, log_level="DEBUG"):
    """
    Destroy OCP cluster specific to vSphere UPI

    Args:
        log_level (str): log level openshift-installer (default: DEBUG)

    """
    # NOTE(review): log_level is accepted for interface compatibility but
    # is not referenced anywhere in this implementation
    previous_dir = os.getcwd()

    # Download terraform binary based on ocp version and
    # update the installer path in ENV_DATA
    # use "0.11.14" for releases below OCP 4.5
    # TODO: For cluster installed by old version of terraform we need to
    # still run old version for destroy ( upgrade scenario )
    terraform_version = config.DEPLOYMENT['terraform_version']
    terraform_installer = get_terraform(version=terraform_version)
    config.ENV_DATA['terraform_installer'] = terraform_installer

    # delete the extra disks
    self.delete_disks()

    # check whether cluster has scale-up nodes
    scale_up_terraform_data_dir = os.path.join(
        self.cluster_path,
        constants.TERRAFORM_DATA_DIR,
        constants.SCALEUP_TERRAFORM_DATA_DIR)
    scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                          "scale_up_terraform.tfvars")
    if os.path.exists(scale_up_terraform_var):
        # scale-up nodes are destroyed from their own terraform dir
        # before the main cluster teardown
        os.chdir(scale_up_terraform_data_dir)
        self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                   scale_up_terraform_var)
        os.chdir(previous_dir)

    terraform_data_dir = os.path.join(self.cluster_path,
                                      constants.TERRAFORM_DATA_DIR)
    upi_repo_path = os.path.join(
        constants.EXTERNAL_DIR, 'installer',
    )
    tfvars = os.path.join(config.ENV_DATA.get('cluster_path'),
                          constants.TERRAFORM_DATA_DIR,
                          constants.TERRAFORM_VARS)
    clone_openshift_installer()
    # NOTE(review): the existence check uses "<VSPHERE_MAIN>.backup" but
    # the rename produces "<VSPHERE_MAIN>.json.backup" — presumably
    # intentional, but worth confirming against the deploy path
    if (os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
            and os.path.exists(f"{constants.VSPHERE_MAIN}.json")):
        os.rename(f"{constants.VSPHERE_MAIN}.json",
                  f"{constants.VSPHERE_MAIN}.json.backup")

    # getting OCP version here since we run destroy job as
    # separate job in jenkins
    ocp_version = get_ocp_version()
    self.folder_structure = False
    if Version.coerce(ocp_version) >= Version.coerce('4.5'):
        # OCP >= 4.5 uses the new installer folder structure
        set_aws_region()
        self.folder_structure = True

    # terraform initialization and destroy cluster
    terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
    os.chdir(terraform_data_dir)
    terraform.initialize(upgrade=True)
    # refresh is skipped for the new folder structure
    terraform.destroy(tfvars, refresh=(not self.folder_structure))
    os.chdir(previous_dir)

    # post destroy checks
    self.post_destroy_checks()