Example #1
File: vmware.py  Project: kesavanvt/ocs-ci
        def __init__(self):
            super(VSPHEREUPI.OCPDeployment, self).__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                              "installer")
            self.previous_dir = os.getcwd()

            # get OCP version
            ocp_version = get_ocp_version()

            # create terraform_data directory
            self.terraform_data_dir = os.path.join(
                self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)

            # Download terraform binary based on ocp version and
            # update the installer path in ENV_DATA
            # use "0.11.14" for releases below OCP 4.5
            terraform_version = config.DEPLOYMENT["terraform_version"]
            terraform_installer = get_terraform(version=terraform_version)
            config.ENV_DATA["terraform_installer"] = terraform_installer

            # Download terraform ignition provider
            # ignition provider dependency from OCP 4.6
            if Version.coerce(ocp_version) >= Version.coerce("4.6"):
                get_terraform_ignition_provider(self.terraform_data_dir)

            # Initialize Terraform
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)

            self.folder_structure = False
            if Version.coerce(ocp_version) >= Version.coerce("4.5"):
                self.folder_structure = True
                config.ENV_DATA["folder_structure"] = self.folder_structure
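
The constructors above (and the destroy methods below) gate terraform-related setup on the OCP version via Version.coerce. A minimal standalone sketch of that comparison, assuming the semantic_version package (which provides Version.coerce) is available and using a literal value in place of get_ocp_version():

# Minimal sketch: version gating as used in the examples above.
# Assumes the `semantic_version` package; "4.7" stands in for get_ocp_version().
from semantic_version import Version

ocp_version = "4.7"

# Version.coerce() pads partial versions ("4.7" -> "4.7.0") so they compare cleanly
if Version.coerce(ocp_version) >= Version.coerce("4.6"):
    print("OCP >= 4.6: download the terraform ignition provider")
if Version.coerce(ocp_version) >= Version.coerce("4.5"):
    print("OCP >= 4.5: use the new vSphere folder structure")
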
Example #2
        def __init__(self):
            super(VSPHEREUPI.OCPDeployment, self).__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                              'installer')
            self.previous_dir = os.getcwd()

            # Download terraform binary based on ocp version and
            # update the installer path in ENV_DATA
            # use "0.11.14" for releases below OCP 4.5
            terraform_version = config.DEPLOYMENT['terraform_version']
            terraform_installer = get_terraform(version=terraform_version)
            config.ENV_DATA['terraform_installer'] = terraform_installer

            # Initialize Terraform
            self.terraform_data_dir = os.path.join(
                self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)
            ocp_version = get_ocp_version()
            self.folder_structure = False
            if Version.coerce(ocp_version) >= Version.coerce('4.5'):
                self.folder_structure = True
                config.ENV_DATA['folder_structure'] = self.folder_structure
Example #3
File: vmware.py  Project: kesavanvt/ocs-ci
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()

        # Download terraform binary based on terraform version
        # in terraform.log
        terraform_log_path = os.path.join(config.ENV_DATA.get("cluster_path"),
                                          config.ENV_DATA.get("TF_LOG_FILE"))

        # check for terraform.log; this check covers partially
        # deployed clusters
        try:
            with open(terraform_log_path, "r") as fd:
                logger.debug(
                    f"Reading terraform version from {terraform_log_path}")
                version_line = fd.readline()
                terraform_version = version_line.split()[-1]
        except FileNotFoundError:
            logger.debug(f"{terraform_log_path} file not found")
            terraform_version = config.DEPLOYMENT["terraform_version"]

        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA["terraform_installer"] = terraform_installer

        # get the OCP version here since the destroy job runs as a
        # separate job in Jenkins
        ocp_version = get_ocp_version()
        self.folder_structure = False
        if Version.coerce(ocp_version) >= Version.coerce("4.5"):
            set_aws_region()
            self.folder_structure = True
            config.ENV_DATA["folder_structure"] = self.folder_structure

        # delete the extra disks
        self.delete_disks()

        # check whether cluster has scale-up nodes
        scale_up_terraform_data_dir = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR,
        )
        scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                              "scale_up_terraform.tfvars")
        if os.path.exists(scale_up_terraform_var):
            os.chdir(scale_up_terraform_data_dir)
            self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                       scale_up_terraform_var)
            os.chdir(previous_dir)

        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR,
            "installer",
        )
        tfvars = os.path.join(
            config.ENV_DATA.get("cluster_path"),
            constants.TERRAFORM_DATA_DIR,
            constants.TERRAFORM_VARS,
        )

        clone_openshift_installer()
        if (os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
                and os.path.exists(f"{constants.VSPHERE_MAIN}.json")):
            os.rename(
                f"{constants.VSPHERE_MAIN}.json",
                f"{constants.VSPHERE_MAIN}.json.backup",
            )

        # terraform initialization and destroy cluster
        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        if Version.coerce(ocp_version) >= Version.coerce("4.6"):
            # Download the terraform ignition provider. For upgraded OCP
            # clusters the ignition provider doesn't exist, so download it
            # in the destroy job as well
            terraform_plugins_path = ".terraform/plugins/linux_amd64/"
            terraform_ignition_provider_path = os.path.join(
                terraform_data_dir,
                terraform_plugins_path,
                "terraform-provider-ignition",
            )

            # check the upgrade history of the cluster and check out the
            # original installer release. This is needed because the 4.6
            # installer branch does not support the terraform state of
            # OCP 4.5. More details in
            # https://github.com/red-hat-storage/ocs-ci/issues/2941
            is_cluster_upgraded = False
            try:
                upgrade_history = get_ocp_upgrade_history()
                if len(upgrade_history) > 1:
                    is_cluster_upgraded = True
                    original_installed_ocp_version = upgrade_history[-1]
                    installer_release_branch = (
                        f"release-{original_installed_ocp_version[0:3]}")
                    clone_repo(
                        constants.VSPHERE_INSTALLER_REPO,
                        upi_repo_path,
                        installer_release_branch,
                    )
            except Exception as ex:
                logger.error(ex)

            if not (os.path.exists(terraform_ignition_provider_path)
                    or is_cluster_upgraded):
                get_terraform_ignition_provider(terraform_data_dir)
            terraform.initialize()
        else:
            terraform.initialize(upgrade=True)
        terraform.destroy(tfvars, refresh=(not self.folder_structure))
        os.chdir(previous_dir)

        # post destroy checks
        self.post_destroy_checks()
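
The destroy path above picks the terraform binary by reading the version from the first line of terraform.log and falls back to the configured default when the log is missing (partially deployed clusters). A minimal standalone sketch of that fallback, with the default version and logger name as placeholders rather than ocs-ci values:

# Minimal sketch of the version-detection fallback used above; the default
# version and logger name are placeholders, not ocs-ci configuration.
import logging

logger = logging.getLogger(__name__)

def detect_terraform_version(log_path, default="0.11.14"):
    """Return the terraform version recorded on the first line of the log,
    falling back to a default when the log does not exist."""
    try:
        with open(log_path, "r") as fd:
            # terraform logs start with a line whose last token is the version
            return fd.readline().split()[-1]
    except FileNotFoundError:
        logger.debug("%s not found, using default %s", log_path, default)
        return default
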
Example #4
    def flexy_post_processing(self):
        """
        Perform a few actions required after flexy execution:
        - update global pull-secret
        - login to mirror registry (disconnected cluster)
        - configure proxy server (disconnected cluster)
        - configure ntp (if required)
        """
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get("kubeconfig_location"))

        # Update kubeconfig with proxy-url (if client_http_proxy
        # configured) to redirect client access through proxy server.
        # Since flexy-dir is already copied to cluster-dir, we will update
        # kubeconfig on both places.
        flexy_kubeconfig = os.path.join(
            self.flexy_host_dir,
            constants.FLEXY_RELATIVE_CLUSTER_DIR,
            "auth/kubeconfig",
        )
        update_kubeconfig_with_proxy_url_for_client(kubeconfig)
        update_kubeconfig_with_proxy_url_for_client(flexy_kubeconfig)

        # load cluster info
        load_cluster_info()

        # Download terraform binary based on version used by Flexy and
        # update the installer path in ENV_DATA
        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        terraform_tfstate = os.path.join(terraform_data_dir,
                                         "terraform.tfstate")
        with open(terraform_tfstate, "r") as fd:
            ttc = hcl.load(fd)
            terraform_version = ttc.get("terraform_version",
                                        config.DEPLOYMENT["terraform_version"])
        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA["terraform_installer"] = terraform_installer

        # Download terraform ignition provider
        # ignition provider dependency from OCP 4.6
        ocp_version = get_ocp_version()
        if Version.coerce(ocp_version) >= Version.coerce("4.6"):
            get_terraform_ignition_provider(terraform_data_dir)

        # if on disconnected cluster, perform required tasks
        pull_secret_path = os.path.join(constants.DATA_DIR, "pull-secret")
        if config.DEPLOYMENT.get("disconnected"):
            # login to mirror registry
            login_to_mirror_registry(pull_secret_path)
            # configure additional allowed domains in proxy
            configure_allowed_domains_in_proxy()

        # update pull-secret
        secret_cmd = (f"oc set data secret/pull-secret "
                      f"--kubeconfig {kubeconfig} "
                      f"-n {constants.OPENSHIFT_CONFIG_NAMESPACE} "
                      f"--from-file=.dockerconfigjson={pull_secret_path}")
        exec_cmd(secret_cmd)

        if not config.ENV_DATA.get("skip_ntp_configuration", False):
            ntp_cmd = (f"oc --kubeconfig {kubeconfig} "
                       f"create -f {constants.NTP_CHRONY_CONF}")
            logger.info("Creating NTP chrony")
            exec_cmd(ntp_cmd)
        # sleep here to allow the machineconfigpool status update to start
        time.sleep(60)
        wait_for_machineconfigpool_status("all")
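
flexy_post_processing() reads the terraform version back out of terraform.tfstate with hcl.load(). Since a state file is plain JSON, the same top-level key can also be read with the standard json module; a minimal sketch under that assumption, with the path and fallback version as placeholders:

# Minimal sketch: terraform.tfstate is JSON, so the "terraform_version" key
# the example reads via hcl.load() can also be read with json. The fallback
# value here is a placeholder, not ocs-ci configuration.
import json

def tfstate_terraform_version(tfstate_path, default="0.11.14"):
    """Return the terraform_version recorded in a terraform.tfstate file,
    falling back to a default when the key is missing."""
    with open(tfstate_path, "r") as fd:
        state = json.load(fd)
    return state.get("terraform_version", default)
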
Example #5
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()

        # Download terraform binary based on terraform version
        # in terraform.log
        terraform_log_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                          config.ENV_DATA.get('TF_LOG_FILE'))

        # check for terraform.log; this check covers partially
        # deployed clusters
        try:
            with open(terraform_log_path, 'r') as fd:
                logger.debug(
                    f"Reading terraform version from {terraform_log_path}")
                version_line = fd.readline()
                terraform_version = version_line.split()[-1]
        except FileNotFoundError:
            logger.debug(f"{terraform_log_path} file not found")
            terraform_version = config.DEPLOYMENT['terraform_version']

        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA['terraform_installer'] = terraform_installer

        # get the OCP version here since the destroy job runs as a
        # separate job in Jenkins
        ocp_version = get_ocp_version()
        self.folder_structure = False
        if Version.coerce(ocp_version) >= Version.coerce('4.5'):
            set_aws_region()
            self.folder_structure = True
            config.ENV_DATA['folder_structure'] = self.folder_structure

        # delete the extra disks
        self.delete_disks()

        # check whether cluster has scale-up nodes
        scale_up_terraform_data_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR)
        scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                              "scale_up_terraform.tfvars")
        if os.path.exists(scale_up_terraform_var):
            os.chdir(scale_up_terraform_data_dir)
            self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                       scale_up_terraform_var)
            os.chdir(previous_dir)

        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR,
            'installer',
        )
        tfvars = os.path.join(config.ENV_DATA.get('cluster_path'),
                              constants.TERRAFORM_DATA_DIR,
                              constants.TERRAFORM_VARS)

        clone_openshift_installer()
        if (os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
                and os.path.exists(f"{constants.VSPHERE_MAIN}.json")):
            os.rename(f"{constants.VSPHERE_MAIN}.json",
                      f"{constants.VSPHERE_MAIN}.json.backup")

        # terraform initialization and destroy cluster
        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        if Version.coerce(ocp_version) >= Version.coerce('4.6'):
            terraform.initialize()
        else:
            terraform.initialize(upgrade=True)
        terraform.destroy(tfvars, refresh=(not self.folder_structure))
        os.chdir(previous_dir)

        # post destroy checks
        self.post_destroy_checks()
Example #6
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()

        # Download terraform binary based on ocp version and
        # update the installer path in ENV_DATA
        # use "0.11.14" for releases below OCP 4.5
        # TODO: for clusters installed with an old version of terraform we
        # still need to run that old version for destroy (upgrade scenario)
        terraform_version = config.DEPLOYMENT['terraform_version']
        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA['terraform_installer'] = terraform_installer

        # delete the extra disks
        self.delete_disks()

        # check whether cluster has scale-up nodes
        scale_up_terraform_data_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR)
        scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                              "scale_up_terraform.tfvars")
        if os.path.exists(scale_up_terraform_var):
            os.chdir(scale_up_terraform_data_dir)
            self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                       scale_up_terraform_var)
            os.chdir(previous_dir)

        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR,
            'installer',
        )
        tfvars = os.path.join(config.ENV_DATA.get('cluster_path'),
                              constants.TERRAFORM_DATA_DIR,
                              constants.TERRAFORM_VARS)

        clone_openshift_installer()
        if (os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
                and os.path.exists(f"{constants.VSPHERE_MAIN}.json")):
            os.rename(f"{constants.VSPHERE_MAIN}.json",
                      f"{constants.VSPHERE_MAIN}.json.backup")

        # get the OCP version here since the destroy job runs as a
        # separate job in Jenkins
        ocp_version = get_ocp_version()
        self.folder_structure = False
        if Version.coerce(ocp_version) >= Version.coerce('4.5'):
            set_aws_region()
            self.folder_structure = True

        # terraform initialization and destroy cluster
        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        terraform.initialize(upgrade=True)
        terraform.destroy(tfvars, refresh=(not self.folder_structure))
        os.chdir(previous_dir)

        # post destroy checks
        self.post_destroy_checks()
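
All of the destroy_cluster variants drive the same initialize-then-destroy flow through the project's Terraform helper. The sketch below is illustrative only and is not the ocs-ci Terraform class: a thin wrapper over the terraform CLI showing roughly what those initialize()/destroy() calls map to, using standard CLI flags.

# Illustrative sketch only, not the ocs-ci Terraform class.
import subprocess

class TerraformWrapperSketch:
    def __init__(self, work_dir):
        # directory containing the terraform configuration (e.g. upi/vsphere/)
        self.work_dir = work_dir

    def initialize(self, upgrade=False):
        # `terraform init` prepares the working directory; -upgrade refreshes
        # the provider plugins, mirroring initialize(upgrade=True) above
        cmd = ["terraform", "init"]
        if upgrade:
            cmd.append("-upgrade")
        subprocess.run(cmd, cwd=self.work_dir, check=True)

    def destroy(self, tfvars, refresh=True):
        # `terraform destroy` tears the resources down; -refresh=false skips
        # the pre-destroy state refresh, mirroring destroy(refresh=False) above
        cmd = [
            "terraform", "destroy", "-auto-approve",
            f"-var-file={tfvars}", f"-refresh={str(refresh).lower()}",
        ]
        subprocess.run(cmd, cwd=self.work_dir, check=True)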