Example #1
        def __init__(self):
            super(VSPHEREUPI.OCPDeployment, self).__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                              "installer")
            self.previous_dir = os.getcwd()

            # get OCP version
            ocp_version = get_ocp_version()

            # create terraform_data directory
            self.terraform_data_dir = os.path.join(
                self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)

            # Download terraform binary based on ocp version and
            # update the installer path in ENV_DATA
            # use "0.11.14" for releases below OCP 4.5
            terraform_version = config.DEPLOYMENT["terraform_version"]
            terraform_installer = get_terraform(version=terraform_version)
            config.ENV_DATA["terraform_installer"] = terraform_installer

            # Download terraform ignition provider
            # the ignition provider is a dependency from OCP 4.6
            if Version.coerce(ocp_version) >= Version.coerce("4.6"):
                get_terraform_ignition_provider(self.terraform_data_dir)

            # Initialize Terraform
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)

            self.folder_structure = False
            if Version.coerce(ocp_version) >= Version.coerce("4.5"):
                self.folder_structure = True
                config.ENV_DATA["folder_structure"] = self.folder_structure
Example #2
        def __init__(self):
            super(VSPHEREUPI.OCPDeployment, self).__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                              'installer')
            self.previous_dir = os.getcwd()

            # Download terraform binary based on ocp version and
            # update the installer path in ENV_DATA
            # use "0.11.14" for releases below OCP 4.5
            terraform_version = config.DEPLOYMENT['terraform_version']
            terraform_installer = get_terraform(version=terraform_version)
            config.ENV_DATA['terraform_installer'] = terraform_installer

            # Initialize Terraform
            self.terraform_data_dir = os.path.join(
                self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)
            ocp_version = get_ocp_version()
            self.folder_structure = False
            if Version.coerce(ocp_version) >= Version.coerce('4.5'):
                self.folder_structure = True
                config.ENV_DATA['folder_structure'] = self.folder_structure
Example #3
 def __init__(self):
     super(VSPHEREUPI.OCPDeployment, self).__init__()
     self.public_key = {}
     self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                       'installer')
     self.previous_dir = os.getcwd()
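     # create the terraform data directory inside the cluster path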
     self.terraform_data_dir = os.path.join(
         self.cluster_path, constants.TERRAFORM_DATA_DIR)
     create_directory_path(self.terraform_data_dir)
     self.terraform_work_dir = constants.VSPHERE_DIR
     self.terraform = Terraform(self.terraform_work_dir)
Example #4
    def add_nodes(self):
        """
        Add new nodes to the cluster
        """
        # create separate directory for scale-up terraform data
        scaleup_terraform_data_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR)
        create_directory_path(scaleup_terraform_data_dir)
        logger.info(
            f"scale-up terraform data directory: {scaleup_terraform_data_dir}")

        # git clone repo from openshift-misc
        clone_repo(constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path)

        # modify scale-up repo
        self.modify_scaleup_repo()

        config.ENV_DATA['vsphere_resource_pool'] = config.ENV_DATA.get(
            "cluster_name")

        # sync guest time with host
        if config.ENV_DATA.get('sync_time_with_host'):
            sync_time_with_host(constants.SCALEUP_VSPHERE_MACHINE_CONF, True)

        # get the RHCOS worker list
        self.rhcos_ips = get_node_ips()
        logger.info(f"RHCOS IP's: {json.dumps(self.rhcos_ips)}")

        # generate terraform variable for scaling nodes
        self.generate_terraform_vars_for_scaleup()

        # Add nodes using terraform
        scaleup_terraform = Terraform(constants.SCALEUP_VSPHERE_DIR)
        previous_dir = os.getcwd()
        os.chdir(scaleup_terraform_data_dir)
        scaleup_terraform.initialize()
        scaleup_terraform.apply(self.scale_up_terraform_var)
        scaleup_terraform_tfstate = os.path.join(scaleup_terraform_data_dir,
                                                 "terraform.tfstate")
        out = scaleup_terraform.output(scaleup_terraform_tfstate,
                                       "rhel_worker")
        rhel_worker_nodes = json.loads(out)['value']
        logger.info(f"RHEL worker nodes: {rhel_worker_nodes}")
        os.chdir(previous_dir)

        # Install OCP on rhel nodes
        rhel_install = OCPINSTALLRHEL(rhel_worker_nodes)
        rhel_install.prepare_rhel_nodes()
        rhel_install.execute_ansible_playbook()

        # Give the new nodes some time to settle down
        time.sleep(self.wait_time)

        # wait for nodes to be in READY state
        wait_for_nodes_status(timeout=300)
Example #5
    def destroy_scaleup_nodes(self, scale_up_terraform_data_dir, scale_up_terraform_var):
        """
        Destroy the scale-up nodes

        Args:
            scale_up_terraform_data_dir (str): Path to scale-up terraform
                data directory
            scale_up_terraform_var (str): Path to scale-up
                terraform.tfvars file

        """
        clone_repo(
            constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path
        )
        # modify scale-up repo
        self.modify_scaleup_repo()

        terraform_scale_up = Terraform(
            os.path.join(
                self.upi_scale_up_repo_path,
                "v4-testing-misc/v4-scaleup/vsphere/"
            )
        )
        os.chdir(scale_up_terraform_data_dir)
        terraform_scale_up.initialize(upgrade=True)
        terraform_scale_up.destroy(scale_up_terraform_var)
Example #6
    def destroy_scaleup_nodes(self, scale_up_terraform_data_dir,
                              scale_up_terraform_var):
        """
        Destroy the scale-up nodes

        Args:
            scale_up_terraform_data_dir (str): Path to scale-up terraform
                data directory
            scale_up_terraform_var (str): Path to scale-up
                terraform.tfvars file

        """
        clone_repo(constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path)
        # git clone repo from cluster-launcher
        clone_repo(constants.VSPHERE_CLUSTER_LAUNCHER,
                   self.cluster_launcer_repo_path)

        # modify scale-up repo
        helpers = VSPHEREHELPERS()
        helpers.modify_scaleup_repo()

        vsphere_dir = constants.SCALEUP_VSPHERE_DIR
        if Version.coerce(self.ocp_version) >= Version.coerce("4.5"):
            vsphere_dir = os.path.join(
                constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
                f"aos-{get_ocp_version('_')}",
                "vsphere",
            )

        terraform_scale_up = Terraform(vsphere_dir)
        os.chdir(scale_up_terraform_data_dir)
        terraform_scale_up.initialize(upgrade=True)
        terraform_scale_up.destroy(scale_up_terraform_var)
Example #7
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()
        terraform_data_dir = os.path.join(self.cluster_path, constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR, 'installer',
        )
        tfvars = os.path.join(
            config.ENV_DATA.get('cluster_path'),
            constants.TERRAFORM_DATA_DIR,
            constants.TERRAFORM_VARS
        )
        clone_repo(
            constants.VSPHERE_INSTALLER_REPO, upi_repo_path
        )
        if (
            os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
            and os.path.exists(f"{constants.VSPHERE_MAIN}.json")
        ):
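            # main.tf was presumably moved aside and replaced by a generated
            # main.tf.json during deploy (see the hcl handling in
            # deploy_prereq); park the json so the freshly cloned main.tf is
            # the one terraform loads for destroy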
            os.rename(f"{constants.VSPHERE_MAIN}.json", f"{constants.VSPHERE_MAIN}.json.backup")
        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        terraform.initialize(upgrade=True)
        terraform.destroy(tfvars)
        os.chdir(previous_dir)
Example #8
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()

        # delete the extra disks
        self.delete_disks()

        # check whether cluster has scale-up nodes
        scale_up_terraform_data_dir = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR
        )
        scale_up_terraform_var = os.path.join(
            scale_up_terraform_data_dir,
            "scale_up_terraform.tfvars"
        )
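        # the presence of scale_up_terraform.tfvars is treated as the
        # marker that scale-up nodes were added to this cluster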
        if os.path.exists(scale_up_terraform_var):
            os.chdir(scale_up_terraform_data_dir)
            self.destroy_scaleup_nodes(
                scale_up_terraform_data_dir,
                scale_up_terraform_var
            )
            os.chdir(previous_dir)

        terraform_data_dir = os.path.join(self.cluster_path, constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR, 'installer',
        )
        tfvars = os.path.join(
            config.ENV_DATA.get('cluster_path'),
            constants.TERRAFORM_DATA_DIR,
            constants.TERRAFORM_VARS
        )
        clone_repo(
            constants.VSPHERE_INSTALLER_REPO, upi_repo_path,
            f'release-{get_ocp_version()}'
        )
        if (
            os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
            and os.path.exists(f"{constants.VSPHERE_MAIN}.json")
        ):
            os.rename(f"{constants.VSPHERE_MAIN}.json", f"{constants.VSPHERE_MAIN}.json.backup")

        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        terraform.initialize(upgrade=True)
        terraform.destroy(tfvars)
        os.chdir(previous_dir)

        # post destroy checks
        self.post_destroy_checks()
Example #9
    class OCPDeployment(BaseOCPDeployment):
        def __init__(self):
            super(VSPHEREUPI.OCPDeployment, self).__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(
                constants.EXTERNAL_DIR,
                'installer'
            )
            self.previous_dir = os.getcwd()
            self.terraform_data_dir = os.path.join(self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)

        def deploy_prereq(self):
            """
            Prerequisites for vSphere UPI Deployment
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_openshift_installer()

            # upload bootstrap ignition to public access server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}"
            )
            upload_file(
                config.ENV_DATA.get('httpd_server'),
                bootstrap_path,
                remote_path,
                config.ENV_DATA.get('httpd_server_user'),
                config.ENV_DATA.get('httpd_server_password')
            )

            # generate bootstrap ignition url
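            # e.g. (hypothetical values) a remote_path of
            # /var/www/html/123_bootstrap.ign becomes
            # http://<httpd_server>/123_bootstrap.ign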
            path_to_bootstrap_on_remote = remote_path.replace("/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}"
            )
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.MASTER_IGN
            )
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.WORKER_IGN
            )
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            cluster_domain = (
                f"{config.ENV_DATA.get('cluster_name')}."
                f"{config.ENV_DATA.get('base_domain')}"
            )
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join(
                "ocp-deployment", terraform_var_template
            )
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA
            )

            terraform_var_yaml = os.path.join(
                self.cluster_path,
                constants.TERRAFORM_DATA_DIR,
                "terraform.tfvars.yaml"
            )
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)
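            # convert_yaml2tfvars rewrites the rendered YAML into HCL-style
            # key = value assignments (illustrative: a YAML entry like
            # "memory: 8192" ends up as a memory assignment in terraform.tfvars)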

            # update the machine configurations
            update_machine_conf()

            # sync guest time with host
            if config.ENV_DATA.get('sync_time_with_host'):
                sync_time_with_host(constants.INSTALLER_MACHINE_CONF, True)

        def create_config(self):
            """
            Creates the OCP deploy config for the vSphere
            """
            # Generate install-config from template
            _templating = Templating()
            ocp_install_template = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2"
            )
            ocp_install_template_path = os.path.join(
                "ocp-deployment", ocp_install_template
            )
            install_config_str = _templating.render_template(
                ocp_install_template_path, config.ENV_DATA
            )

            # Parse the rendered YAML so that we can manipulate the object directly
            install_config_obj = yaml.safe_load(install_config_str)
            install_config_obj['pullSecret'] = self.get_pull_secret()
            install_config_obj['sshKey'] = self.get_ssh_key()
            install_config_str = yaml.safe_dump(install_config_obj)
            install_config = os.path.join(self.cluster_path, "install-config.yaml")
            with open(install_config, "w") as f:
                f.write(install_config_str)

        def create_ignitions(self):
            """
            Creates the ignition files
            """
            logger.info("creating ignition files for the cluster")
            run_cmd(
                f"{self.installer} create ignition-configs "
                f"--dir {self.cluster_path} "
            )

        def configure_storage_for_image_registry(self, kubeconfig):
            """
            Configures storage for the image registry
            """
            logger.info("configuring storage for image registry")
            patch = " '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}' "
            run_cmd(
                f"oc --kubeconfig {kubeconfig} patch "
                f"configs.imageregistry.operator.openshift.io "
                f"cluster --type merge --patch {patch}"
            )

        def deploy(self, log_cli_level='DEBUG'):
            """
            Deployment specific to OCP cluster on this platform

            Args:
                log_cli_level (str): openshift installer's log level
                    (default: "DEBUG")

            """
            logger.info("Deploying OCP cluster for vSphere platform")
            logger.info(
                f"Openshift-installer will be using loglevel:{log_cli_level}"
            )
            os.chdir(self.terraform_data_dir)
            self.terraform.initialize()
            self.terraform.apply(self.terraform_var)
            os.chdir(self.previous_dir)
            logger.info("waiting for bootstrap to complete")
            try:
                run_cmd(
                    f"{self.installer} wait-for bootstrap-complete "
                    f"--dir {self.cluster_path} "
                    f"--log-level {log_cli_level}",
                    timeout=3600
                )
            except CommandFailed as e:
                if constants.GATHER_BOOTSTRAP_PATTERN in str(e):
                    try:
                        gather_bootstrap()
                    except Exception as ex:
                        logger.error(ex)
                raise e

            if not config.DEPLOYMENT['preserve_bootstrap_node']:
                logger.info("removing bootstrap node")
                os.chdir(self.terraform_data_dir)
                self.terraform.apply(
                    self.terraform_var, bootstrap_complete=True
                )
                os.chdir(self.previous_dir)

            OCP.set_kubeconfig(self.kubeconfig)

            # wait for all nodes to generate CSRs
            # From OCP version 4.4 onwards, we have to approve CSRs manually
            # for all the nodes
            ocp_version = get_ocp_version()
            if Version.coerce(ocp_version) >= Version.coerce('4.4'):
                wait_for_all_nodes_csr_and_approve(timeout=1200, sleep=30)

            # wait for the image registry to show up
            co = "image-registry"
            wait_for_co(co)

            # patch image registry to null
            self.configure_storage_for_image_registry(self.kubeconfig)

            # wait for install to complete
            logger.info("waiting for install to complete")
            run_cmd(
                f"{self.installer} wait-for install-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=1800
            )

            # Approve any pending CSRs in case some exist
            approve_pending_csr()

            self.test_cluster()
Example #10
    class OCPDeployment(BaseOCPDeployment):
        def __init__(self):
            super(VSPHEREUPI.OCPDeployment, self).__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                              'installer')
            self.previous_dir = os.getcwd()
            self.terraform_data_dir = os.path.join(
                self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)

        def deploy_prereq(self):
            """
            Prerequisites for vSphere UPI Deployment
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(
                self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_repo(constants.VSPHERE_INSTALLER_REPO, self.upi_repo_path)

            # upload bootstrap ignition to public access server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                          constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}")
            upload_file(config.ENV_DATA.get('httpd_server'), bootstrap_path,
                        remote_path, config.ENV_DATA.get('httpd_server_user'),
                        config.ENV_DATA.get('httpd_server_password'))

            # generate bootstrap ignition url
            path_to_bootstrap_on_remote = remote_path.replace(
                "/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}")
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN)
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN)
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            cluster_domain = (f"{config.ENV_DATA.get('cluster_name')}."
                              f"{config.ENV_DATA.get('base_domain')}")
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join("ocp-deployment",
                                                       terraform_var_template)
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA)

            terraform_var_yaml = os.path.join(self.cluster_path,
                                              constants.TERRAFORM_DATA_DIR,
                                              "terraform.tfvars.yaml")
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

            # update gateway and DNS
            if config.ENV_DATA.get('gateway'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        '${cidrhost(var.machine_cidr,1)}',
                                        f"{config.ENV_DATA.get('gateway')}")

            if config.ENV_DATA.get('dns'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        constants.INSTALLER_DEFAULT_DNS,
                                        f"{config.ENV_DATA.get('dns')}")

            # update the AWS region in the route53 config
            if config.ENV_DATA.get('region'):
                def_zone = 'provider "aws" { region = "%s" } \n' % config.ENV_DATA.get(
                    'region')
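                # e.g. with region "us-east-1" def_zone becomes:
                # provider "aws" { region = "us-east-1" }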
                replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz",
                                        def_zone)

            # increase memory
            if config.ENV_DATA.get('memory'):
                replace_content_in_file(constants.INSTALLER_MACHINE_CONF,
                                        '${var.memory}',
                                        config.ENV_DATA.get('memory'))

            # increase CPUs
            worker_num_cpus = config.ENV_DATA.get('worker_num_cpus')
            master_num_cpus = config.ENV_DATA.get('master_num_cpus')
            if worker_num_cpus or master_num_cpus:
                with open(constants.VSPHERE_MAIN, 'r') as fd:
                    obj = hcl.load(fd)
                    if worker_num_cpus:
                        obj['module']['compute']['num_cpu'] = worker_num_cpus
                    if master_num_cpus:
                        obj['module']['control_plane'][
                            'num_cpu'] = master_num_cpus
                # Dump data to json file since hcl module
                # doesn't support dumping of data in HCL format
                dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
                os.rename(constants.VSPHERE_MAIN,
                          f"{constants.VSPHERE_MAIN}.backup")

        def create_config(self):
            """
            Creates the OCP deploy config for the vSphere
            """
            # Generate install-config from template
            _templating = Templating()
            ocp_install_template = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2")
            ocp_install_template_path = os.path.join("ocp-deployment",
                                                     ocp_install_template)
            install_config_str = _templating.render_template(
                ocp_install_template_path, config.ENV_DATA)

            # Parse the rendered YAML so that we can manipulate the object directly
            install_config_obj = yaml.safe_load(install_config_str)
            install_config_obj['pullSecret'] = self.get_pull_secret()
            install_config_obj['sshKey'] = self.get_ssh_key()
            install_config_str = yaml.safe_dump(install_config_obj)
            install_config = os.path.join(self.cluster_path,
                                          "install-config.yaml")
            with open(install_config, "w") as f:
                f.write(install_config_str)

        def create_ignitions(self):
            """
            Creates the ignition files
            """
            logger.info("creating ignition files for the cluster")
            run_cmd(f"{self.installer} create ignition-configs "
                    f"--dir {self.cluster_path} ")

        def configure_storage_for_image_registry(self, kubeconfig):
            """
            Configures storage for the image registry
            """
            logger.info("configuring storage for image registry")
            patch = " '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}' "
            run_cmd(f"oc --kubeconfig {kubeconfig} patch "
                    f"configs.imageregistry.operator.openshift.io "
                    f"cluster --type merge --patch {patch}")

        def deploy(self, log_cli_level='DEBUG'):
            """
            Deployment specific to OCP cluster on this platform

            Args:
                log_cli_level (str): openshift installer's log level
                    (default: "DEBUG")

            """
            logger.info("Deploying OCP cluster for vSphere platform")
            logger.info(
                f"Openshift-installer will be using loglevel:{log_cli_level}")
            os.chdir(self.terraform_data_dir)
            self.terraform.initialize()
            self.terraform.apply(self.terraform_var)
            os.chdir(self.previous_dir)
            logger.info("waiting for bootstrap to complete")
            run_cmd(
                f"{self.installer} wait-for bootstrap-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=3600)
            logger.info("removing bootstrap node")
            os.chdir(self.terraform_data_dir)
            self.terraform.apply(self.terraform_var, bootstrap_complete=True)
            os.chdir(self.previous_dir)

            OCP.set_kubeconfig(self.kubeconfig)
            # wait for the image registry to show up
            co = "image-registry"
            wait_for_co(co)

            # patch image registry to null
            self.configure_storage_for_image_registry(self.kubeconfig)

            # wait for install to complete
            logger.info("waiting for install to complete")
            run_cmd(
                f"{self.installer} wait-for install-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=1800)

            self.test_cluster()
Example #11
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()

        # Download terraform binary based on terraform version
        # in terraform.log
        terraform_log_path = os.path.join(config.ENV_DATA.get("cluster_path"),
                                          config.ENV_DATA.get("TF_LOG_FILE"))

        # check for terraform.log; this check is for partially
        # deployed clusters
        try:
            with open(terraform_log_path, "r") as fd:
                logger.debug(
                    f"Reading terraform version from {terraform_log_path}")
                version_line = fd.readline()
                terraform_version = version_line.split()[-1]
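                # the last whitespace-separated token of the first line is
                # assumed to hold the terraform version, e.g. a first line
                # ending in "0.11.14" yields "0.11.14"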
        except FileNotFoundError:
            logger.debug(f"{terraform_log_path} file not found")
            terraform_version = config.DEPLOYMENT["terraform_version"]

        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA["terraform_installer"] = terraform_installer

        # get the OCP version here since the destroy job runs as a
        # separate job in Jenkins
        ocp_version = get_ocp_version()
        self.folder_structure = False
        if Version.coerce(ocp_version) >= Version.coerce("4.5"):
            set_aws_region()
            self.folder_structure = True
            config.ENV_DATA["folder_structure"] = self.folder_structure

        # delete the extra disks
        self.delete_disks()

        # check whether cluster has scale-up nodes
        scale_up_terraform_data_dir = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR,
        )
        scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                              "scale_up_terraform.tfvars")
        if os.path.exists(scale_up_terraform_var):
            os.chdir(scale_up_terraform_data_dir)
            self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                       scale_up_terraform_var)
            os.chdir(previous_dir)

        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR,
            "installer",
        )
        tfvars = os.path.join(
            config.ENV_DATA.get("cluster_path"),
            constants.TERRAFORM_DATA_DIR,
            constants.TERRAFORM_VARS,
        )

        clone_openshift_installer()
        if os.path.exists(
                f"{constants.VSPHERE_MAIN}.backup") and os.path.exists(
                    f"{constants.VSPHERE_MAIN}.json"):
            os.rename(
                f"{constants.VSPHERE_MAIN}.json",
                f"{constants.VSPHERE_MAIN}.json.backup",
            )

        # terraform initialization and destroy cluster
        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        if Version.coerce(ocp_version) >= Version.coerce("4.6"):
            # Download the terraform ignition provider. For upgraded OCP
            # clusters the ignition provider doesn't exist, so download it
            # in the destroy job as well
            terraform_plugins_path = ".terraform/plugins/linux_amd64/"
            terraform_ignition_provider_path = os.path.join(
                terraform_data_dir,
                terraform_plugins_path,
                "terraform-provider-ignition",
            )

            # check the upgrade history of the cluster and check out the
            # original installer release. This is needed because the 4.6
            # installer release branch doesn't support the terraform state
            # of OCP 4.5. More details in
            # https://github.com/red-hat-storage/ocs-ci/issues/2941
            is_cluster_upgraded = False
            try:
                upgrade_history = get_ocp_upgrade_history()
                if len(upgrade_history) > 1:
                    is_cluster_upgraded = True
                    original_installed_ocp_version = upgrade_history[-1]
                    installer_release_branch = (
                        f"release-{original_installed_ocp_version[0:3]}")
                    clone_repo(
                        constants.VSPHERE_INSTALLER_REPO,
                        upi_repo_path,
                        installer_release_branch,
                    )
            except Exception as ex:
                logger.error(ex)

            if not (os.path.exists(terraform_ignition_provider_path)
                    or is_cluster_upgraded):
                get_terraform_ignition_provider(terraform_data_dir)
            terraform.initialize()
        else:
            terraform.initialize(upgrade=True)
        terraform.destroy(tfvars, refresh=(not self.folder_structure))
        os.chdir(previous_dir)

        # post destroy checks
        self.post_destroy_checks()
Example #12
    class OCPDeployment(BaseOCPDeployment):
        def __init__(self):
            super(VSPHEREUPI.OCPDeployment, self).__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                              "installer")
            self.previous_dir = os.getcwd()

            # get OCP version
            ocp_version = get_ocp_version()

            # create terraform_data directory
            self.terraform_data_dir = os.path.join(
                self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)

            # Download terraform binary based on ocp version and
            # update the installer path in ENV_DATA
            # use "0.11.14" for releases below OCP 4.5
            terraform_version = config.DEPLOYMENT["terraform_version"]
            terraform_installer = get_terraform(version=terraform_version)
            config.ENV_DATA["terraform_installer"] = terraform_installer

            # Download terraform ignition provider
            # the ignition provider is a dependency from OCP 4.6
            if Version.coerce(ocp_version) >= Version.coerce("4.6"):
                get_terraform_ignition_provider(self.terraform_data_dir)

            # Initialize Terraform
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)

            self.folder_structure = False
            if Version.coerce(ocp_version) >= Version.coerce("4.5"):
                self.folder_structure = True
                config.ENV_DATA["folder_structure"] = self.folder_structure

        def deploy_prereq(self):
            """
            Prerequisites for vSphere UPI Deployment
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # generate manifests
            self.generate_manifests()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(
                self.cluster_path, config.RUN.get("kubeconfig_location"))
            self.terraform_var = os.path.join(
                config.ENV_DATA["cluster_path"],
                constants.TERRAFORM_DATA_DIR,
                "terraform.tfvars",
            )

            # git clone repo from openshift installer
            clone_openshift_installer()

            # generate terraform variable file
            generate_terraform_vars_and_update_machine_conf()

            # sync guest time with host
            vm_file = (constants.VM_MAIN if self.folder_structure else
                       constants.INSTALLER_MACHINE_CONF)
            if config.ENV_DATA.get("sync_time_with_host"):
                sync_time_with_host(vm_file, True)

        def create_config(self):
            """
            Creates the OCP deploy config for the vSphere
            """
            # Generate install-config from template
            _templating = Templating()
            ocp_install_template = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2")
            ocp_install_template_path = os.path.join("ocp-deployment",
                                                     ocp_install_template)
            install_config_str = _templating.render_template(
                ocp_install_template_path, config.ENV_DATA)

            # Parse the rendered YAML so that we can manipulate the object directly
            install_config_obj = yaml.safe_load(install_config_str)
            install_config_obj["pullSecret"] = self.get_pull_secret()
            install_config_obj["sshKey"] = self.get_ssh_key()
            install_config_str = yaml.safe_dump(install_config_obj)
            install_config = os.path.join(self.cluster_path,
                                          "install-config.yaml")
            with open(install_config, "w") as f:
                f.write(install_config_str)

        def generate_manifests(self):
            """
            Generates manifest files
            """
            logger.info("creating manifest files for the cluster")
            run_cmd(
                f"{self.installer} create manifests --dir {self.cluster_path}")

            # remove machines and machinesets
            # Some of the manifests produced are for creating machinesets
            # and machine objects. We should remove these, because we don't
            # want to involve the machine-api-operator during install.
            manifest_files_path = os.path.join(self.cluster_path, "openshift")
            files_to_remove = glob.glob(
                f"{manifest_files_path}/99_openshift-cluster-api_"
                f"master-machines-*.yaml")
            files_to_remove.extend(
                glob.glob(f"{manifest_files_path}/99_openshift-cluster-api_"
                          f"worker-machineset-*.yaml"))
            logger.debug(
                f"Removing machines and machineset files: {files_to_remove}")
            for each_file in files_to_remove:
                os.remove(each_file)

        def create_ignitions(self):
            """
            Creates the ignition files
            """
            logger.info("creating ignition files for the cluster")
            run_cmd(f"{self.installer} create ignition-configs "
                    f"--dir {self.cluster_path} ")

        @retry(exceptions.CommandFailed, tries=10, delay=30, backoff=1)
        def configure_storage_for_image_registry(self, kubeconfig):
            """
            Configures storage for the image registry
            """
            logger.info("configuring storage for image registry")
            patch = ' \'{"spec":{"storage":{"emptyDir":{}}}}\' '
            run_cmd(f"oc --kubeconfig {kubeconfig} patch "
                    f"configs.imageregistry.operator.openshift.io "
                    f"cluster --type merge --patch {patch}")

        def deploy(self, log_cli_level="DEBUG"):
            """
            Deployment specific to OCP cluster on this platform

            Args:
                log_cli_level (str): openshift installer's log level
                    (default: "DEBUG")

            """
            logger.info("Deploying OCP cluster for vSphere platform")
            logger.info(
                f"Openshift-installer will be using loglevel:{log_cli_level}")
            os.chdir(self.terraform_data_dir)
            self.terraform.initialize()
            self.terraform.apply(self.terraform_var)
            os.chdir(self.previous_dir)
            logger.info("waiting for bootstrap to complete")
            try:
                run_cmd(
                    f"{self.installer} wait-for bootstrap-complete "
                    f"--dir {self.cluster_path} "
                    f"--log-level {log_cli_level}",
                    timeout=3600,
                )
            except CommandFailed as e:
                if constants.GATHER_BOOTSTRAP_PATTERN in str(e):
                    try:
                        gather_bootstrap()
                    except Exception as ex:
                        logger.error(ex)
                raise e

            if self.folder_structure:
                # comment out the bootstrap module in the load balancer config
                comment_bootstrap_in_lb_module()

                # remove the bootstrap IP from the load balancer and
                # restart haproxy
                lb = LoadBalancer()
                lb.remove_boostrap_in_proxy()
                lb.restart_haproxy()

            # remove bootstrap node
            if not config.DEPLOYMENT["preserve_bootstrap_node"]:
                logger.info("removing bootstrap node")
                os.chdir(self.terraform_data_dir)
                if self.folder_structure:
                    self.terraform.destroy_module(self.terraform_var,
                                                  constants.BOOTSTRAP_MODULE)
                else:
                    self.terraform.apply(self.terraform_var,
                                         bootstrap_complete=True)
                os.chdir(self.previous_dir)

            OCP.set_kubeconfig(self.kubeconfig)

            # wait for all nodes to generate CSRs
            # From OCP version 4.4 onwards, we have to approve CSRs manually
            # for all the nodes
            ocp_version = get_ocp_version()
            if Version.coerce(ocp_version) >= Version.coerce("4.4"):
                wait_for_all_nodes_csr_and_approve(timeout=1200, sleep=30)

            # wait for the image registry to show up
            co = "image-registry"
            wait_for_co(co)

            # patch image registry to null
            self.configure_storage_for_image_registry(self.kubeconfig)

            # wait for install to complete
            logger.info("waiting for install to complete")
            run_cmd(
                f"{self.installer} wait-for install-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=1800,
            )

            # Approve any pending CSRs in case some exist
            approve_pending_csr()

            self.test_cluster()
Example #13
    def add_nodes(self):
        """
        Add new nodes to the cluster
        """
        # create separate directory for scale-up terraform data
        scaleup_terraform_data_dir = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR,
        )
        create_directory_path(scaleup_terraform_data_dir)
        logger.info(
            f"scale-up terraform data directory: {scaleup_terraform_data_dir}")

        # git clone repo from openshift-misc
        clone_repo(constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path)

        # git clone repo from cluster-launcher
        clone_repo(constants.VSPHERE_CLUSTER_LAUNCHER,
                   self.cluster_launcer_repo_path)

        helpers = VSPHEREHELPERS()
        helpers.modify_scaleup_repo()

        config.ENV_DATA["vsphere_resource_pool"] = config.ENV_DATA.get(
            "cluster_name")

        # sync guest time with host
        sync_time_with_host_file = constants.SCALEUP_VSPHERE_MACHINE_CONF
        if config.ENV_DATA["folder_structure"]:
            sync_time_with_host_file = os.path.join(
                constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
                f"aos-{get_ocp_version(seperator='_')}",
                constants.CLUSTER_LAUNCHER_MACHINE_CONF,
            )
        if config.ENV_DATA.get("sync_time_with_host"):
            sync_time_with_host(sync_time_with_host_file, True)

        # get the RHCOS worker list
        rhcos_ips = get_node_ips()
        logger.info(f"RHCOS IP's: {json.dumps(rhcos_ips)}")

        # generate terraform variable for scaling nodes
        self.scale_up_terraform_var = helpers.generate_terraform_vars_for_scaleup(
            rhcos_ips)

        # choose the vsphere_dir based on the OCP version and
        # generate cluster_info and config yaml files
        # for OCP versions 4.5 and above
        vsphere_dir = constants.SCALEUP_VSPHERE_DIR
        rhel_module = "rhel-worker"
        if Version.coerce(self.ocp_version) >= Version.coerce("4.5"):
            vsphere_dir = os.path.join(
                constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
                f"aos-{get_ocp_version('_')}",
                "vsphere",
            )
            helpers.generate_cluster_info()
            helpers.generate_config_yaml()
            rhel_module = "RHEL_WORKER_LIST"

        # Add nodes using terraform
        scaleup_terraform = Terraform(vsphere_dir)
        previous_dir = os.getcwd()
        os.chdir(scaleup_terraform_data_dir)
        scaleup_terraform.initialize()
        scaleup_terraform.apply(self.scale_up_terraform_var)
        scaleup_terraform_tfstate = os.path.join(scaleup_terraform_data_dir,
                                                 "terraform.tfstate")
        out = scaleup_terraform.output(scaleup_terraform_tfstate, rhel_module)
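        # with the folder structure the terraform output is a plain
        # comma-separated string of quoted hostnames; otherwise it is a
        # JSON object with the worker list under the "value" key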
        if config.ENV_DATA["folder_structure"]:
            rhel_worker_nodes = out.strip().replace('"', "").split(",")
        else:
            rhel_worker_nodes = json.loads(out)["value"]

        logger.info(f"RHEL worker nodes: {rhel_worker_nodes}")
        os.chdir(previous_dir)

        # Install OCP on rhel nodes
        rhel_install = OCPINSTALLRHEL(rhel_worker_nodes)
        rhel_install.prepare_rhel_nodes()
        rhel_install.execute_ansible_playbook()

        # Give the new nodes some time to settle down
        time.sleep(self.wait_time)

        # wait for nodes to be in READY state
        wait_for_nodes_status(timeout=300)
Example #14
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()

        # Download terraform binary based on terraform version
        # in terraform.log
        terraform_log_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                          config.ENV_DATA.get('TF_LOG_FILE'))

        # check for terraform.log; this check is for partially
        # deployed clusters
        try:
            with open(terraform_log_path, 'r') as fd:
                logger.debug(
                    f"Reading terraform version from {terraform_log_path}")
                version_line = fd.readline()
                terraform_version = version_line.split()[-1]
        except FileNotFoundError:
            logger.debug(f"{terraform_log_path} file not found")
            terraform_version = config.DEPLOYMENT['terraform_version']

        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA['terraform_installer'] = terraform_installer

        # get the OCP version here since the destroy job runs as a
        # separate job in Jenkins
        ocp_version = get_ocp_version()
        self.folder_structure = False
        if Version.coerce(ocp_version) >= Version.coerce('4.5'):
            set_aws_region()
            self.folder_structure = True
            config.ENV_DATA['folder_structure'] = self.folder_structure

        # delete the extra disks
        self.delete_disks()

        # check whether cluster has scale-up nodes
        scale_up_terraform_data_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR)
        scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                              "scale_up_terraform.tfvars")
        if os.path.exists(scale_up_terraform_var):
            os.chdir(scale_up_terraform_data_dir)
            self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                       scale_up_terraform_var)
            os.chdir(previous_dir)

        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR,
            'installer',
        )
        tfvars = os.path.join(config.ENV_DATA.get('cluster_path'),
                              constants.TERRAFORM_DATA_DIR,
                              constants.TERRAFORM_VARS)

        clone_openshift_installer()
        if (os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
                and os.path.exists(f"{constants.VSPHERE_MAIN}.json")):
            os.rename(f"{constants.VSPHERE_MAIN}.json",
                      f"{constants.VSPHERE_MAIN}.json.backup")

        # terraform initialization and destroy cluster
        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        if Version.coerce(ocp_version) >= Version.coerce('4.6'):
            terraform.initialize()
        else:
            terraform.initialize(upgrade=True)
        terraform.destroy(tfvars, refresh=(not self.folder_structure))
        os.chdir(previous_dir)

        # post destroy checks
        self.post_destroy_checks()
Example #15
    def destroy_cluster(self, log_level="DEBUG"):
        """
        Destroy OCP cluster specific to vSphere UPI

        Args:
            log_level (str): log level for openshift-installer (default: DEBUG)

        """
        previous_dir = os.getcwd()

        # Download terraform binary based on ocp version and
        # update the installer path in ENV_DATA
        # use "0.11.14" for releases below OCP 4.5
        # TODO: For clusters installed with an older version of terraform,
        # we still need to run the old version for destroy (upgrade scenario)
        terraform_version = config.DEPLOYMENT['terraform_version']
        terraform_installer = get_terraform(version=terraform_version)
        config.ENV_DATA['terraform_installer'] = terraform_installer

        # delete the extra disks
        self.delete_disks()

        # check whether cluster has scale-up nodes
        scale_up_terraform_data_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR)
        scale_up_terraform_var = os.path.join(scale_up_terraform_data_dir,
                                              "scale_up_terraform.tfvars")
        if os.path.exists(scale_up_terraform_var):
            os.chdir(scale_up_terraform_data_dir)
            self.destroy_scaleup_nodes(scale_up_terraform_data_dir,
                                       scale_up_terraform_var)
            os.chdir(previous_dir)

        terraform_data_dir = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR)
        upi_repo_path = os.path.join(
            constants.EXTERNAL_DIR,
            'installer',
        )
        tfvars = os.path.join(config.ENV_DATA.get('cluster_path'),
                              constants.TERRAFORM_DATA_DIR,
                              constants.TERRAFORM_VARS)

        clone_openshift_installer()
        if (os.path.exists(f"{constants.VSPHERE_MAIN}.backup")
                and os.path.exists(f"{constants.VSPHERE_MAIN}.json")):
            os.rename(f"{constants.VSPHERE_MAIN}.json",
                      f"{constants.VSPHERE_MAIN}.json.backup")

        # get the OCP version here since the destroy job runs as a
        # separate job in Jenkins
        ocp_version = get_ocp_version()
        self.folder_structure = False
        if Version.coerce(ocp_version) >= Version.coerce('4.5'):
            set_aws_region()
            self.folder_structure = True

        # terraform initialization and destroy cluster
        terraform = Terraform(os.path.join(upi_repo_path, "upi/vsphere/"))
        os.chdir(terraform_data_dir)
        terraform.initialize(upgrade=True)
        terraform.destroy(tfvars, refresh=(not self.folder_structure))
        os.chdir(previous_dir)

        # post destroy checks
        self.post_destroy_checks()