Example #1
    def upload_exporter_script(self):
        """
        Upload exporter script to RHCS cluster

        Returns:
            str: absolute path to exporter script

        """
        script_path = generate_exporter_script()
        upload_file(self.host, script_path, script_path, self.user, self.password)
        return script_path
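
Every example on this page calls upload_file, whose implementation is not shown. The following is a minimal sketch of a compatible helper, assuming paramiko and SFTP transport; the signature is inferred from the call sites above and the real ocs-ci helper may differ.

import paramiko


def upload_file(server, localpath, remotepath, user, password=None, key_file=None):
    """
    Upload a local file to a remote server over SFTP.

    Authenticates with either a password or a private key file, matching
    the two call styles seen in the examples on this page.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=server, username=user, password=password,
                key_filename=key_file)
    sftp = ssh.open_sftp()
    try:
        sftp.put(localpath, remotepath)
    finally:
        sftp.close()
        ssh.close()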
Example #2
def generate_terraform_vars_with_out_folder():
    """
    Generates the normal (old structure) terraform.tfvars file
    """
    logger.info("Generating terraform variables without folder structure")

    # upload bootstrap ignition to public access server
    bootstrap_path = os.path.join(
        config.ENV_DATA.get("cluster_path"), constants.BOOTSTRAP_IGN
    )
    remote_path = os.path.join(
        config.ENV_DATA.get("path_to_upload"),
        f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}",
    )
    upload_file(
        config.ENV_DATA.get("httpd_server"),
        bootstrap_path,
        remote_path,
        config.ENV_DATA.get("httpd_server_user"),
        config.ENV_DATA.get("httpd_server_password"),
    )

    # generate bootstrap ignition url
    path_to_bootstrap_on_remote = remote_path.replace("/var/www/html/", "")
    bootstrap_ignition_url = (
        f"http://{config.ENV_DATA.get('httpd_server')}/"
        f"{path_to_bootstrap_on_remote}"
    )
    logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
    config.ENV_DATA["bootstrap_ignition_url"] = bootstrap_ignition_url

    # load master and worker ignitions to variables
    master_ignition_path = os.path.join(
        config.ENV_DATA.get("cluster_path"), constants.MASTER_IGN
    )
    master_ignition = read_file_as_str(f"{master_ignition_path}")
    config.ENV_DATA["control_plane_ignition"] = master_ignition

    worker_ignition_path = os.path.join(
        config.ENV_DATA.get("cluster_path"), constants.WORKER_IGN
    )
    worker_ignition = read_file_as_str(f"{worker_ignition_path}")
    config.ENV_DATA["compute_ignition"] = worker_ignition

    cluster_domain = (
        f"{config.ENV_DATA.get('cluster_name')}."
        f"{config.ENV_DATA.get('base_domain')}"
    )
    config.ENV_DATA["cluster_domain"] = cluster_domain

    # generate terraform variables from template
    create_terraform_var_file("terraform.tfvars.j2")
Example #3
        def deploy_prereq(self):
            """
            Prerequisites for vSphere UPI deployment
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_openshift_installer()

            # upload bootstrap ignition to public access server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}"
            )
            upload_file(
                config.ENV_DATA.get('httpd_server'),
                bootstrap_path,
                remote_path,
                config.ENV_DATA.get('httpd_server_user'),
                config.ENV_DATA.get('httpd_server_password')
            )

            # generate bootstrap ignition url
            path_to_bootstrap_on_remote = remote_path.replace("/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}"
            )
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.MASTER_IGN
            )
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.WORKER_IGN
            )
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            cluster_domain = (
                f"{config.ENV_DATA.get('cluster_name')}."
                f"{config.ENV_DATA.get('base_domain')}"
            )
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join(
                "ocp-deployment", terraform_var_template
            )
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA
            )

            terraform_var_yaml = os.path.join(
                self.cluster_path,
                constants.TERRAFORM_DATA_DIR,
                "terraform.tfvars.yaml"
            )
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

            # update the machine configurations
            update_machine_conf()

            # sync guest time with host
            if config.ENV_DATA.get('sync_time_with_host'):
                sync_time_with_host(constants.INSTALLER_MACHINE_CONF, True)
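
Example #3 relies on convert_yaml2tfvars to turn the rendered YAML into a terraform.tfvars file. Its real behavior is not shown on this page; purely as an illustration, a flat YAML mapping could be converted like this (apart from the function name, everything below is an assumption):

import yaml


def convert_yaml2tfvars(yaml_path):
    """
    Illustrative sketch: render a flat YAML mapping as key = "value"
    lines next to the input file and return the new file's path.
    """
    with open(yaml_path) as f:
        data = yaml.safe_load(f)
    tfvars_path = yaml_path.replace(".yaml", "")
    with open(tfvars_path, "w") as f:
        for key, value in data.items():
            f.write(f'{key} = "{value}"\n')
    return tfvars_path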
Example #4
        def deploy_prereq(self):
            """
            Prerequisites for vSphere UPI deployment
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(
                self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_repo(constants.VSPHERE_INSTALLER_REPO, self.upi_repo_path)

            # upload bootstrap ignition to public access server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                          constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}")
            upload_file(config.ENV_DATA.get('httpd_server'), bootstrap_path,
                        remote_path, config.ENV_DATA.get('httpd_server_user'),
                        config.ENV_DATA.get('httpd_server_password'))

            # generate bootstrap ignition url
            path_to_bootstrap_on_remote = remote_path.replace(
                "/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}")
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN)
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN)
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            cluster_domain = (f"{config.ENV_DATA.get('cluster_name')}."
                              f"{config.ENV_DATA.get('base_domain')}")
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join("ocp-deployment",
                                                       terraform_var_template)
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA)

            terraform_var_yaml = os.path.join(self.cluster_path,
                                              constants.TERRAFORM_DATA_DIR,
                                              "terraform.tfvars.yaml")
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

            # update gateway and DNS
            if config.ENV_DATA.get('gateway'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        '${cidrhost(var.machine_cidr,1)}',
                                        f"{config.ENV_DATA.get('gateway')}")

            if config.ENV_DATA.get('dns'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        constants.INSTALLER_DEFAULT_DNS,
                                        f"{config.ENV_DATA.get('dns')}")

            # update the zone in route53
            if config.ENV_DATA.get('region'):
                def_zone = (
                    'provider "aws" { region = "%s" } \n'
                    % config.ENV_DATA.get('region')
                )
                replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz",
                                        def_zone)

            # increase memory
            if config.ENV_DATA.get('memory'):
                replace_content_in_file(constants.INSTALLER_MACHINE_CONF,
                                        '${var.memory}',
                                        config.ENV_DATA.get('memory'))

            # increase CPUs
            worker_num_cpus = config.ENV_DATA.get('worker_num_cpus')
            master_num_cpus = config.ENV_DATA.get('master_num_cpus')
            if worker_num_cpus or master_num_cpus:
                with open(constants.VSPHERE_MAIN, 'r') as fd:
                    obj = hcl.load(fd)
                    if worker_num_cpus:
                        obj['module']['compute']['num_cpu'] = worker_num_cpus
                    if master_num_cpus:
                        obj['module']['control_plane'][
                            'num_cpu'] = master_num_cpus
                # Dump data to json file since hcl module
                # doesn't support dumping of data in HCL format
                dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
                os.rename(constants.VSPHERE_MAIN,
                          f"{constants.VSPHERE_MAIN}.backup")
Example #5
        def deploy_prereq(self):
            """
            Prerequisites for Bare Metal UPI deployment
            """
            super(BAREMETALUPI.OCPDeployment, self).deploy_prereq()
            # check for BM status
            logger.info("Checking BM Status")
            status = self.check_bm_status_exist()
            assert status == constants.BM_STATUS_ABSENT, "BM Cluster still present"
            # update BM status
            logger.info("Updating BM Status")
            result = self.update_bm_status(constants.BM_STATUS_PRESENT)
            assert (result == constants.BM_STATUS_RESPONSE_UPDATED
                    ), "Failed to update request"
            # create manifest
            self.create_manifest()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(
                self.cluster_path, config.RUN.get("kubeconfig_location"))
            bootstrap_path = os.path.join(config.ENV_DATA.get("cluster_path"),
                                          constants.BOOTSTRAP_IGN)
            master_path = os.path.join(config.ENV_DATA.get("cluster_path"),
                                       constants.MASTER_IGN)
            worker_path = os.path.join(config.ENV_DATA.get("cluster_path"),
                                       constants.WORKER_IGN)

            self.host = self.helper_node_details["bm_httpd_server"]
            self.user = self.helper_node_details["bm_httpd_server_user"]
            self.private_key = os.path.expanduser(
                config.DEPLOYMENT["ssh_key_private"])

            self.helper_node_handler = Connection(self.host, self.user,
                                                  self.private_key)
            cmd = f"rm -rf {self.helper_node_details['bm_path_to_upload']}"
            logger.info(self.helper_node_handler.exec_cmd(cmd=cmd))
            cmd = f"mkdir -m 755 {self.helper_node_details['bm_path_to_upload']}"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to create required folder"
            # Upload ignition to public access server
            upload_dict = {
                bootstrap_path: constants.BOOTSTRAP_IGN,
                master_path: constants.MASTER_IGN,
                worker_path: constants.WORKER_IGN,
            }

            for local_path, file_name in upload_dict.items():
                upload_file(
                    self.host,
                    local_path,
                    os.path.join(self.helper_node_details["bm_path_to_upload"],
                                 file_name),
                    self.user,
                    key_file=self.private_key,
                )

            # Perform cleanup of stale entries
            cmd = f"rm -rf {self.helper_node_details['bm_tftp_base_dir']}"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to Delete folder"

            # Installing Required packages
            cmd = "yum install dnsmasq -y"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to install required package"

            # Enable dnsmasq service on boot
            cmd = "systemctl enable dnsmasq"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to Enable dnsmasq service"

            # Starting dnsmasq service
            cmd = "systemctl start dnsmasq"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to Start dnsmasq service"

            cmd = f"mkdir -m 755 -p {self.helper_node_details['bm_tftp_base_dir']}"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to create required folder"

            cmd = (
                f"mkdir -m 755 -p {self.helper_node_details['bm_tftp_base_dir']}ocs4qe"
            )
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to create required folder"

            cmd = f"mkdir -m 755 -p {self.helper_node_details['bm_tftp_base_dir']}ocs4qe/baremetal"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to create required folder"

            cmd = f"rm -rf {self.helper_node_details['bm_dnsmasq_dir']}*"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to Delete dir"

            # Install syslinux
            cmd = "yum install syslinux -y"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to install required package"

            # Copy syslinux files to the tftp path
            cmd = f"cp -ar /usr/share/syslinux/* {self.helper_node_details['bm_tftp_dir']}"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to Copy required files"

            upload_dict = {
                constants.PXE_CONF_FILE: "dnsmasq.pxe.conf",
                constants.COMMON_CONF_FILE: "dnsmasq.common.conf",
            }
            for local_path, file_name in upload_dict.items():
                upload_file(
                    self.host,
                    local_path,
                    os.path.join(self.helper_node_details["bm_dnsmasq_dir"],
                                 file_name),
                    self.user,
                    key_file=self.private_key,
                )
            # Restarting dnsmasq service
            cmd = "systemctl restart dnsmasq"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to restart dnsmasq service"
            with open(constants.RHCOS_IMAGES_FILE) as file_stream:
                rhcos_images_file = yaml.safe_load(file_stream)
            ocp_version = get_ocp_version()
            # compare (major, minor) as a tuple; float("4.10") would
            # incorrectly compare as 4.1
            ocp_version_tuple = tuple(int(v) for v in ocp_version.split(".")[:2])
            logger.info(rhcos_images_file)
            image_data = rhcos_images_file[ocp_version]
            # Download installer_initramfs
            initramfs_image_path = (constants.coreos_url_prefix +
                                    image_data["installer_initramfs_url"])
            if check_for_rhcos_images(initramfs_image_path):
                cmd = ("wget -O "
                       f"{self.helper_node_details['bm_tftp_dir']}"
                       "/rhcos-installer-initramfs.x86_64.img "
                       f"{initramfs_image_path}")
                assert self.helper_node_handler.exec_cmd(
                    cmd=cmd), "Failed to Download required File"
            else:
                raise RhcosImageNotFound
            # Download installer_kernel
            kernel_image_path = (constants.coreos_url_prefix +
                                 image_data["installer_kernel_url"])
            if check_for_rhcos_images(kernel_image_path):
                cmd = ("wget -O "
                       f"{self.helper_node_details['bm_tftp_dir']}"
                       "/rhcos-installer-kernel-x86_64 "
                       f"{kernel_image_path}")
                assert self.helper_node_handler.exec_cmd(
                    cmd=cmd), "Failed to Download required File"
            else:
                raise RhcosImageNotFound
            # Download metal_bios
            if ocp_version_tuple <= (4, 6):
                metal_image_path = (constants.coreos_url_prefix +
                                    image_data["metal_bios_url"])
                if check_for_rhcos_images(metal_image_path):
                    cmd = ("wget -O "
                           f"{self.helper_node_details['bm_path_to_upload']}"
                           f"/{constants.BM_METAL_IMAGE} "
                           f"{metal_image_path}")
                    assert self.helper_node_handler.exec_cmd(
                        cmd=cmd), "Failed to Download required File"
                else:
                    raise RhcosImageNotFound

            if ocp_version_tuple >= (4, 6):
                # Download live rootfs
                rootfs_image_path = (constants.coreos_url_prefix +
                                     image_data["live_rootfs_url"])
                if check_for_rhcos_images(rootfs_image_path):
                    cmd = ("wget -O "
                           f"{self.helper_node_details['bm_path_to_upload']}"
                           "/rhcos-live-rootfs.x86_64.img "
                           f"{rootfs_image_path}")
                    assert self.helper_node_handler.exec_cmd(
                        cmd=cmd), "Failed to Download required File"
                else:
                    raise RhcosImageNotFound

            # Create pxelinux.cfg directory
            cmd = f"mkdir -m 755 {self.helper_node_details['bm_tftp_dir']}/pxelinux.cfg"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to create required folder"
Example #6
        def deploy(self, log_cli_level="DEBUG"):
            """
            Deploy
            """
            # Uploading pxe files
            logger.info("Deploying OCP cluster for Bare Metal platform")
            logger.info(
                f"Openshift-installer will be using log level: {log_cli_level}")
            upload_file(
                self.host,
                constants.COMMON_CONF_FILE,
                os.path.join(self.helper_node_details["bm_dnsmasq_dir"],
                             "dnsmasq.common.conf"),
                self.user,
                key_file=self.private_key,
            )
            logger.info("Uploading PXE files")
            ocp_version = get_ocp_version()
            float_ocp_version = float(ocp_version)
            for machine in self.mgmt_details:
                if (self.mgmt_details[machine].get("cluster_name")
                        or self.mgmt_details[machine].get("extra_node")):
                    pxe_file_path = self.create_pxe_files(
                        ocp_version=float_ocp_version,
                        role=self.mgmt_details[machine].get("role"),
                    )
                    upload_file(
                        server=self.host,
                        localpath=pxe_file_path,
                        remotepath=f"{self.helper_node_details['bm_tftp_dir']}"
                        f"/pxelinux.cfg/01-{self.mgmt_details[machine]['mac'].replace(':', '-')}",
                        user=self.user,
                        key_file=self.private_key,
                    )
            # Applying Permission
            cmd = f"chmod 755 -R {self.helper_node_details['bm_tftp_dir']}"
            self.helper_node_handler.exec_cmd(cmd=cmd)

            # Applying Permission
            cmd = f"chmod 755 -R {self.helper_node_details['bm_path_to_upload']}"
            self.helper_node_handler.exec_cmd(cmd=cmd)

            # Restarting dnsmasq service
            cmd = "systemctl restart dnsmasq"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to restart dnsmasq service"

            # Reboot machines with PXE boot
            for machine in self.mgmt_details:
                if (self.mgmt_details[machine].get("cluster_name") ==
                        constants.BM_DEFAULT_CLUSTER_NAME):
                    secrets = [
                        self.mgmt_details[machine]["mgmt_username"],
                        self.mgmt_details[machine]["mgmt_password"],
                    ]
                    # Change boot priority to PXE
                    cmd = (
                        f"ipmitool -I lanplus -U {self.mgmt_details[machine]['mgmt_username']} "
                        f"-P {self.mgmt_details[machine]['mgmt_password']} "
                        f"-H {self.mgmt_details[machine]['mgmt_console']} chassis bootdev pxe"
                    )
                    run_cmd(cmd=cmd, secrets=secrets)
                    sleep(2)
                    # Power On Machine
                    cmd = (
                        f"ipmitool -I lanplus -U {self.mgmt_details[machine]['mgmt_username']} "
                        f"-P {self.mgmt_details[machine]['mgmt_password']} "
                        f"-H {self.mgmt_details[machine]['mgmt_console']} chassis power cycle || "
                        f"ipmitool -I lanplus -U {self.mgmt_details[machine]['mgmt_username']} "
                        f"-P {self.mgmt_details[machine]['mgmt_password']} "
                        f"-H {self.mgmt_details[machine]['mgmt_console']} chassis power on"
                    )
                    run_cmd(cmd=cmd, secrets=secrets)
            logger.info("waiting for bootstrap to complete")
            try:
                run_cmd(
                    f"{self.installer} wait-for bootstrap-complete "
                    f"--dir {self.cluster_path} "
                    f"--log-level {log_cli_level}",
                    timeout=3600,
                )
            except CommandFailed as e:
                if constants.GATHER_BOOTSTRAP_PATTERN in str(e):
                    try:
                        gather_bootstrap()
                    except Exception as ex:
                        logger.error(ex)
                raise e

            OCP.set_kubeconfig(self.kubeconfig)
            wait_for_all_nodes_csr_and_approve()
            # wait for image registry to show up
            co = "image-registry"
            wait_for_co(co)

            # patch image registry to null
            self.configure_storage_for_image_registry(self.kubeconfig)

            # wait for install to complete
            logger.info("waiting for install to complete")
            run_cmd(
                f"{self.installer} wait-for install-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=1800,
            )

            # Approve any pending CSRs that may exist
            approve_pending_csr()

            self.test_cluster()
            logger.info("Performing Disk cleanup")
            clean_disk()
            # We need NTP for the OCS cluster to become clean
            configure_chrony_and_wait_for_machineconfig_status(node_type="all")
Example #7
    def generate_cluster_info(self):
        """
        Generates the cluster information file
        """
        logger.info("Generating cluster information file")

        # get kubeconfig and upload to httpd server
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get('kubeconfig_location'))
        remote_path = os.path.join(config.ENV_DATA.get('path_to_upload'),
                                   f"{config.RUN.get('run_id')}_kubeconfig")
        upload_file(config.ENV_DATA.get('httpd_server'), kubeconfig,
                    remote_path, config.ENV_DATA.get('httpd_server_user'),
                    config.ENV_DATA.get('httpd_server_password'))

        # form the kubeconfig url by stripping the document root from
        # the remote path (str.lstrip strips characters, not a prefix)
        kubeconfig_url_path = os.path.join(
            'http://', config.ENV_DATA.get('httpd_server'),
            remote_path.replace('/var/www/html/', ''))
        config.ENV_DATA['kubeconfig_url'] = kubeconfig_url_path

        # get the infra_id
        infra_id = get_infra_id(self.cluster_path)
        config.ENV_DATA['infra_id'] = infra_id

        # get the cluster id
        cluster_id = get_cluster_id(self.cluster_path)
        config.ENV_DATA['cluster_id'] = cluster_id

        # fetch the installer version
        installer_version_str = run_cmd(
            f"{config.RUN['bin_dir']}/openshift-install version")
        installer_version = installer_version_str.split()[1]
        config.ENV_DATA['installer_version'] = installer_version

        # get the major and minor version of OCP
        version_obj = Version(installer_version)
        ocp_version_x = version_obj.major
        ocp_version_y = version_obj.minor
        config.ENV_DATA['ocp_version_x'] = ocp_version_x
        config.ENV_DATA['ocp_version_y'] = ocp_version_y

        # generate the cluster info yaml file
        terraform_var_template = "cluster_info.yaml.j2"
        terraform_var_template_path = os.path.join("ocp-deployment",
                                                   terraform_var_template)
        terraform_config_str = self._templating.render_template(
            terraform_var_template_path, config.ENV_DATA)
        terraform_var_yaml = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR,
                                          constants.SCALEUP_TERRAFORM_DATA_DIR,
                                          "cluster_info.yaml")

        with open(terraform_var_yaml, "w") as f:
            f.write(terraform_config_str)

        # config.ENV_DATA['dns_server'] = config.ENV_DATA['dns']
        template_vars = (f"\"dns_server: {config.ENV_DATA['dns']}"
                         f"\\nremove_rhcos_worker: 'yes'\\n\"")

        replace_content_in_file(terraform_var_yaml, "PLACEHOLDER",
                                template_vars)
        logger.info(f"cluster yaml file: {terraform_var_yaml}")
Example #8
        def deploy(self, log_cli_level="DEBUG"):
            """
            Deploy
            """
            # Uploading pxe files
            master_count = 0
            worker_count = 0
            logger.info("Deploying OCP cluster for Bare Metal platform")
            logger.info(
                f"Openshift-installer will be using log level: {log_cli_level}")
            upload_file(
                self.host,
                constants.COMMON_CONF_FILE,
                os.path.join(self.helper_node_details["bm_dnsmasq_dir"],
                             "dnsmasq.common.conf"),
                self.user,
                key_file=self.private_key,
            )
            logger.info("Uploading PXE files")
            ocp_version = get_ocp_version()
            float_ocp_version = float(ocp_version)
            for machine in self.mgmt_details:
                if (self.mgmt_details[machine].get("cluster_name")
                        or self.mgmt_details[machine].get("extra_node")):
                    pxe_file_path = self.create_pxe_files(
                        ocp_version=float_ocp_version,
                        role=self.mgmt_details[machine].get("role"),
                    )
                    upload_file(
                        server=self.host,
                        localpath=pxe_file_path,
                        remotepath=f"{self.helper_node_details['bm_tftp_dir']}"
                        f"/pxelinux.cfg/01-{self.mgmt_details[machine]['mac'].replace(':', '-')}",
                        user=self.user,
                        key_file=self.private_key,
                    )
            # Applying Permission
            cmd = f"chmod 755 -R {self.helper_node_details['bm_tftp_dir']}"
            self.helper_node_handler.exec_cmd(cmd=cmd)

            # Applying Permission
            cmd = f"chmod 755 -R {self.helper_node_details['bm_path_to_upload']}"
            self.helper_node_handler.exec_cmd(cmd=cmd)

            # Restarting dnsmasq service
            cmd = "systemctl restart dnsmasq"
            assert self.helper_node_handler.exec_cmd(
                cmd=cmd), "Failed to restart dnsmasq service"
            # Reboot machines with PXE boot
            api_record_ip_list = []
            apps_record_ip_list = []
            response_list = []
            cluster_name = f"{constants.BM_DEFAULT_CLUSTER_NAME}"
            self.aws.delete_hosted_zone(cluster_name=cluster_name,
                                        delete_zone=False)
            for machine in self.mgmt_details:
                if (self.mgmt_details[machine].get("cluster_name") ==
                        constants.BM_DEFAULT_CLUSTER_NAME):
                    if (self.mgmt_details[machine]["role"] ==
                            constants.BOOTSTRAP_MACHINE):
                        self.set_pxe_boot_and_reboot(machine)
                        bootstrap_ip = self.mgmt_details[machine]["ip"]
                        api_record_ip_list.append(
                            self.mgmt_details[machine]["ip"])

                    elif (self.mgmt_details[machine]["role"]
                          == constants.MASTER_MACHINE and
                          master_count < config.ENV_DATA["master_replicas"]):
                        self.set_pxe_boot_and_reboot(machine)
                        api_record_ip_list.append(
                            self.mgmt_details[machine]["ip"])
                        master_count += 1

                    elif (self.mgmt_details[machine]["role"]
                          == constants.WORKER_MACHINE and
                          worker_count < config.ENV_DATA["worker_replicas"]):
                        self.set_pxe_boot_and_reboot(machine)
                        apps_record_ip_list.append(
                            self.mgmt_details[machine]["ip"])
                        worker_count += 1

            logger.info("Configuring DNS records")
            zone_id = self.aws.get_hosted_zone_id(cluster_name=cluster_name)

            if config.ENV_DATA["worker_replicas"] == 0:
                apps_record_ip_list = api_record_ip_list
            for ip in api_record_ip_list:
                response_list.append(
                    self.aws.update_hosted_zone_record(
                        zone_id=zone_id,
                        record_name=f"api-int.{cluster_name}",
                        data=ip,
                        type="A",
                        operation_type="Add",
                    ))
                response_list.append(
                    self.aws.update_hosted_zone_record(
                        zone_id=zone_id,
                        record_name=f"api.{cluster_name}",
                        data=ip,
                        type="A",
                        operation_type="Add",
                    ))
            for ip in apps_record_ip_list:
                response_list.append(
                    self.aws.update_hosted_zone_record(
                        zone_id=zone_id,
                        record_name=f"*.apps.{cluster_name}",
                        data=ip,
                        type="A",
                        operation_type="Add",
                    ))

            logger.info("Waiting for Record Response")
            self.aws.wait_for_record_set(response_list=response_list)
            logger.info("Records Created Successfully")
            logger.info("waiting for bootstrap to complete")
            try:
                run_cmd(
                    f"{self.installer} wait-for bootstrap-complete "
                    f"--dir {self.cluster_path} "
                    f"--log-level {log_cli_level}",
                    timeout=3600,
                )
            except CommandFailed as e:
                if constants.GATHER_BOOTSTRAP_PATTERN in str(e):
                    try:
                        gather_bootstrap()
                    except Exception as ex:
                        logger.error(ex)
                raise e

            OCP.set_kubeconfig(self.kubeconfig)
            wait_for_all_nodes_csr_and_approve()
            # wait for image registry to show up
            co = "image-registry"
            wait_for_co(co)

            # patch image registry to null
            self.configure_storage_for_image_registry(self.kubeconfig)

            # wait for install to complete
            logger.info("waiting for install to complete")
            run_cmd(
                f"{self.installer} wait-for install-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=1800,
            )
            logger.info("Removing Bootstrap Ip for DNS Records")
            self.aws.update_hosted_zone_record(
                zone_id=zone_id,
                record_name=f"api-int.{cluster_name}",
                data=bootstrap_ip,
                type="A",
                operation_type="Delete",
            )
            self.aws.update_hosted_zone_record(
                zone_id=zone_id,
                record_name=f"api.{cluster_name}",
                data=bootstrap_ip,
                type="A",
                operation_type="Delete",
            )
            # Approve any pending CSRs that may exist
            approve_pending_csr()

            self.test_cluster()
            logger.info("Performing Disk cleanup")
            clean_disk()
            # We need NTP for the OCS cluster to become clean
            worker_timeout = 400 * config.ENV_DATA["worker_replicas"]
            master_timeout = 400 * config.ENV_DATA["master_replicas"]
            chrony_timeout = max(worker_timeout, master_timeout)
            configure_chrony_and_wait_for_machineconfig_status(
                node_type="all", timeout=chrony_timeout)