Example No. 1
        def create_config(self):
            """
            Creates the OCP deploy config for vSphere
            """
            # Generate install-config from template
            _templating = Templating()
            ocp_install_template = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2"
            )
            ocp_install_template_path = os.path.join(
                "ocp-deployment", ocp_install_template
            )
            install_config_str = _templating.render_template(
                ocp_install_template_path, config.ENV_DATA
            )

            # Parse the rendered YAML so that we can manipulate the object directly
            install_config_obj = yaml.safe_load(install_config_str)
            install_config_obj['pullSecret'] = self.get_pull_secret()
            install_config_obj['sshKey'] = self.get_ssh_key()
            install_config_str = yaml.safe_dump(install_config_obj)
            install_config = os.path.join(self.cluster_path, "install-config.yaml")
            with open(install_config, "w") as f:
                f.write(install_config_str)
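
The examples in this listing all lean on the project's Templating helper. A minimal sketch of what a Jinja2-backed implementation could look like, assuming templates live in a local "templates" directory (the directory layout and constructor signature are assumptions, not the project's actual code):

# Hypothetical sketch of the Jinja2-backed Templating helper these examples
# rely on; the real class is a project utility and the template base
# directory used here is an assumption.
import os
from jinja2 import Environment, FileSystemLoader


class Templating:
    def __init__(self, base_path=os.path.join(os.path.dirname(__file__), "templates")):
        self._base_path = base_path

    def render_template(self, template_path, data):
        """Render the Jinja2 template at template_path with the given data mapping."""
        env = Environment(loader=FileSystemLoader(self._base_path))
        template = env.get_template(template_path)
        return template.render(**data)
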
Example No. 2
    def generate_terraform_vars_for_scaleup(self):
        """
        Generates the terraform variables file for scaling nodes
        """
        logger.info("Generating terraform variables for scaling nodes")
        _templating = Templating()
        scale_up_terraform_var_template = "scale_up_terraform.tfvars.j2"
        scale_up_terraform_var_template_path = os.path.join(
            "ocp-deployment", scale_up_terraform_var_template
        )
        scale_up_terraform_config_str = _templating.render_template(
            scale_up_terraform_var_template_path, config.ENV_DATA
        )
        scale_up_terraform_var_yaml = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR,
            "scale_up_terraform.tfvars.yaml"
        )
        with open(scale_up_terraform_var_yaml, "w") as f:
            f.write(scale_up_terraform_config_str)

        self.scale_up_terraform_var = convert_yaml2tfvars(
            scale_up_terraform_var_yaml
        )
        logger.info(
            f"scale-up terraform variable file: {self.scale_up_terraform_var}"
        )

        # append RHCOS ip list to terraform variable file
        with open(self.scale_up_terraform_var, "a+") as fd:
            fd.write(f"rhcos_list = {json.dumps(self.rhcos_ips)}")
Example No. 3
        def create_config(self):
            """
            Creates the OCP deploy config for vSphere
            """
            # Generate install-config from template
            _templating = Templating()
            ocp_install_template = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2")
            ocp_install_template_path = os.path.join("ocp-deployment",
                                                     ocp_install_template)
            install_config_str = _templating.render_template(
                ocp_install_template_path, config.ENV_DATA)
            install_config_obj = yaml.safe_load(install_config_str)
            install_config_obj["pullSecret"] = self.get_pull_secret()
            install_config_obj["sshKey"] = self.get_ssh_key()
            install_config_obj["platform"]["vsphere"][
                "apiVIP"] = self.ipi_details.get("vmware_ipi_api_vip")
            install_config_obj["platform"]["vsphere"][
                "ingressVIP"] = self.ipi_details.get("vmware_ipi_ingress_vip")
            install_config_obj["metadata"]["name"] = self.ipi_details.get(
                "vmware_ipi_default_cluster_name")
            install_config_obj["baseDomain"] = self.ipi_details.get(
                "vmware_ipi_default_base_domain")
            install_config_str = yaml.safe_dump(install_config_obj)
            install_config = os.path.join(self.cluster_path,
                                          "install-config.yaml")

            with open(install_config, "w") as f:
                f.write(install_config_str)
Example No. 4
 def create_config(self):
     """
     Creates the OCP deploy config for Bare Metal
     """
     # Generate install-config from template
     _templating = Templating()
     ocp_install_template = (
         f"install-config-{self.deployment_platform}-"
         f"{self.deployment_type}.yaml.j2")
     ocp_install_template_path = os.path.join("ocp-deployment",
                                              ocp_install_template)
     install_config_str = _templating.render_template(
         ocp_install_template_path, config.ENV_DATA)
     install_config_obj = yaml.safe_load(install_config_str)
     install_config_obj["pullSecret"] = self.get_pull_secret()
     install_config_obj["sshKey"] = self.get_ssh_key()
     install_config_obj["metadata"][
         "name"] = constants.BM_DEFAULT_CLUSTER_NAME
     install_config_str = yaml.safe_dump(install_config_obj)
     install_config = os.path.join(self.cluster_path,
                                   "install-config.yaml")
     install_config_backup = os.path.join(self.cluster_path,
                                          "install-config.yaml.backup")
     with open(install_config, "w") as f:
         f.write(install_config_str)
     with open(install_config_backup, "w") as f:
         f.write(install_config_str)
Example No. 5
    def create_inventory(self):
        """
        Creates the inventory file

        Returns:
            str: Path to inventory file

        """
        inventory_data = {}
        inventory_data['pod_kubeconfig'] = self.pod_kubeconfig_path
        inventory_data['pod_pull_secret'] = self.pod_pull_secret_path
        inventory_data['rhel_worker_nodes'] = self.rhel_worker_nodes

        logger.info("Generating inventory file")
        _templating = Templating()
        inventory_template_path = os.path.join("ocp-deployment",
                                               constants.INVENTORY_TEMPLATE)
        inventory_config_str = _templating.render_template(
            inventory_template_path, inventory_data)
        inventory_yaml = os.path.join(self.cluster_path,
                                      constants.TERRAFORM_DATA_DIR,
                                      constants.INVENTORY_FILE)
        logger.info(f"inventory_config_str: {inventory_config_str}")
        logger.info(f"inventory_yaml: {inventory_yaml}")
        with open(inventory_yaml, "w") as f:
            f.write(inventory_config_str)

        return inventory_yaml
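
For context, the generated inventory file is meant to be fed to Ansible. A hedged usage sketch, where the playbook name and CLI invocation are assumptions for illustration only:

# Hypothetical consumer of the generated inventory; the playbook path is an
# assumption, not the project's actual playbook.
import subprocess


def run_rhel_worker_playbook(inventory_yaml, playbook="rhel_worker.yaml"):
    """Run an Ansible playbook against the generated inventory file."""
    subprocess.run(
        ["ansible-playbook", "-i", inventory_yaml, playbook],
        check=True,
    )
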
Example No. 6
 def __init__(self):
     """
     Initialize required variables
     """
     self.cluster_path = config.ENV_DATA['cluster_path']
     self.kubeconfig = os.path.join(self.cluster_path,
                                    config.RUN.get('kubeconfig_location'))
     self.folder_structure = config.ENV_DATA.get('folder_structure')
     self.ocp_version = get_ocp_version(seperator="_")
     self._templating = Templating()
Example No. 7
    def create_inventory_for_haproxy(self):
        """
        Creates the inventory file for haproxy

        Returns:
            str: Path to inventory file for haproxy

        """
        inventory_data_haproxy = {}
        inventory_data_haproxy['ssh_key_private'] = self.ssh_key_pem
        inventory_data_haproxy['platform'] = config.ENV_DATA['platform']
        inventory_data_haproxy['rhel_worker_nodes'] = self.rhel_worker_nodes
        lb_address = get_module_ip(
            self.terraform_state_file,
            constants.LOAD_BALANCER_MODULE
        )
        control_plane_address = get_module_ip(
            self.terraform_state_file,
            constants.CONTROL_PLANE
        )
        compute_address = get_module_ip(
            self.terraform_state_file,
            constants.COMPUTE_MODULE
        )
        inventory_data_haproxy['lb'] = lb_address
        inventory_data_haproxy['masters'] = control_plane_address
        inventory_data_haproxy['workers'] = compute_address

        logger.info("Generating inventory file for haproxy")
        _templating = Templating()
        inventory_template_path_haproxy = os.path.join(
            "ocp-deployment", constants.INVENTORY_TEMPLATE_HAPROXY
        )
        inventory_config_haproxy_str = _templating.render_template(
            inventory_template_path_haproxy, inventory_data_haproxy
        )
        inventory_yaml_haproxy = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.INVENTORY_FILE_HAPROXY
        )
        logger.debug(f"inventory contents for haproxy: {inventory_config_haproxy_str}")
        logger.debug(f"inventory yaml: {inventory_yaml_haproxy}")
        with open(inventory_yaml_haproxy, "w") as f:
            f.write(inventory_config_haproxy_str)

        return inventory_yaml_haproxy
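
get_module_ip pulls the addresses recorded for a given terraform module out of the cluster's terraform state file. A rough sketch under the assumption of a legacy (pre-0.12 style) state layout where each module entry exposes an "ip_addresses" output; the real helper and the actual state structure may differ:

# Hypothetical sketch of get_module_ip; it assumes a legacy terraform state
# layout ("modules" list with per-module "outputs") and an "ip_addresses"
# output name, both of which are assumptions.
import json


def get_module_ip(terraform_state_file, module_name):
    """Return the IP addresses recorded for module_name in the state file."""
    with open(terraform_state_file) as fd:
        state = json.load(fd)
    ips = []
    for module in state.get("modules", []):
        if module_name in ".".join(module.get("path", [])):
            output = module.get("outputs", {}).get("ip_addresses", {})
            ips.extend(output.get("value", []))
    return ips
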
Example No. 8
def create_terraform_var_file(terraform_var_template):
    """
    Creates the terraform variable file from jinja template

    Args:
        terraform_var_template (str): terraform template in jinja format

    """
    _templating = Templating()
    terraform_var_template_path = os.path.join("ocp-deployment",
                                               terraform_var_template)
    terraform_config_str = _templating.render_template(
        terraform_var_template_path, config.ENV_DATA)

    terraform_var_yaml = os.path.join(config.ENV_DATA['cluster_path'],
                                      constants.TERRAFORM_DATA_DIR,
                                      "terraform.tfvars.yaml")
    with open(terraform_var_yaml, "w") as f:
        f.write(terraform_config_str)

    convert_yaml2tfvars(terraform_var_yaml)
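
A minimal usage sketch of this helper, using the template name that appears in the deploy_prereq examples later in this listing:

# Render terraform.tfvars.yaml from its Jinja2 template and convert it to
# .tfvars format; the template name comes from the deploy_prereq examples.
create_terraform_var_file("terraform.tfvars.j2")
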
Example No. 9
        def deploy_prereq(self):
            """
            Pre-Requisites for vSphere UPI Deployment
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_openshift_installer()

            # upload bootstrap ignition to public access server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}"
            )
            upload_file(
                config.ENV_DATA.get('httpd_server'),
                bootstrap_path,
                remote_path,
                config.ENV_DATA.get('httpd_server_user'),
                config.ENV_DATA.get('httpd_server_password')
            )

            # generate bootstrap ignition url
            path_to_bootstrap_on_remote = remote_path.replace("/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}"
            )
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.MASTER_IGN
            )
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.WORKER_IGN
            )
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            cluster_domain = (
                f"{config.ENV_DATA.get('cluster_name')}."
                f"{config.ENV_DATA.get('base_domain')}"
            )
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join(
                "ocp-deployment", terraform_var_template
            )
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA
            )

            terraform_var_yaml = os.path.join(
                self.cluster_path,
                constants.TERRAFORM_DATA_DIR,
                "terraform.tfvars.yaml"
            )
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

            # update the machine configurations
            update_machine_conf()

            # sync guest time with host
            if config.ENV_DATA.get('sync_time_with_host'):
                sync_time_with_host(constants.INSTALLER_MACHINE_CONF, True)
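
upload_file copies the bootstrap ignition to the HTTPD host so the bootstrap VM can fetch it over HTTP. A hedged sketch of such a helper using paramiko over SFTP; the real project utility may use SCP or different connection options:

# Hypothetical sketch of upload_file; a paramiko/SFTP transfer is an
# assumption about how the project helper is implemented.
import paramiko


def upload_file(server, localpath, remotepath, user, password):
    """Copy localpath to remotepath on server over SFTP."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=server, username=user, password=password)
    try:
        sftp = ssh.open_sftp()
        sftp.put(localpath, remotepath)
        sftp.close()
    finally:
        ssh.close()
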
Example No. 10
def login_ui(console_url=None):
    """
    Login to OpenShift Console

    Args:
        console_url (str): ocp console url

    Returns:
        WebDriver: Selenium WebDriver instance

    """
    default_console = False
    if not console_url:
        console_url = get_ocp_url()
        default_console = True
    logger.info("Get password of OCP console")
    password = get_kubeadmin_password()
    password = password.rstrip()

    ocp_version = get_ocp_version()
    login_loc = locators[ocp_version]["login"]

    browser = ocsci_config.UI_SELENIUM.get("browser_type")
    if browser == "chrome":
        logger.info("chrome browser")
        chrome_options = Options()

        ignore_ssl = ocsci_config.UI_SELENIUM.get("ignore_ssl")
        if ignore_ssl:
            chrome_options.add_argument("--ignore-ssl-errors=yes")
            chrome_options.add_argument("--ignore-certificate-errors")
            chrome_options.add_argument("--allow-insecure-localhost")
            if config.ENV_DATA.get("import_clusters_to_acm"):
                # Dev shm should be disabled when sending a big amount of
                # characters, like the cert sections of a kubeconfig
                chrome_options.add_argument("--disable-dev-shm-usage")
            capabilities = chrome_options.to_capabilities()
            capabilities["acceptInsecureCerts"] = True

        # headless browsers are web browsers without a GUI
        headless = ocsci_config.UI_SELENIUM.get("headless")
        if headless:
            chrome_options.add_argument("--headless")
            chrome_options.add_argument("window-size=1920,1400")

        # use proxy server, if required
        if (
            config.DEPLOYMENT.get("proxy")
            or config.DEPLOYMENT.get("disconnected")
            or config.ENV_DATA.get("private_link")
        ) and config.ENV_DATA.get("client_http_proxy"):
            client_proxy = urlparse(config.ENV_DATA.get("client_http_proxy"))
            # there is a big difference between configuring an unauthenticated
            # and an authenticated proxy server for Chrome:
            # * an unauthenticated proxy can be configured via the
            #   --proxy-server command line parameter
            # * an authenticated proxy has to be provided through a custom
            #   extension, which doesn't work in headless mode!
            if not client_proxy.username:
                # unauthenticated proxy
                logger.info(
                    f"Configuring unauthenticated proxy ('{client_proxy.geturl()}') for browser"
                )
                chrome_options.add_argument(f"--proxy-server={client_proxy.geturl()}")
            elif not headless:
                # authenticated proxy, not headless mode
                # create Chrome extension with proxy settings
                logger.info(
                    f"Configuring authenticated proxy ('{client_proxy.geturl()}') for browser"
                )
                _templating = Templating()
                manifest_json = _templating.render_template(
                    constants.CHROME_PROXY_EXTENSION_MANIFEST_TEMPLATE, {}
                )
                background_js = _templating.render_template(
                    constants.CHROME_PROXY_EXTENSION_BACKGROUND_TEMPLATE,
                    {"proxy": client_proxy},
                )
                pluginfile = "/tmp/proxy_auth_plugin.zip"
                with zipfile.ZipFile(pluginfile, "w") as zp:
                    zp.writestr("manifest.json", manifest_json)
                    zp.writestr("background.js", background_js)
                chrome_options.add_extension(pluginfile)
            else:
                # authenticated proxy, headless mode
                logger.error(
                    "It is not possible to configure authenticated proxy "
                    f"('{client_proxy.geturl()}') for browser in headless mode"
                )
                raise NotSupportedProxyConfiguration(
                    "Unable to configure authenticated proxy in headless browser mode!"
                )

        chrome_browser_type = ocsci_config.UI_SELENIUM.get("chrome_type")
        driver = webdriver.Chrome(
            ChromeDriverManager(chrome_type=chrome_browser_type).install(),
            options=chrome_options,
        )
    else:
        raise ValueError(f"Not Support on {browser}")

    wait = WebDriverWait(driver, 60)
    driver.maximize_window()
    driver.implicitly_wait(10)
    driver.get(console_url)
    # Validate proceeding to the login console before taking any action:
    proceed_to_login_console(driver)
    if config.ENV_DATA.get("flexy_deployment") or config.ENV_DATA.get(
        "import_clusters_to_acm"
    ):
        try:
            element = wait.until(
                ec.element_to_be_clickable(
                    (
                        login_loc["kubeadmin_login_approval"][1],
                        login_loc["kubeadmin_login_approval"][0],
                    )
                )
            )
            element.click()
        except TimeoutException as e:
            take_screenshot(driver)
            copy_dom(driver)
            logger.error(e)
    element = wait.until(
        ec.element_to_be_clickable((login_loc["username"][1], login_loc["username"][0]))
    )
    take_screenshot(driver)
    copy_dom(driver)
    element.send_keys("kubeadmin")
    element = wait.until(
        ec.element_to_be_clickable((login_loc["password"][1], login_loc["password"][0]))
    )
    element.send_keys(password)
    element = wait.until(
        ec.element_to_be_clickable(
            (login_loc["click_login"][1], login_loc["click_login"][0])
        )
    )
    element.click()
    if default_console:
        WebDriverWait(driver, 60).until(ec.title_is(login_loc["ocp_page"]))
    return driver
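
login_ui assumes a per-OCP-version locators mapping where each login entry is a (locator, strategy) tuple; that is why the code swaps the tuple order when building the expected-condition locator, which Selenium expects as (strategy, locator). A minimal sketch of the expected shape; the concrete selectors and the version key below are assumptions:

# Hypothetical shape of the locators mapping used by login_ui; each entry is
# (locator_string, By strategy). The selectors and the "4.9" key are
# assumptions, not the project's actual values.
from selenium.webdriver.common.by import By

locators = {
    "4.9": {
        "login": {
            "username": ("inputUsername", By.ID),
            "password": ("inputPassword", By.ID),
            "click_login": ("//button[@type='submit']", By.XPATH),
            "kubeadmin_login_approval": ("kube:admin", By.LINK_TEXT),
            "ocp_page": "Overview · Red Hat OpenShift Container Platform",
        },
    },
}
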
Example No. 11
        def deploy_prereq(self):
            """
            Pre-Requisites for vSphere UPI Deployment
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(
                self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_repo(constants.VSPHERE_INSTALLER_REPO, self.upi_repo_path)

            # upload bootstrap ignition to public access server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                          constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}")
            upload_file(config.ENV_DATA.get('httpd_server'), bootstrap_path,
                        remote_path, config.ENV_DATA.get('httpd_server_user'),
                        config.ENV_DATA.get('httpd_server_password'))

            # generate bootstrap ignition url
            path_to_bootstrap_on_remote = remote_path.replace(
                "/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}")
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN)
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN)
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            cluster_domain = (f"{config.ENV_DATA.get('cluster_name')}."
                              f"{config.ENV_DATA.get('base_domain')}")
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join("ocp-deployment",
                                                       terraform_var_template)
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA)

            terraform_var_yaml = os.path.join(self.cluster_path,
                                              constants.TERRAFORM_DATA_DIR,
                                              "terraform.tfvars.yaml")
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

            # update gateway and DNS
            if config.ENV_DATA.get('gateway'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        '${cidrhost(var.machine_cidr,1)}',
                                        f"{config.ENV_DATA.get('gateway')}")

            if config.ENV_DATA.get('dns'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        constants.INSTALLER_DEFAULT_DNS,
                                        f"{config.ENV_DATA.get('dns')}")

            # update the zone in route
            if config.ENV_DATA.get('region'):
                def_zone = 'provider "aws" { region = "%s" } \n' % config.ENV_DATA.get(
                    'region')
                replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz",
                                        def_zone)

            # increase memory
            if config.ENV_DATA.get('memory'):
                replace_content_in_file(constants.INSTALLER_MACHINE_CONF,
                                        '${var.memory}',
                                        config.ENV_DATA.get('memory'))

            # increase CPUs
            worker_num_cpus = config.ENV_DATA.get('worker_num_cpus')
            master_num_cpus = config.ENV_DATA.get('master_num_cpus')
            if worker_num_cpus or master_num_cpus:
                with open(constants.VSPHERE_MAIN, 'r') as fd:
                    obj = hcl.load(fd)
                    if worker_num_cpus:
                        obj['module']['compute']['num_cpu'] = worker_num_cpus
                    if master_num_cpus:
                        obj['module']['control_plane'][
                            'num_cpu'] = master_num_cpus
                # Dump data to json file since hcl module
                # doesn't support dumping of data in HCL format
                dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
                os.rename(constants.VSPHERE_MAIN,
                          f"{constants.VSPHERE_MAIN}.backup")
Example No. 12
class VSPHEREHELPERS(object):
    """
    Helper class for vSphere
    """
    def __init__(self):
        """
        Initialize required variables
        """
        self.cluster_path = config.ENV_DATA['cluster_path']
        self.kubeconfig = os.path.join(self.cluster_path,
                                       config.RUN.get('kubeconfig_location'))
        self.folder_structure = config.ENV_DATA.get('folder_structure')
        self.ocp_version = get_ocp_version(seperator="_")
        self._templating = Templating()

    def generate_terraform_vars_for_scaleup(self, rhcos_ips):
        """
        Generates the terraform variables file for scaling nodes
        """
        self.scale_up_terraform_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR)
        scale_up_terraform_var_yaml = os.path.join(
            self.scale_up_terraform_dir, "scale_up_terraform.tfvars.yaml")
        config.ENV_DATA['cluster_info_path'] = self.scale_up_terraform_dir
        config.ENV_DATA['credentials_path'] = self.scale_up_terraform_dir

        if self.folder_structure:
            logger.info("Generating terraform variables for "
                        "scaling nodes with folder structure")
            scale_up_terraform_var_template_with_folder_structure = (
                "scale_up_terraform_with_folder_structure.tfvars.j2")
            scale_up_terraform_var_template_path_with_folder_structure = (
                os.path.join(
                    "ocp-deployment",
                    scale_up_terraform_var_template_with_folder_structure))

            scale_up_terraform_config_str_with_folder_structure = (
                self._templating.render_template(
                    scale_up_terraform_var_template_path_with_folder_structure,
                    config.ENV_DATA))

            with open(scale_up_terraform_var_yaml, "w") as f:
                f.write(scale_up_terraform_config_str_with_folder_structure)

            scale_up_terraform_var = convert_yaml2tfvars(
                scale_up_terraform_var_yaml)
            replace_content_in_file(scale_up_terraform_var, "None", "")

        else:
            logger.info("Generating terraform variables for scaling"
                        " nodes without folder structure")
            scale_up_terraform_var_template = "scale_up_terraform.tfvars.j2"
            scale_up_terraform_var_template_path = os.path.join(
                "ocp-deployment", scale_up_terraform_var_template)
            scale_up_terraform_config_str = self._templating.render_template(
                scale_up_terraform_var_template_path, config.ENV_DATA)

            with open(scale_up_terraform_var_yaml, "w") as f:
                f.write(scale_up_terraform_config_str)

            scale_up_terraform_var = convert_yaml2tfvars(
                scale_up_terraform_var_yaml)

            # append RHCOS ip list to terraform variable file
            with open(scale_up_terraform_var, "a+") as fd:
                fd.write(f"rhcos_list = {json.dumps(rhcos_ips)}")

        logger.info(
            f"scale-up terraform variable file: {scale_up_terraform_var}")

        return scale_up_terraform_var

    def modify_scaleup_repo(self):
        """
        Modify the scale-up repo. For a better user experience, remove the
        access and secret keys and the related variables from the appropriate
        locations in the scale-up repo
        """
        # importing here to avoid circular dependency
        from ocs_ci.deployment.vmware import change_vm_root_disk_size
        if self.folder_structure:
            logger.info("Modifying scaleup repo for folder structure")
            # modify default_map.yaml
            default_map_path = os.path.join(
                constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
                f"aos-{self.ocp_version}", "default_map.yaml")
            dict_data = load_yaml(default_map_path)
            dict_data['cluster_domain'] = config.ENV_DATA['base_domain']
            dict_data['vsphere']['vcsa-qe']['datacenter'] = (
                config.ENV_DATA['vsphere_datacenter'])
            dict_data['vsphere']['vcsa-qe']['datastore'] = (
                config.ENV_DATA['vsphere_datastore'])
            dict_data['vsphere']['vcsa-qe']['network'] = (
                config.ENV_DATA['vm_network'])
            dict_data['vsphere']['vcsa-qe']['cpus'] = (
                config.ENV_DATA['rhel_num_cpus'])
            dict_data['vsphere']['vcsa-qe']['memory'] = (
                config.ENV_DATA['rhel_memory'])
            dict_data['vsphere']['vcsa-qe']['root_volume_size'] = (
                config.ENV_DATA.get('root_disk_size', '120'))

            dict_data['vsphere']['vcsa-qe']['image'] = (
                config.ENV_DATA['rhel_template'])

            dump_data_to_temp_yaml(dict_data, default_map_path)
        else:
            # remove access and secret key from constants.SCALEUP_VSPHERE_MAIN
            access_key = 'access_key       = "${var.aws_access_key}"'
            secret_key = 'secret_key       = "${var.aws_secret_key}"'
            replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN,
                                    f"{access_key}", " ")
            replace_content_in_file(constants.SCALEUP_VSPHERE_MAIN,
                                    f"{secret_key}", " ")

            # remove access and secret key from constants.SCALEUP_VSPHERE_ROUTE53
            route53_access_key = 'access_key = "${var.access_key}"'
            route53_secret_key = 'secret_key = "${var.secret_key}"'
            replace_content_in_file(constants.SCALEUP_VSPHERE_ROUTE53,
                                    f"{route53_access_key}", " ")
            replace_content_in_file(constants.SCALEUP_VSPHERE_ROUTE53,
                                    f"{route53_secret_key}", " ")

            replace_content_in_file(constants.SCALEUP_VSPHERE_ROUTE53,
                                    "us-east-1",
                                    f"{config.ENV_DATA.get('region')}")

            # remove access and secret variables from scale-up repo
            remove_keys_from_tf_variable_file(
                constants.SCALEUP_VSPHERE_VARIABLES,
                ['aws_access_key', 'aws_secret_key'])
            remove_keys_from_tf_variable_file(
                constants.SCALEUP_VSPHERE_ROUTE53_VARIABLES,
                ['access_key', 'secret_key'])

            # change root disk size
            change_vm_root_disk_size(constants.SCALEUP_VSPHERE_MACHINE_CONF)

    def generate_cluster_info(self):
        """
        Generates the cluster information file
        """
        logger.info("Generating cluster information file")

        # get kubeconfig and upload to httpd server
        kubeconfig = os.path.join(self.cluster_path,
                                  config.RUN.get('kubeconfig_location'))
        remote_path = os.path.join(config.ENV_DATA.get('path_to_upload'),
                                   f"{config.RUN.get('run_id')}_kubeconfig")
        upload_file(config.ENV_DATA.get('httpd_server'), kubeconfig,
                    remote_path, config.ENV_DATA.get('httpd_server_user'),
                    config.ENV_DATA.get('httpd_server_password'))

        # Form the kubeconfig url path (use replace() rather than lstrip():
        # lstrip treats its argument as a set of characters, not a prefix)
        kubeconfig_url_path = os.path.join(
            'http://', config.ENV_DATA.get('httpd_server'),
            remote_path.replace('/var/www/html/', ''))
        config.ENV_DATA['kubeconfig_url'] = kubeconfig_url_path

        # get the infra_id
        infra_id = get_infra_id(self.cluster_path)
        config.ENV_DATA['infra_id'] = infra_id

        # get the cluster id
        cluster_id = get_cluster_id(self.cluster_path)
        config.ENV_DATA['cluster_id'] = cluster_id

        # fetch the installer version
        installer_version_str = run_cmd(
            f"{config.RUN['bin_dir']}/openshift-install version")
        installer_version = installer_version_str.split()[1]
        config.ENV_DATA['installer_version'] = installer_version

        # get the major and minor version of OCP
        version_obj = Version(installer_version)
        ocp_version_x = version_obj.major
        ocp_version_y = version_obj.minor
        config.ENV_DATA['ocp_version_x'] = ocp_version_x
        config.ENV_DATA['ocp_version_y'] = ocp_version_y

        # generate the cluster info yaml file
        terraform_var_template = "cluster_info.yaml.j2"
        terraform_var_template_path = os.path.join("ocp-deployment",
                                                   terraform_var_template)
        terraform_config_str = self._templating.render_template(
            terraform_var_template_path, config.ENV_DATA)
        terraform_var_yaml = os.path.join(self.cluster_path,
                                          constants.TERRAFORM_DATA_DIR,
                                          constants.SCALEUP_TERRAFORM_DATA_DIR,
                                          "cluster_info.yaml")

        with open(terraform_var_yaml, "w") as f:
            f.write(terraform_config_str)

        # config.ENV_DATA['dns_server'] = config.ENV_DATA['dns']
        template_vars = (f"\"dns_server: {config.ENV_DATA['dns']}"
                         f"\\nremove_rhcos_worker: 'yes'\\n\"")

        replace_content_in_file(terraform_var_yaml, "PLACEHOLDER",
                                template_vars)
        logger.info(f"cluster yaml file: {terraform_var_yaml}")

    def generate_config_yaml(self):
        """
        Generate config yaml file
        """
        # create config directory in scale_up_terraform_data directory
        scaleup_data_config_dir = os.path.join(
            self.cluster_path, constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR, "config")
        create_directory_path(scaleup_data_config_dir)

        # generate config yaml file
        scale_up_config_var_template = "scale_up_config.yaml.j2"
        scale_up_config_var_template_path = os.path.join(
            "ocp-deployment", scale_up_config_var_template)
        config.ENV_DATA['ssh_key_private'] = (
            config.DEPLOYMENT['ssh_key_private'])
        scale_up_config_str = self._templating.render_template(
            scale_up_config_var_template_path, config.ENV_DATA)
        scale_config_var_yaml = os.path.join(scaleup_data_config_dir,
                                             "config.yaml")

        with open(scale_config_var_yaml, "w") as f:
            f.write(scale_up_config_str)

        logger.debug(f"scaleup config yaml file : {scale_config_var_yaml}")