Ejemplo n.º 1
0
def create_terraform_var_file(terraform_var_template):
    """
    Render the terraform variables file from a jinja template.

    The rendered YAML is written to ``terraform.tfvars.yaml`` inside the
    cluster's terraform data directory and then converted to tfvars format.

    Args:
        terraform_var_template (str): terraform template in jinja format

    """
    templating = Templating()
    template_path = os.path.join("ocp-deployment", terraform_var_template)
    rendered_config = templating.render_template(template_path, config.ENV_DATA)

    tfvars_yaml_path = os.path.join(
        config.ENV_DATA["cluster_path"],
        constants.TERRAFORM_DATA_DIR,
        "terraform.tfvars.yaml",
    )
    with open(tfvars_yaml_path, "w") as var_file:
        var_file.write(rendered_config)

    convert_yaml2tfvars(tfvars_yaml_path)
Ejemplo n.º 2
0
        def create_config(self):
            """
            Creates the OCP deploy config for the vSphere
            """
            # Render install-config.yaml from the jinja template matching
            # this platform / deployment-type combination
            templating = Templating()
            template_name = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2"
            )
            template_path = os.path.join("ocp-deployment", template_name)
            rendered = templating.render_template(template_path, config.ENV_DATA)

            # Load into a dict so pull secret and ssh key can be injected
            config_data = yaml.safe_load(rendered)
            config_data["pullSecret"] = self.get_pull_secret()
            config_data["sshKey"] = self.get_ssh_key()

            target = os.path.join(self.cluster_path, "install-config.yaml")
            with open(target, "w") as config_file:
                config_file.write(yaml.safe_dump(config_data))
Ejemplo n.º 3
0
        def create_config(self):
            """
            Creates the OCP deploy config for the vSphere
            """
            # Render install-config.yaml from the jinja template matching
            # this platform / deployment-type combination
            templating = Templating()
            template_name = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2"
            )
            template_path = os.path.join("ocp-deployment", template_name)
            rendered = templating.render_template(template_path, config.ENV_DATA)

            # Inject credentials and the IPI networking/naming details
            config_data = yaml.safe_load(rendered)
            config_data["pullSecret"] = self.get_pull_secret()
            config_data["sshKey"] = self.get_ssh_key()
            vsphere_platform = config_data["platform"]["vsphere"]
            vsphere_platform["apiVIP"] = self.ipi_details.get("vmware_ipi_api_vip")
            vsphere_platform["ingressVIP"] = self.ipi_details.get(
                "vmware_ipi_ingress_vip"
            )
            config_data["metadata"]["name"] = self.ipi_details.get(
                "vmware_ipi_default_cluster_name"
            )
            config_data["baseDomain"] = self.ipi_details.get(
                "vmware_ipi_default_base_domain"
            )

            target = os.path.join(self.cluster_path, "install-config.yaml")
            with open(target, "w") as config_file:
                config_file.write(yaml.safe_dump(config_data))
Ejemplo n.º 4
0
        def deploy_prereq(self):
            """
            Pre-Requisites for vSphere UPI Deployment

            Creates the ignition configs, publishes the bootstrap ignition
            on the HTTPD server, loads master/worker ignitions and the
            cluster domain into ``config.ENV_DATA``, renders the terraform
            variables file and updates the machine configuration.
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_openshift_installer()

            # upload bootstrap ignition to public access server
            # remote filename is prefixed with the run id to avoid clashes
            # between concurrent runs on the shared HTTPD server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}"
            )
            upload_file(
                config.ENV_DATA.get('httpd_server'),
                bootstrap_path,
                remote_path,
                config.ENV_DATA.get('httpd_server_user'),
                config.ENV_DATA.get('httpd_server_password')
            )

            # generate bootstrap ignition url
            # strip the document root so the remainder is a valid URL path
            path_to_bootstrap_on_remote = remote_path.replace("/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}"
            )
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            # (the terraform template consumes these from ENV_DATA)
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.MASTER_IGN
            )
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'),
                constants.WORKER_IGN
            )
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            # full cluster domain: <cluster_name>.<base_domain>
            cluster_domain = (
                f"{config.ENV_DATA.get('cluster_name')}."
                f"{config.ENV_DATA.get('base_domain')}"
            )
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join(
                "ocp-deployment", terraform_var_template
            )
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA
            )

            # write rendered YAML, then convert it to the tfvars format
            # terraform actually consumes
            terraform_var_yaml = os.path.join(
                self.cluster_path,
                constants.TERRAFORM_DATA_DIR,
                "terraform.tfvars.yaml"
            )
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

            # update the machine configurations
            update_machine_conf()

            # sync guest time with host
            if config.ENV_DATA.get('sync_time_with_host'):
                sync_time_with_host(constants.INSTALLER_MACHINE_CONF, True)
Ejemplo n.º 5
0
def login_ui(console_url=None):
    """
    Login to OpenShift Console

    Args:
        console_url (str): ocp console url

    return:
        driver (Selenium WebDriver)

    """
    # When no URL is given, use the default console and later verify the
    # page title after login
    default_console = False
    if not console_url:
        console_url = get_ocp_url()
        default_console = True
    logger.info("Get password of OCP console")
    password = get_kubeadmin_password()
    password = password.rstrip()

    # UI locators are versioned per OCP release
    ocp_version = get_ocp_version()
    login_loc = locators[ocp_version]["login"]

    browser = ocsci_config.UI_SELENIUM.get("browser_type")
    if browser == "chrome":
        logger.info("chrome browser")
        chrome_options = Options()

        # Optionally accept self-signed / invalid certificates
        ignore_ssl = ocsci_config.UI_SELENIUM.get("ignore_ssl")
        if ignore_ssl:
            chrome_options.add_argument("--ignore-ssl-errors=yes")
            chrome_options.add_argument("--ignore-certificate-errors")
            chrome_options.add_argument("--allow-insecure-localhost")
            if config.ENV_DATA.get("import_clusters_to_acm"):
                # Dev shm should be disabled when sending big amonut characters, like the cert sections of a kubeconfig
                chrome_options.add_argument("--disable-dev-shm-usage")
            capabilities = chrome_options.to_capabilities()
            capabilities["acceptInsecureCerts"] = True

        # headless browsers are web browsers without a GUI
        headless = ocsci_config.UI_SELENIUM.get("headless")
        if headless:
            chrome_options.add_argument("--headless")
            chrome_options.add_argument("window-size=1920,1400")

        # use proxy server, if required
        if (
            config.DEPLOYMENT.get("proxy")
            or config.DEPLOYMENT.get("disconnected")
            or config.ENV_DATA.get("private_link")
        ) and config.ENV_DATA.get("client_http_proxy"):
            client_proxy = urlparse(config.ENV_DATA.get("client_http_proxy"))
            # there is a big difference between configuring not authenticated
            # and authenticated proxy server for Chrome:
            # * not authenticated proxy can be configured via --proxy-server
            #   command line parameter
            # * authenticated proxy have to be provided through customly
            #   created Extension and it doesn't work in headless mode!
            if not client_proxy.username:
                # not authenticated proxy
                logger.info(
                    f"Configuring not authenticated proxy ('{client_proxy.geturl()}') for browser"
                )
                chrome_options.add_argument(f"--proxy-server={client_proxy.geturl()}")
            elif not headless:
                # authenticated proxy, not headless mode
                # create Chrome extension with proxy settings
                logger.info(
                    f"Configuring authenticated proxy ('{client_proxy.geturl()}') for browser"
                )
                _templating = Templating()
                manifest_json = _templating.render_template(
                    constants.CHROME_PROXY_EXTENSION_MANIFEST_TEMPLATE, {}
                )
                background_js = _templating.render_template(
                    constants.CHROME_PROXY_EXTENSION_BACKGROUND_TEMPLATE,
                    {"proxy": client_proxy},
                )
                # Chrome loads extensions from zip archives; pack the two
                # rendered files and register the archive with the browser
                pluginfile = "/tmp/proxy_auth_plugin.zip"
                with zipfile.ZipFile(pluginfile, "w") as zp:
                    zp.writestr("manifest.json", manifest_json)
                    zp.writestr("background.js", background_js)
                chrome_options.add_extension(pluginfile)
            else:
                # authenticated proxy, headless mode
                logger.error(
                    "It is not possible to configure authenticated proxy "
                    f"('{client_proxy.geturl()}') for browser in headless mode"
                )
                raise NotSupportedProxyConfiguration(
                    "Unable to configure authenticated proxy in headless browser mode!"
                )

        chrome_browser_type = ocsci_config.UI_SELENIUM.get("chrome_type")
        driver = webdriver.Chrome(
            ChromeDriverManager(chrome_type=chrome_browser_type).install(),
            options=chrome_options,
        )
    else:
        # only Chrome is supported at the moment
        raise ValueError(f"Not Support on {browser}")

    wait = WebDriverWait(driver, 60)
    driver.maximize_window()
    driver.implicitly_wait(10)
    driver.get(console_url)
    # Validate proceeding to the login console before taking any action:
    proceed_to_login_console(driver)
    # Some deployments show an extra identity-provider approval step;
    # best-effort click it, log (don't fail) on timeout
    if config.ENV_DATA.get("flexy_deployment") or config.ENV_DATA.get(
        "import_clusters_to_acm"
    ):
        try:
            element = wait.until(
                ec.element_to_be_clickable(
                    (
                        login_loc["kubeadmin_login_approval"][1],
                        login_loc["kubeadmin_login_approval"][0],
                    )
                )
            )
            element.click()
        except TimeoutException as e:
            take_screenshot(driver)
            copy_dom(driver)
            logger.error(e)
    # Fill in the kubeadmin credentials and submit the login form
    element = wait.until(
        ec.element_to_be_clickable((login_loc["username"][1], login_loc["username"][0]))
    )
    take_screenshot(driver)
    copy_dom(driver)
    element.send_keys("kubeadmin")
    element = wait.until(
        ec.element_to_be_clickable((login_loc["password"][1], login_loc["password"][0]))
    )
    element.send_keys(password)
    element = wait.until(
        ec.element_to_be_clickable(
            (login_loc["click_login"][1], login_loc["click_login"][0])
        )
    )
    element.click()
    # For the default console, wait until the post-login page title shows up
    if default_console:
        WebDriverWait(driver, 60).until(ec.title_is(login_loc["ocp_page"]))
    return driver
Ejemplo n.º 6
0
        def deploy_prereq(self):
            """
            Pre-Requisites for vSphere UPI Deployment

            Creates the ignition configs, publishes the bootstrap ignition
            on the HTTPD server, exports ignition/domain data into
            ``config.ENV_DATA``, renders the terraform variables and patches
            the installer terraform files (gateway, DNS, zone, memory, CPUs).
            """
            super(VSPHEREUPI.OCPDeployment, self).deploy_prereq()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(
                self.cluster_path, config.RUN.get('kubeconfig_location'))

            # git clone repo from openshift installer
            clone_repo(constants.VSPHERE_INSTALLER_REPO, self.upi_repo_path)

            # upload bootstrap ignition to public access server
            # remote filename is prefixed with the run id to avoid clashes
            # between concurrent runs on the shared HTTPD server
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'),
                                          constants.BOOTSTRAP_IGN)
            remote_path = os.path.join(
                config.ENV_DATA.get('path_to_upload'),
                f"{config.RUN.get('run_id')}_{constants.BOOTSTRAP_IGN}")
            upload_file(config.ENV_DATA.get('httpd_server'), bootstrap_path,
                        remote_path, config.ENV_DATA.get('httpd_server_user'),
                        config.ENV_DATA.get('httpd_server_password'))

            # generate bootstrap ignition url
            # strip the document root so the remainder is a valid URL path
            path_to_bootstrap_on_remote = remote_path.replace(
                "/var/www/html/", "")
            bootstrap_ignition_url = (
                f"http://{config.ENV_DATA.get('httpd_server')}/"
                f"{path_to_bootstrap_on_remote}")
            logger.info(f"bootstrap_ignition_url: {bootstrap_ignition_url}")
            config.ENV_DATA['bootstrap_ignition_url'] = bootstrap_ignition_url

            # load master and worker ignitions to variables
            # (the terraform template consumes these from ENV_DATA)
            master_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN)
            master_ignition = read_file_as_str(f"{master_ignition_path}")
            config.ENV_DATA['control_plane_ignition'] = master_ignition

            worker_ignition_path = os.path.join(
                config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN)
            worker_ignition = read_file_as_str(f"{worker_ignition_path}")
            config.ENV_DATA['compute_ignition'] = worker_ignition

            # full cluster domain: <cluster_name>.<base_domain>
            cluster_domain = (f"{config.ENV_DATA.get('cluster_name')}."
                              f"{config.ENV_DATA.get('base_domain')}")
            config.ENV_DATA['cluster_domain'] = cluster_domain

            # generate terraform variables from template
            logger.info("Generating terraform variables")
            _templating = Templating()
            terraform_var_template = "terraform.tfvars.j2"
            terraform_var_template_path = os.path.join("ocp-deployment",
                                                       terraform_var_template)
            terraform_config_str = _templating.render_template(
                terraform_var_template_path, config.ENV_DATA)

            # write rendered YAML, then convert it to the tfvars format
            # terraform actually consumes
            terraform_var_yaml = os.path.join(self.cluster_path,
                                              constants.TERRAFORM_DATA_DIR,
                                              "terraform.tfvars.yaml")
            with open(terraform_var_yaml, "w") as f:
                f.write(terraform_config_str)
            self.terraform_var = convert_yaml2tfvars(terraform_var_yaml)

            # update gateway and DNS
            # replace the terraform interpolation placeholders in the
            # installer ignition with the configured static values
            if config.ENV_DATA.get('gateway'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        '${cidrhost(var.machine_cidr,1)}',
                                        f"{config.ENV_DATA.get('gateway')}")

            if config.ENV_DATA.get('dns'):
                replace_content_in_file(constants.INSTALLER_IGNITION,
                                        constants.INSTALLER_DEFAULT_DNS,
                                        f"{config.ENV_DATA.get('dns')}")

            # update the zone in route
            if config.ENV_DATA.get('region'):
                def_zone = 'provider "aws" { region = "%s" } \n' % config.ENV_DATA.get(
                    'region')
                replace_content_in_file(constants.INSTALLER_ROUTE53, "xyz",
                                        def_zone)

            # increase memory
            if config.ENV_DATA.get('memory'):
                replace_content_in_file(constants.INSTALLER_MACHINE_CONF,
                                        '${var.memory}',
                                        config.ENV_DATA.get('memory'))

            # increase CPUs
            worker_num_cpus = config.ENV_DATA.get('worker_num_cpus')
            master_num_cpus = config.ENV_DATA.get('master_num_cpus')
            if worker_num_cpus or master_num_cpus:
                with open(constants.VSPHERE_MAIN, 'r') as fd:
                    obj = hcl.load(fd)
                    if worker_num_cpus:
                        obj['module']['compute']['num_cpu'] = worker_num_cpus
                    if master_num_cpus:
                        obj['module']['control_plane'][
                            'num_cpu'] = master_num_cpus
                # Dump data to json file since hcl module
                # doesn't support dumping of data in HCL format
                dump_data_to_json(obj, f"{constants.VSPHERE_MAIN}.json")
                # keep the original HCL file as a backup; terraform will pick
                # up the generated .json variant instead
                os.rename(constants.VSPHERE_MAIN,
                          f"{constants.VSPHERE_MAIN}.backup")