Example #1
def run_must_gather(log_dir_path, image, command=None):
    """
    Runs the must-gather tool against the cluster

    Args:
        log_dir_path (str): directory for dumped must-gather logs
        image (str): must-gather image registry path
        command (str): optional command to execute within the must-gather image

    Returns:
        mg_output (str): must-gather cli output

    """
    # Must-gather has many changes in 4.6 that add more time to the collection.
    # https://github.com/red-hat-storage/ocs-ci/issues/3240
    mg_output = ""
    ocs_version = version.get_semantic_ocs_version_from_config()
    timeout = 1500 if ocs_version >= version.VERSION_4_6 else 600
    must_gather_timeout = ocsci_config.REPORTING.get("must_gather_timeout",
                                                     timeout)

    log.info(f"Must gather image: {image} will be used.")
    create_directory_path(log_dir_path)
    cmd = f"adm must-gather --image={image} --dest-dir={log_dir_path}"
    if command:
        cmd += f" -- {command}"

    log.info(f"OCS logs will be placed in location {log_dir_path}")
    occli = OCP()
    try:
        mg_output = occli.exec_oc_cmd(cmd,
                                      out_yaml_format=False,
                                      timeout=must_gather_timeout)
    except CommandFailed as ex:
        log.error(f"Failed during must gather logs! Error: {ex}"
                  f"Must-Gather Output: {mg_output}")
    except TimeoutExpired as ex:
        log.error(
            f"Timeout {must_gather_timeout}s for must-gather reached, command"
            f" exited with error: {ex}"
            f"Must-Gather Output: {mg_output}")
    return mg_output
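
A minimal usage sketch of the helper above; the image tag and log directory are illustrative placeholders, not values from the source:

# Hypothetical invocation; any must-gather image path works here.
mg_output = run_must_gather(
    log_dir_path="/tmp/must-gather",
    image="quay.io/rhceph-dev/ocs-must-gather:latest-4.6",
)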
Example #2
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )

    # Compare versions semantically; casting the version string to float
    # would misorder releases like "4.10" (float 4.1 < 4.7).
    ocs_version = version.get_semantic_ocs_version_from_config()
    nb_db_label = (
        constants.NOOBAA_DB_LABEL_46_AND_UNDER
        if ocs_version < version.VERSION_4_7
        else constants.NOOBAA_DB_LABEL_47_AND_ABOVE
    )
    try:
        nb_db_pod = Pod(**get_pods_having_label(
            label=nb_db_label, namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])
    except IndexError:
        log.warning(
            "Unable to find pod using label `%s` in namespace `%s`",
            nb_db_label,
            defaults.ROOK_CLUSTER_NAMESPACE,
        )
        return
    ocs_log_dir_path = os.path.join(log_dir_path, "noobaa_db_dump")
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, "nbcore.gz")
    if ocs_version < version.VERSION_4_7:
        cmd = "mongodump --archive=nbcore.gz --gzip --db=nbcore"
    else:
        cmd = 'bash -c "pg_dump nbcore | gzip > nbcore.gz"'

    nb_db_pod.exec_cmd_on_pod(cmd)
    download_file_from_pod(
        pod_name=nb_db_pod.name,
        remotepath="/opt/app-root/src/nbcore.gz",
        localpath=ocs_log_dir_path,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )
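
A minimal usage sketch, assuming a cluster with a reachable NooBaa DB pod; the log directory is an illustrative placeholder:

# Hypothetical call; the dump lands at <log_dir_path>/noobaa_db_dump/nbcore.gz.
collect_noobaa_db_dump("/tmp/ocs-logs")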
Example #3
def main(argv=None):
    arguments = argv or sys.argv[1:]
    init_ocsci_conf(arguments)
    # Create a log directory for each cluster context; pytest_logs_dir is
    # left pointing at the log path of the last context switched to.
    for i in range(framework.config.nclusters):
        framework.config.switch_ctx(i)
        pytest_logs_dir = utils.ocsci_log_path()
        utils.create_directory_path(framework.config.RUN["log_dir"])
    arguments.extend(
        [
            "-p",
            "ocs_ci.framework.pytest_customization.ocscilib",
            "-p",
            "ocs_ci.framework.pytest_customization.marks",
            "-p",
            "ocs_ci.framework.pytest_customization.reports",
            "--logger-logsdir",
            pytest_logs_dir,
        ]
    )
    return pytest.main(arguments)
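
A minimal entry-point sketch; the pytest arguments shown are illustrative, not values from the source:

import sys

# Hypothetical direct invocation: arguments are passed straight to pytest.
if __name__ == "__main__":
    sys.exit(main(["tests/", "--collect-only"]))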
Example #4
def collect_noobaa_db_dump(log_dir_path):
    """
    Collect the Noobaa DB dump

    Args:
        log_dir_path (str): directory for dumped Noobaa DB

    """
    from ocs_ci.ocs.resources.pod import (
        get_pods_having_label,
        download_file_from_pod,
        Pod,
    )
    nb_db_pod = Pod(
        **get_pods_having_label(label=constants.NOOBAA_DB_LABEL,
                                namespace=defaults.ROOK_CLUSTER_NAMESPACE)[0])
    ocs_log_dir_path = os.path.join(log_dir_path, 'noobaa_db_dump')
    create_directory_path(ocs_log_dir_path)
    ocs_log_dir_path = os.path.join(ocs_log_dir_path, 'nbcore.gz')
    nb_db_pod.exec_cmd_on_pod(
        "mongodump --archive=nbcore.gz --gzip --db=nbcore")
    download_file_from_pod(pod_name=nb_db_pod.name,
                           remotepath="/opt/app-root/src/nbcore.gz",
                           localpath=ocs_log_dir_path,
                           namespace=defaults.ROOK_CLUSTER_NAMESPACE)
Example #5
def main(argv=None):
    arguments = argv or sys.argv[1:]
    init_ocsci_conf(arguments)
    pytest_logs_dir = utils.ocsci_log_path()
    utils.create_directory_path(framework.config.RUN["log_dir"])
    launch_name = f"{utils.get_testrun_name()}-{getuser()}"
    arguments.extend(
        [
            "-p",
            "ocs_ci.framework.pytest_customization.ocscilib",
            "-p",
            "ocs_ci.framework.pytest_customization.marks",
            "-p",
            "ocs_ci.framework.pytest_customization.reports",
            "--logger-logsdir",
            pytest_logs_dir,
            "--rp-launch",
            launch_name,
        ]
    )
    return pytest.main(arguments)
Example #6
        def __init__(self):
            super().__init__()
            self.public_key = {}
            self.upi_repo_path = os.path.join(constants.EXTERNAL_DIR,
                                              'installer')
            self.previous_dir = os.getcwd()

            # Download the terraform binary (version taken from config) and
            # update the installer path in ENV_DATA.
            # Use "0.11.14" for releases below OCP 4.5.
            terraform_version = config.DEPLOYMENT['terraform_version']
            terraform_installer = get_terraform(version=terraform_version)
            config.ENV_DATA['terraform_installer'] = terraform_installer

            # Initialize Terraform
            self.terraform_data_dir = os.path.join(
                self.cluster_path, constants.TERRAFORM_DATA_DIR)
            create_directory_path(self.terraform_data_dir)
            self.terraform_work_dir = constants.VSPHERE_DIR
            self.terraform = Terraform(self.terraform_work_dir)
            ocp_version = get_ocp_version()
            self.folder_structure = False
            if Version.coerce(ocp_version) >= Version.coerce('4.5'):
                self.folder_structure = True
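
The folder_structure switch relies on Version.coerce rather than a float cast; a minimal sketch of why, assuming the semantic_version package that provides this API:

from semantic_version import Version

# float() misorders double-digit minor versions: float("4.10") == 4.1.
print(float("4.10") >= float("4.5"))                    # False
# Version.coerce compares release components numerically.
print(Version.coerce("4.10") >= Version.coerce("4.5"))  # True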
Example #7
    def add_nodes(self):
        """
        Add new nodes to the cluster
        """
        # create separate directory for scale-up terraform data
        scaleup_terraform_data_dir = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR
        )
        create_directory_path(scaleup_terraform_data_dir)
        logger.info(
            f"scale-up terraform data directory: {scaleup_terraform_data_dir}"
        )

        # git clone repo from openshift-misc
        clone_repo(
            constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path
        )

        # modify scale-up repo
        self.modify_scaleup_repo()

        config.ENV_DATA['vsphere_resource_pool'] = config.ENV_DATA.get(
            "cluster_name"
        )

        # sync guest time with host
        if config.ENV_DATA.get('sync_time_with_host'):
            sync_time_with_host(constants.SCALEUP_VSPHERE_MACHINE_CONF, True)

        # get the RHCOS worker list
        self.rhcos_ips = get_node_ips()
        logger.info(f"RHCOS IP's: {json.dumps(self.rhcos_ips)}")

        # generate terraform variable for scaling nodes
        self.generate_terraform_vars_for_scaleup()

        # Add nodes using terraform
        scaleup_terraform = Terraform(constants.SCALEUP_VSPHERE_DIR)
        previous_dir = os.getcwd()
        os.chdir(scaleup_terraform_data_dir)
        scaleup_terraform.initialize()
        scaleup_terraform.apply(self.scale_up_terraform_var)
        scaleup_terraform_tfstate = os.path.join(
            scaleup_terraform_data_dir,
            "terraform.tfstate"
        )
        out = scaleup_terraform.output(
            scaleup_terraform_tfstate,
            "rhel_worker"
        )
        rhel_worker_nodes = json.loads(out)['value']
        logger.info(f"RHEL worker nodes: {rhel_worker_nodes}")
        os.chdir(previous_dir)

        # Install OCP on rhel nodes
        rhel_install = OCPINSTALLRHEL(rhel_worker_nodes)
        rhel_install.prepare_rhel_nodes()
        rhel_install.execute_ansible_playbook()

        # Give the new nodes some time to settle down
        time.sleep(self.wait_time)

        # wait for nodes to be in READY state
        wait_for_nodes_status(timeout=300)
Example #8
    def add_nodes(self):
        """
        Add new nodes to the cluster
        """
        # create separate directory for scale-up terraform data
        scaleup_terraform_data_dir = os.path.join(
            self.cluster_path,
            constants.TERRAFORM_DATA_DIR,
            constants.SCALEUP_TERRAFORM_DATA_DIR,
        )
        create_directory_path(scaleup_terraform_data_dir)
        logger.info(
            f"scale-up terraform data directory: {scaleup_terraform_data_dir}")

        # git clone repo from openshift-misc
        clone_repo(constants.VSPHERE_SCALEUP_REPO, self.upi_scale_up_repo_path)

        # git clone repo from cluster-launcher
        clone_repo(constants.VSPHERE_CLUSTER_LAUNCHER,
                   self.cluster_launcer_repo_path)

        helpers = VSPHEREHELPERS()
        helpers.modify_scaleup_repo()

        config.ENV_DATA["vsphere_resource_pool"] = config.ENV_DATA.get(
            "cluster_name")

        # sync guest time with host
        sync_time_with_host_file = constants.SCALEUP_VSPHERE_MACHINE_CONF
        if config.ENV_DATA["folder_structure"]:
            sync_time_with_host_file = os.path.join(
                constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
                f"aos-{get_ocp_version(seperator='_')}",
                constants.CLUSTER_LAUNCHER_MACHINE_CONF,
            )
        if config.ENV_DATA.get("sync_time_with_host"):
            sync_time_with_host(sync_time_with_host_file, True)

        # get the RHCOS worker list
        rhcos_ips = get_node_ips()
        logger.info(f"RHCOS IP's: {json.dumps(rhcos_ips)}")

        # generate terraform variable for scaling nodes
        self.scale_up_terraform_var = helpers.generate_terraform_vars_for_scaleup(
            rhcos_ips)

        # choose the vsphere_dir based on OCP version and generate
        # cluster_info and config yaml files for OCP 4.5 and above
        vsphere_dir = constants.SCALEUP_VSPHERE_DIR
        rhel_module = "rhel-worker"
        if Version.coerce(self.ocp_version) >= Version.coerce("4.5"):
            vsphere_dir = os.path.join(
                constants.CLUSTER_LAUNCHER_VSPHERE_DIR,
                f"aos-{get_ocp_version('_')}",
                "vsphere",
            )
            helpers.generate_cluster_info()
            helpers.generate_config_yaml()
            rhel_module = "RHEL_WORKER_LIST"

        # Add nodes using terraform
        scaleup_terraform = Terraform(vsphere_dir)
        previous_dir = os.getcwd()
        os.chdir(scaleup_terraform_data_dir)
        scaleup_terraform.initialize()
        scaleup_terraform.apply(self.scale_up_terraform_var)
        scaleup_terraform_tfstate = os.path.join(scaleup_terraform_data_dir,
                                                 "terraform.tfstate")
        out = scaleup_terraform.output(scaleup_terraform_tfstate, rhel_module)
        if config.ENV_DATA["folder_structure"]:
            rhel_worker_nodes = out.strip().replace('"', "").split(",")
        else:
            rhel_worker_nodes = json.loads(out)["value"]

        logger.info(f"RHEL worker nodes: {rhel_worker_nodes}")
        os.chdir(previous_dir)

        # Install OCP on rhel nodes
        rhel_install = OCPINSTALLRHEL(rhel_worker_nodes)
        rhel_install.prepare_rhel_nodes()
        rhel_install.execute_ansible_playbook()

        # Give the new nodes some time to settle down
        time.sleep(self.wait_time)

        # wait for nodes to be in READY state
        wait_for_nodes_status(timeout=300)
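
For illustration, a standalone sketch of the two terraform output shapes parsed above; the sample strings are invented:

import json

# Legacy (pre-folder-structure) output: a JSON object with a "value" list.
legacy_out = '{"value": ["10.0.0.1", "10.0.0.2"]}'
print(json.loads(legacy_out)["value"])  # ['10.0.0.1', '10.0.0.2']

# Folder-structure output: a comma-separated string of quoted IPs.
folder_out = '"10.0.0.1","10.0.0.2"\n'
print(folder_out.strip().replace('"', "").split(","))  # ['10.0.0.1', '10.0.0.2']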