Code Example #1
    def test_must_gather(self):
        """
        Tests functionality of: oc adm must-gather

        """

        # Make logs root directory
        logger.info("Creating logs Directory")
        directory = self.make_directory()
        logger.info(f"Creating {directory}_ocs_logs - Done!")

        # Collect OCS logs
        logger.info("Collecting Logs")
        collect_ocs_logs(dir_name=directory, ocp=False)
        logger.info("Collecting logs - Done!")

        # Compare running pods list to "/pods" subdirectories
        logger.info("Checking logs tree")
        logs = self.get_log_directories(directory)
        pods = self.get_ocs_pods()
        logger.info(f"Logs: {logs}")
        logger.info(f"pods list: {pods}")
        assert set(sorted(logs)) == set(sorted(pods)), (
            "List of openshift-storage pods is not equal to list of logs directories. "
            f"List of pods: {pods}, "
            f"list of log directories: {logs}"
        )

        # 2nd test: Verify logs file are not empty
        logs_dir_list = self.search_log_files(directory)
        assert self.check_file_size(logs_dir_list), (
            "One or more log files are empty"
        )
Code Example #2
File: ocscilib.py  Project: agarwal-mudit/ocs-ci
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    # we only look at actual failing test calls, not setup/teardown
    if (rep.when == "call" and rep.failed
            and ocsci_config.RUN.get('cli_params').get('collect-logs')):
        test_case_name = item.name
        mcg = True if any(x in item.location[0]
                          for x in ['mcg', 'ecosystem']) else False
        try:
            collect_ocs_logs(dir_name=test_case_name, mcg=mcg)
        except Exception as ex:
            log.error(f"Failed to collect OCS logs. Error: {ex}")

    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if ((rep.when == "setup" or rep.when == "call") and rep.failed
            and item.get_closest_marker('gather_metrics_on_fail')):
        metrics = item.get_closest_marker('gather_metrics_on_fail').args
        try:
            collect_prometheus_metrics(metrics, f'{item.name}-{call.when}',
                                       call.start, call.stop)
        except Exception as ex:
            log.error(f"Failed to collect prometheus metrics. Error: {ex}")

    # Get the performance metrics when tests fails for scale or performance tag
    from tests.helpers import collect_performance_stats
    if ((rep.when == "setup" or rep.when == "call") and rep.failed
            and (item.get_closest_marker('scale')
                 or item.get_closest_marker('performance'))):
        test_case_name = item.name
        try:
            collect_performance_stats(test_case_name)
        except Exception as ex:
            log.error(f"Failed to collect performance stats. Error: {ex}")
Code Example #3
File: ocscilib.py  Project: oritwas/ocs-ci
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    # we only look at actual failing test calls, not setup/teardown
    if (
        rep.when == "call"
        and rep.failed
        and ocsci_config.RUN.get('cli_params').get('collect-logs')
    ):
        test_case_name = item.name
        collect_ocs_logs(test_case_name)

    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and item.get_closest_marker('gather_metrics_on_fail')
    ):
        metrics = item.get_closest_marker('gather_metrics_on_fail').args
        collect_prometheus_metrics(
            metrics,
            f'{item.name}-{call.when}',
            call.start,
            call.stop
        )
Code Example #4
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    # we only look at actual failing test calls, not setup/teardown
    if (rep.when == "call" and rep.failed
            and ocsci_config.RUN.get('cli_params').get('collect-logs')):
        test_case_name = item.name
        collect_ocs_logs(test_case_name)
Code Example #5
    def collect_must_gather(self):
        """
        Collect ocs_must_gather and copy the logs to a temporary folder.

        """
        temp_folder = tempfile.mkdtemp()
        collect_ocs_logs(dir_name=temp_folder, ocp=False)
        self.root = temp_folder + "_ocs_logs"
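The _ocs_logs suffix used here also shows up in the other snippets (for example, the os.walk(directory + "_ocs_logs") call in Code Example #8), which suggests collect_ocs_logs writes into a directory named <dir_name>_ocs_logs rather than into dir_name itself. A short hedged check of that convention, with the import path assumed, could look like this:

import os
import tempfile

# Import path assumed from a typical ocs-ci layout; adjust if it differs.
from ocs_ci.utility.utils import collect_ocs_logs

temp_folder = tempfile.mkdtemp()
collect_ocs_logs(dir_name=temp_folder, ocp=False)
# The "_ocs_logs" suffix is inferred from the examples on this page,
# not from documented API behaviour.
logs_root = temp_folder + "_ocs_logs"
assert os.path.isdir(logs_root), f"Expected {logs_root} to be created"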
Code Example #6
File: ocscilib.py  Project: ramkiperiy/ocs-ci
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    # we only look at actual failing test calls, not setup/teardown
    if rep.failed and ocsci_config.RUN.get("cli_params").get("collect-logs"):
        test_case_name = item.name
        ocp_logs_collection = (True if any(x in item.location[0] for x in [
            "ecosystem",
            "e2e/performance",
            "test_ceph_csidriver_runs_on_non_ocs_nodes",
        ]) else False)
        ocs_logs_collection = (False if any(
            x in item.location[0] for x in ["_ui", "must_gather"]) else True)
        mcg_logs_collection = (True if any(
            x in item.location[0] for x in ["mcg", "ecosystem"]) else False)
        try:
            if not ocsci_config.RUN.get("is_ocp_deployment_failed"):
                collect_ocs_logs(
                    dir_name=test_case_name,
                    ocp=ocp_logs_collection,
                    ocs=ocs_logs_collection,
                    mcg=mcg_logs_collection,
                )
        except Exception:
            log.exception("Failed to collect OCS logs")

    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if ((rep.when == "setup" or rep.when == "call") and rep.failed
            and item.get_closest_marker("gather_metrics_on_fail")):
        metrics = item.get_closest_marker("gather_metrics_on_fail").args
        try:
            collect_prometheus_metrics(metrics, f"{item.name}-{call.when}",
                                       call.start, call.stop)
        except Exception:
            log.exception("Failed to collect prometheus metrics")

    # Get the performance metrics when tests fails for scale or performance tag
    from ocs_ci.helpers.helpers import collect_performance_stats

    if ((rep.when == "setup" or rep.when == "call") and rep.failed
            and (item.get_closest_marker("scale")
                 or item.get_closest_marker("performance"))):
        test_case_name = item.name
        try:
            collect_performance_stats(test_case_name)
        except Exception:
            log.exception("Failed to collect performance stats")
Code Example #7
    def deploy_cluster(self, log_cli_level='DEBUG'):
        """
        We are handling both OCP and OCS deployment here based on flags

        Args:
            log_cli_level (str): log level for installer (default: DEBUG)
        """
        if not config.ENV_DATA['skip_ocp_deployment']:
            if is_cluster_running(self.cluster_path):
                logger.warning(
                    "OCP cluster is already running, skipping installation")
            else:
                try:
                    self.deploy_ocp(log_cli_level)
                    self.post_ocp_deploy()
                except Exception as e:
                    logger.error(e)
                    if config.REPORTING['gather_on_deploy_failure']:
                        collect_ocs_logs('deployment', ocs=False)
                    raise

        if not config.ENV_DATA['skip_ocs_deployment']:
            try:
                self.deploy_ocs()
            except Exception as e:
                logger.error(e)
                if config.REPORTING['gather_on_deploy_failure']:
                    # Let's do the collections separately to guard against one
                    # of them failing
                    collect_ocs_logs('deployment', ocs=False)
                    collect_ocs_logs('deployment', ocp=False)
                raise
        else:
            logger.warning("OCS deployment will be skipped")
Code Example #8
File: test_must_gather.py  Project: humblec/ocs-ci
    def test_must_gather(self):
        """
        Tests functionality of: oc adm must-gather

        """
        # Fetch pod details
        pods = pod.get_all_pods(namespace='openshift-storage')
        pods = [each.name for each in pods]

        # Make logs root directory
        logger.info("Creating logs Directory")
        directory = self.make_directory()
        logger.info(f"Creating {directory}_ocs_logs - Done!")

        # Collect OCS logs
        logger.info("Collecting Logs")
        collect_ocs_logs(dir_name=directory, ocp=False)
        logger.info("Collecting logs - Done!")

        # Compare running pods list to "/pods" subdirectories
        must_gather_helper = re.compile(r'must-gather-.*.-helper')
        logger.info("Checking logs tree")
        logs = [
            logs for logs in self.get_log_directories(directory)
            if not (must_gather_helper.match(logs))
        ]
        logger.info(f"Logs: {logs}")
        logger.info(f"pods list: {pods}")
        assert set(sorted(logs)) == set(sorted(pods)), (
            f"List of openshift-storage pods is not equal to list of logs "
            f"directories. List of pods: {pods}, list of log directories: {logs}"
        )

        # 2nd test: Verify logs file are not empty
        logs_dir_list = self.search_log_files(directory)
        assert self.check_file_size(logs_dir_list), (
            "One or more log files are empty")

        # Find must_gather_commands directory for verification
        for dir_root, dirs, files in os.walk(directory + "_ocs_logs"):
            if os.path.basename(dir_root) == 'must_gather_commands':
                logger.info(
                    f"Found must_gather_commands directory - {dir_root}")
                assert 'json_output' in dirs, (
                    "json_output directory is not present in "
                    "must_gather_commands directory.")
                assert files, (
                    "No files present in must_gather_commands directory.")
                cmd_files_path = [
                    os.path.join(dir_root, file_name) for file_name in files
                ]
                json_output_dir = os.path.join(dir_root, 'json_output')
                break

        # Verify that command output files are present as expected
        assert sorted(constants.MUST_GATHER_COMMANDS) == sorted(files), (
            f"Actual and expected commands output files are not matching.\n"
            f"Actual: {files}\nExpected: {constants.MUST_GATHER_COMMANDS}")

        # Verify that files for command output in json are present as expected
        commands_json = os.listdir(json_output_dir)
        assert sorted(
            constants.MUST_GATHER_COMMANDS_JSON) == sorted(commands_json), (
                f"Actual and expected json output commands files are not "
                f"matching.\nActual: {commands_json}\n"
                f"Expected: {constants.MUST_GATHER_COMMANDS_JSON}")

        # Verify that command output files are not empty
        empty_files = []
        json_cmd_files_path = [
            os.path.join(json_output_dir, file_name)
            for file_name in commands_json
        ]
        for file_path in cmd_files_path + json_cmd_files_path:
            if not os.path.getsize(file_path) > 0:
                empty_files.append(file_path)
        assert not empty_files, f"These files are empty: {empty_files}"
Code Example #9
File: deployment.py  Project: AaruniAggarwal/ocs-ci
    def deploy_cluster(self, log_cli_level="DEBUG"):
        """
        We are handling both OCP and OCS deployment here based on flags

        Args:
            log_cli_level (str): log level for installer (default: DEBUG)
        """
        if not config.ENV_DATA["skip_ocp_deployment"]:
            if is_cluster_running(self.cluster_path):
                logger.warning(
                    "OCP cluster is already running, skipping installation")
            else:
                try:
                    self.deploy_ocp(log_cli_level)
                    self.post_ocp_deploy()
                except Exception as e:
                    config.RUN["is_ocp_deployment_failed"] = True
                    logger.error(e)
                    if config.REPORTING["gather_on_deploy_failure"]:
                        collect_ocs_logs("deployment", ocs=False)
                    raise

        # Deployment of network split scripts via machineconfig API happens
        # before OCS deployment.
        if config.DEPLOYMENT.get("network_split_setup"):
            master_zones = config.ENV_DATA.get("master_availability_zones")
            worker_zones = config.ENV_DATA.get("worker_availability_zones")
            # special external zone, which is directly defined by ip addr list,
            # such zone could represent external services, which we could block
            # access to via ax-bx-cx network split
            if config.DEPLOYMENT.get("network_split_zonex_addrs") is not None:
                x_addr_list = config.DEPLOYMENT[
                    "network_split_zonex_addrs"].split(",")
            else:
                x_addr_list = None
            if config.DEPLOYMENT.get("arbiter_deployment"):
                arbiter_zone = self.get_arbiter_location()
                logger.debug("detected arbiter zone: %s", arbiter_zone)
            else:
                arbiter_zone = None
            # TODO: use temporary directory for all temporary files of
            # ocs-deployment, not just here in this particular case
            tmp_path = Path(tempfile.mkdtemp(prefix="ocs-ci-deployment-"))
            logger.debug("created temporary directory %s", tmp_path)
            setup_netsplit(tmp_path, master_zones, worker_zones, x_addr_list,
                           arbiter_zone)

        if not config.ENV_DATA["skip_ocs_deployment"]:
            try:
                self.deploy_ocs()

                if config.REPORTING["collect_logs_on_success_run"]:
                    collect_ocs_logs("deployment",
                                     ocp=False,
                                     status_failure=False)
            except Exception as e:
                logger.error(e)
                if config.REPORTING["gather_on_deploy_failure"]:
                    # Let's do the collections separately to guard against one
                    # of them failing
                    collect_ocs_logs("deployment", ocs=False)
                    collect_ocs_logs("deployment", ocp=False)
                raise
        else:
            logger.warning("OCS deployment will be skipped")