import logging

import pytest

# ocsci_config, collect_ocs_logs and collect_prometheus_metrics are assumed to
# be provided by the surrounding ocs-ci conftest imports (in current layouts,
# ocs_ci.framework.config and ocs_ci.ocs.utils).
log = logging.getLogger(__name__)


# The `outcome = yield` pattern only works inside a hookwrapper, so the hook
# needs this decorator to run at all.
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()

    # we only look at actual failing test calls, not setup/teardown
    if (
        rep.when == "call"
        and rep.failed
        and ocsci_config.RUN.get('cli_params').get('collect-logs')
    ):
        test_case_name = item.name
        mcg = any(x in item.location[0] for x in ['mcg', 'ecosystem'])
        try:
            collect_ocs_logs(dir_name=test_case_name, mcg=mcg)
        except Exception as ex:
            log.error(f"Failed to collect OCS logs. Error: {ex}")

    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and item.get_closest_marker('gather_metrics_on_fail')
    ):
        metrics = item.get_closest_marker('gather_metrics_on_fail').args
        try:
            collect_prometheus_metrics(
                metrics, f'{item.name}-{call.when}', call.start, call.stop
            )
        except Exception as ex:
            log.error(f"Failed to collect prometheus metrics. Error: {ex}")

    # Get the performance metrics when a test tagged scale or performance fails
    from tests.helpers import collect_performance_stats

    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and (item.get_closest_marker('scale') or item.get_closest_marker('performance'))
    ):
        test_case_name = item.name
        try:
            collect_performance_stats(test_case_name)
        except Exception as ex:
            log.error(f"Failed to collect performance stats. Error: {ex}")
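# A minimal sketch of how a test opts in to the gather_metrics_on_fail
# handling above. The metric names ("ceph_osd_up",
# "ceph_cluster_total_used_bytes") are hypothetical placeholders, not queries
# defined by this file.
@pytest.mark.gather_metrics_on_fail("ceph_osd_up", "ceph_cluster_total_used_bytes")
def test_osd_health_example():
    # If this test fails during setup or call, the hook above reads the
    # marker's args and passes them as the `metrics` tuple to
    # collect_prometheus_metrics(), together with the phase and timestamps.
    assert True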
# A slimmer variant of the same hook: no error handling around the collectors
# and no MCG switch.
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()

    # we only look at actual failing test calls, not setup/teardown
    if (
        rep.when == "call"
        and rep.failed
        and ocsci_config.RUN.get('cli_params').get('collect-logs')
    ):
        test_case_name = item.name
        collect_ocs_logs(test_case_name)

    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and item.get_closest_marker('gather_metrics_on_fail')
    ):
        metrics = item.get_closest_marker('gather_metrics_on_fail').args
        collect_prometheus_metrics(
            metrics, f'{item.name}-{call.when}', call.start, call.stop
        )
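# A hedged sketch of the wiring the collect-logs check presupposes: a
# --collect-logs CLI flag whose value ends up under
# ocsci_config.RUN["cli_params"]. The real registration lives elsewhere in the
# ocs-ci framework; the option handling below is illustrative only.
def pytest_addoption(parser):
    parser.addoption(
        "--collect-logs",
        action="store_true",
        default=False,
        help="collect OCS logs on test failure",
    )


def pytest_configure(config):
    # Mirror the flag into the framework config so the hook can read it via
    # ocsci_config.RUN.get("cli_params").get("collect-logs").
    ocsci_config.RUN.setdefault("cli_params", {})["collect-logs"] = config.getoption(
        "--collect-logs"
    )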
# A fuller variant with per-component log switches, a guard for failed OCP
# deployments, and log.exception() error reporting.
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()

    # here logs are collected for any failing phase, not only the test call
    if rep.failed and ocsci_config.RUN.get("cli_params").get("collect-logs"):
        test_case_name = item.name
        ocp_logs_collection = any(
            x in item.location[0]
            for x in [
                "ecosystem",
                "e2e/performance",
                "test_ceph_csidriver_runs_on_non_ocs_nodes",
            ]
        )
        ocs_logs_collection = not any(
            x in item.location[0] for x in ["_ui", "must_gather"]
        )
        mcg_logs_collection = any(
            x in item.location[0] for x in ["mcg", "ecosystem"]
        )
        try:
            if not ocsci_config.RUN.get("is_ocp_deployment_failed"):
                collect_ocs_logs(
                    dir_name=test_case_name,
                    ocp=ocp_logs_collection,
                    ocs=ocs_logs_collection,
                    mcg=mcg_logs_collection,
                )
        except Exception:
            log.exception("Failed to collect OCS logs")

    # Collect Prometheus metrics if specified in gather_metrics_on_fail marker
    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and item.get_closest_marker("gather_metrics_on_fail")
    ):
        metrics = item.get_closest_marker("gather_metrics_on_fail").args
        try:
            collect_prometheus_metrics(
                metrics, f"{item.name}-{call.when}", call.start, call.stop
            )
        except Exception:
            log.exception("Failed to collect prometheus metrics")

    # Get the performance metrics when a test tagged scale or performance fails
    from ocs_ci.helpers.helpers import collect_performance_stats

    if (
        (rep.when == "setup" or rep.when == "call")
        and rep.failed
        and (
            item.get_closest_marker("scale")
            or item.get_closest_marker("performance")
        )
    ):
        test_case_name = item.name
        try:
            collect_performance_stats(test_case_name)
        except Exception:
            log.exception("Failed to collect performance stats")
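# The three path-based switches in the variant above are easier to verify in
# isolation. This helper is a hypothetical readability sketch that reproduces
# their logic; the repository itself computes the flags inline.
def log_collection_flags(test_path):
    ocp = any(
        x in test_path
        for x in [
            "ecosystem",
            "e2e/performance",
            "test_ceph_csidriver_runs_on_non_ocs_nodes",
        ]
    )
    ocs = not any(x in test_path for x in ["_ui", "must_gather"])
    mcg = any(x in test_path for x in ["mcg", "ecosystem"])
    return {"ocp": ocp, "ocs": ocs, "mcg": mcg}


# For example, a UI test path leaves every collector switched off:
assert log_collection_flags("tests/e2e/_ui/test_login.py") == {
    "ocp": False,
    "ocs": False,
    "mcg": False,
}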