import logging
from collections import OrderedDict
from pprint import pformat

logger = logging.getLogger(__name__)

COMBINED = 'combined'
# Large, structured metrics that are not worth dumping to the log.
SKIP_EVAL_METRICS = {'confusion_matrix', 'roc_curve'}


def print_evaluation_stats(test_stats):
    for output_field, result in test_stats.items():
        # Skip the 'combined' entry unless there are multiple output fields
        # (i.e. more than two entries counting 'combined' itself).
        if output_field != COMBINED or len(test_stats) > 2:
            logger.info(f"\n===== {output_field} =====")
            for metric in sorted(result):
                if metric not in SKIP_EVAL_METRICS:
                    value = result[metric]
                    if isinstance(value, OrderedDict):
                        # repr_ordered_dict is a formatting helper defined
                        # elsewhere in this module.
                        value_repr = repr_ordered_dict(value)
                    else:
                        value_repr = pformat(value, indent=2)
                    logger.info(f"{metric}: {value_repr}")
def print_test_results(test_stats):
    # Legacy variant of print_evaluation_stats with the constants inlined.
    for output_field, result in test_stats.items():
        if output_field != 'combined' or len(test_stats) > 2:
            logger.info('\n===== {} ====='.format(output_field))
            for measure in sorted(result):
                if measure not in ('confusion_matrix', 'roc_curve'):
                    value = result[measure]
                    if isinstance(value, OrderedDict):
                        value_repr = repr_ordered_dict(value)
                    else:
                        value_repr = pformat(value, indent=2)
                    logger.info('{0}: {1}'.format(measure, value_repr))
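
# A minimal usage sketch, assuming test_stats maps each output field name to a
# dict of metric values, with an optional 'combined' entry, as both functions
# above expect. The field name 'intent' and the numbers are illustrative
# placeholders, not real evaluation output.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(message)s')

    sample_stats = {
        'intent': {
            'accuracy': 0.91,
            'loss': 0.28,
            'confusion_matrix': [[40, 2], [3, 55]],  # in SKIP_EVAL_METRICS, not printed
        },
        'combined': {
            'loss': 0.28,
        },
    }
    # len(sample_stats) == 2, so the 'combined' entry is suppressed and only
    # the per-field metrics for 'intent' are logged.
    print_evaluation_stats(sample_stats)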