def test_metrics(testdata_df):
    """Basic metric attributes should be populated from the fixture frame."""
    pa = analyze.PredictionAnalyzer(testdata_df)
    n_rows = len(testdata_df)

    # Ground truth and predictions come straight from the dataframe columns.
    expected_true = testdata_df["target"].tolist()
    expected_pred = testdata_df["max_likelihood"].tolist()
    assert pa.y_true == expected_true
    assert pa.y_pred == expected_pred

    # Aggregate stats: scalar loss, per-class support summing to row count,
    # and one true-positive flag per row.
    assert isinstance(pa.mean_loss, float)
    assert np.sum(pa.support) == n_rows
    assert len(pa.tps) == n_rows
def test_save_load_analyzer(workspace, testdata_df):
    """An analyzer round-trips through save() and load() unchanged."""
    test_set = "rwc"
    original = analyze.PredictionAnalyzer(testdata_df, test_set=test_set)

    pickle_path = os.path.join(workspace, "analyzer.pkl")
    original.save(pickle_path)
    assert os.path.exists(pickle_path)

    # Prove you can load it back in, too.
    restored = analyze.PredictionAnalyzer.load(pickle_path, test_set)
    assert original.y_true == restored.y_true
    assert original.y_pred == restored.y_pred
def test_dataset_summary(testdata_df):
    """dataset_summary() reports one row per dataset plus an overall row."""
    pa = analyze.PredictionAnalyzer(testdata_df)
    summary = pa.dataset_summary()

    expected_rows = {"overall", "rwc", "uiowa", "philharmonia"}
    assert set(summary.index) == expected_rows
    assert summary.shape[1] == 4
def test_dataset_class_wise(testdata_df):
    """dataset_class_wise() indexes its outer level by dataset name."""
    pa = analyze.PredictionAnalyzer(testdata_df)
    class_wise = pa.dataset_class_wise()

    expected_datasets = {"overall", "rwc", "uiowa", "philharmonia"}
    assert set(class_wise.index.levels[0]) == expected_datasets
    assert class_wise.shape[1] == 4
def test_summary_scores(testdata_df):
    """summary_scores() yields finite values for the four headline metrics."""
    pa = analyze.PredictionAnalyzer(testdata_df, test_set="rwc")
    summary = pa.summary_scores()

    expected_metrics = {"precision", "recall", "f1score", "accuracy"}
    assert set(summary.index) == expected_metrics
    # Every metric should be a real number, never NaN/inf.
    assert all(np.isfinite(summary))
def test_class_wise_scores(testdata_df):
    """class_wise_scores() has the expected columns, all finite."""
    pa = analyze.PredictionAnalyzer(testdata_df, test_set="rwc")
    per_class = pa.class_wise_scores()

    expected_columns = {"precision", "recall", "f1score", "support"}
    assert set(per_class.columns) == expected_columns
    # No NaN or infinite entries anywhere in the score table.
    assert all(np.isfinite(per_class))
def test_prediction_analyzer(testdata_df):
    """Smoke test: constructing an analyzer with a test_set succeeds."""
    pa = analyze.PredictionAnalyzer(testdata_df, test_set="rwc")
    assert pa is not None