def test_multi_ops_multiple_times_sequentially():
    """Run 3 times test_full_psds using delete_ops to reset the metric"""
    gt = pd.read_csv(join(DATADIR, "baseline_validation_gt.csv"), sep="\t")
    metadata = pd.read_csv(join(DATADIR, "baseline_validation_metadata.csv"),
                           sep="\t")
    dets = []
    dets.append(
        pd.read_csv(join(DATADIR, "baseline_validation_AA_0.005.csv"),
                    sep="\t"))
    for k in range(5):
        # Sub-sample the detections to create five additional, distinct
        # operating points.
        dets.append(dets[0].sample(4500, random_state=7 * k))

    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    ref_psds_value = 0.07211376135412327
    for k in range(3):
        for det_t in dets:
            psds_eval.add_operating_point(det_t)
        psds = psds_eval.psds(0.0, 0.0, 100)
        assert psds.value == pytest.approx(ref_psds_value), \
            "PSDS was calculated incorrectly"
        psds_eval.clear_all_operating_points()
        assert psds_eval.num_operating_points() == 0
        assert len(psds_eval.operating_points) == 0

def test_example_1_paper_icassp(metadata):
    """Run PSDSEval on some sample data from the ICASSP paper"""
    det = pd.read_csv(join(DATADIR, "test_1.det"), sep="\t")
    gt = pd.read_csv(join(DATADIR, "test_1.gt"), sep="\t")
    # Record the checksums of the incoming data
    gt_hash = pd.util.hash_pandas_object(gt).values
    det_hash = pd.util.hash_pandas_object(det).values
    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    # matrix (n_class, n_class) last col/row is world (for FP)
    exp_counts = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [1, 0, 1, 0],
                           [0, 0, 0, 0]])
    tpr = np.array([1., 1., 1.])
    fpr = np.array([12.857143, 12.857143, 0.])
    ctr = np.array([[0., 0., 0.], [0., 0., 0.], [720., 0., 0.]])
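    # Note (inferred, not from the original source): fpr appears to be in
    # false positives per hour; one world FP over a 280 s dataset gives
    # 3600/280 = 12.857143, and the single ctr entry matches one
    # cross-trigger normalised the same way over a 5 s span (3600/5 = 720).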
    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts)
    np.testing.assert_allclose(psds_eval.operating_points.tpr[0], tpr)
    np.testing.assert_allclose(psds_eval.operating_points.fpr[0], fpr)
    np.testing.assert_allclose(psds_eval.operating_points.ctr[0], ctr)
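    # psds() takes (alpha_ct, alpha_st, max_efpr) positionally; compare the
    # keyword call psds(alpha_ct=0, alpha_st=0, max_efpr=100) in
    # psds_results further down.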
    psds1 = psds_eval.psds(0.0, 0.0, 100.0)
    assert psds1.value == pytest.approx(0.9142857142857143), \
        "PSDS value was calculated incorrectly"
    # Check that the data has not been messed about with
    assert np.all(pd.util.hash_pandas_object(gt).values == gt_hash)
    assert np.all(pd.util.hash_pandas_object(det).values == det_hash)

def test_files_from_dcase(metadata):
    """Run PSDSEval on some example data from DCASE"""
    det = pd.read_csv(join(DATADIR, "Y23R6_ppquxs_247.000_257000.det"),
                      sep="\t")
    gt = pd.read_csv(join(DATADIR, "Y23R6_ppquxs_247.000_257000.gt"), sep="\t")
    # Record the checksums of the incoming data
    gt_hash = pd.util.hash_pandas_object(gt).values
    det_hash = pd.util.hash_pandas_object(det).values
    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    # matrix (n_class, n_class) last col/row is world (for FP)
    exp_counts = np.array([[1., 0., 1.], [1., 4., 0.], [0., 0., 0.]])
    tpr = np.array([0.25, 1.])
    fpr = np.array([12.857143, 0.])
    ctr = np.array([[0., 0.], [600.40026684, 0.]])
    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts)
    np.testing.assert_allclose(psds_eval.operating_points.tpr[0], tpr)
    np.testing.assert_allclose(psds_eval.operating_points.fpr[0], fpr)
    np.testing.assert_allclose(psds_eval.operating_points.ctr[0], ctr)
    psds1 = psds_eval.psds(0.0, 0.0, 100.0)
    assert psds1.value == pytest.approx(0.6089285714285714), \
        "PSDS value was calculated incorrectly"
    # Check that the data has not been messed about with
    assert np.all(pd.util.hash_pandas_object(gt).values == gt_hash)
    assert np.all(pd.util.hash_pandas_object(det).values == det_hash)

def test_empty_det():
    """Run the PSDSEval class with tables that contain no detections"""
    gt = pd.DataFrame({
        "filename": ["test.wav"],
        "onset": [2.4],
        "offset": [5.9],
        "event_label": ["c1"]
    })
    det = pd.DataFrame(columns=["filename", "onset", "offset", "event_label"])
    metadata = pd.DataFrame({"filename": ["test.wav"], "duration": [10.0]})
    # Record the checksums of the incoming data
    meta_hash = pd.util.hash_pandas_object(metadata).values
    gt_hash = pd.util.hash_pandas_object(gt).values
    det_hash = pd.util.hash_pandas_object(det).values
    psds_eval = PSDSEval(class_names=['c1'],
                         metadata=metadata,
                         ground_truth=gt)
    exp_counts = np.array([[0, 0], [0, 0]])
    tpr = np.array([0.])
    fpr = np.array([0.])
    ctr = np.array([[0.]])
    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts)
    np.testing.assert_allclose(psds_eval.operating_points.tpr[0], tpr)
    np.testing.assert_allclose(psds_eval.operating_points.fpr[0], fpr)
    np.testing.assert_allclose(psds_eval.operating_points.ctr[0], ctr)
    psds1 = psds_eval.psds(0.0, 0.0, 100.0)
    assert psds1.value == pytest.approx(0.0), \
        "PSDS value was calculated incorrectly"
    # Check that the data has not been messed about with
    assert np.all(pd.util.hash_pandas_object(gt).values == gt_hash)
    assert np.all(pd.util.hash_pandas_object(metadata).values == meta_hash)
    assert np.all(pd.util.hash_pandas_object(det).values == det_hash)

def test_example_4(metadata):
    """Run PSDSEval on some sample data and ensure the results are correct"""
    det = pd.read_csv(join(DATADIR, "test_4.det"), sep="\t")
    gt = pd.read_csv(join(DATADIR, "test_4.gt"), sep="\t")
    # Record the checksums of the incoming data
    gt_hash = pd.util.hash_pandas_object(gt).values
    det_hash = pd.util.hash_pandas_object(det).values

    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    # matrix (n_class, n_class) last col/row is world (for FP)
    exp_counts = np.array([[2, 0, 0, 1, 0], [0, 0, 0, 1, 3], [0, 1, 1, 0, 0],
                           [0, 0, 0, 0, 2], [0, 0, 0, 0, 0]])
    tpr = np.array([1., 0., 1., 0.])
    fpr = np.array([0, 38.57142857, 0, 25.71428571])
    ctr = np.array([[0, 0, 0, 87.80487805], [0, 0, 0, 300],
                    [0, 156.52173913, 0, 0], [0, 0, 0, 0]])
    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts)
    np.testing.assert_allclose(psds_eval.operating_points.tpr[0], tpr)
    np.testing.assert_allclose(psds_eval.operating_points.fpr[0], fpr)
    np.testing.assert_allclose(psds_eval.operating_points.ctr[0], ctr)
    psds1 = psds_eval.psds(0.0, 0.0, 100.0)
    assert psds1.value == pytest.approx(0.5), \
        "PSDS value was calculated incorrectly"
    # Check that the data has not been messed about with
    assert np.all(pd.util.hash_pandas_object(gt).values == gt_hash)
    assert np.all(pd.util.hash_pandas_object(det).values == det_hash)
Example #6
def test_add_operating_point_with_no_metadata():
    """Ensure that add_operating_point raises an error when metadata is none"""
    det = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    psds_eval = PSDSEval(metadata=None, ground_truth=None)
    with pytest.raises(PSDSEvalError,
                       match="Ground Truth must be provided "
                             "before adding the first operating point"):
        psds_eval.add_operating_point(det)
Example #7
def score_all(hyp_data, ref_data):
    # hyp and ref cover the same set of files, so one np.unique call suffices
    metadata = pd.DataFrame(np.unique(hyp_data['filename']),
                            columns=['filename'])
    metadata = metadata.assign(duration=10)  # assume 10 s per file

    psds_eval = PSDSEval(ground_truth=ref_data, metadata=metadata)
    info = {"threshold": 0.5}
    psds_eval.add_operating_point(hyp_data, info=info)
    return psds_eval.psds(max_efpr=100).value
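
# A minimal usage sketch for score_all (hypothetical data, not from the
# original source); the column convention matches the tests above:
#   hyp = pd.DataFrame({"filename": ["a.wav"], "onset": [0.5],
#                       "offset": [2.0], "event_label": ["c1"]})
#   ref = pd.DataFrame({"filename": ["a.wav"], "onset": [0.4],
#                       "offset": [2.1], "event_label": ["c1"]})
#   print(score_all(hyp, ref))  # PSDS value for this single operating point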
Example #8
def test_add_operating_point_with_zero_detections():
    """An error must not be raised when there are no detections"""
    det = pd.read_csv(os.path.join(DATADIR, "empty.det"), sep="\t")
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(metadata=metadata, ground_truth=gt)
    psds_eval.add_operating_point(det)
    assert psds_eval.num_operating_points() == 1
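    # The operating point id below is the SHA-256 digest of empty input,
    # which is consistent with hashing an empty detection table.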
    assert psds_eval.operating_points["id"][0] == \
        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
Example #9
def test_that_add_operating_point_added_a_point():
    """Ensure add_operating_point adds an operating point correctly"""
    det = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(metadata=metadata, ground_truth=gt)
    psds_eval.add_operating_point(det)
    assert psds_eval.num_operating_points() == 1
    assert psds_eval.operating_points["id"][0] == \
        "6f504797195d2df3bae13e416b8bf96ca89ec4e4e4d031dadadd72e382640387"
Example #10
def test_add_operating_point_with_empty_dataframe():
    """Ensure add_operating_point raises an error when given an
    incorrect table"""
    det = pd.DataFrame()
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(metadata=metadata, ground_truth=gt)
    with pytest.raises(PSDSEvalError,
                       match="The data columns need to match the following"):
        psds_eval.add_operating_point(det)
Example #11
def test_that_add_operating_point_added_a_point():
    """Ensure add_operating_point adds an operating point correctly"""
    det = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(metadata=metadata, ground_truth=gt)
    psds_eval.add_operating_point(det)
    assert psds_eval.num_operating_points() == 1
    assert psds_eval.operating_points["id"][0] == \
        "423089ce6d6554174881f69f9d0e57a8be9f5bc682dfce301462a8753aa6ec5f"
Example #12
def test_add_operating_point_with_wrong_data_format():
    """Ensure add_operating_point raises an error when the input is not a
    pandas table"""
    det = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t").to_numpy()
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(metadata=metadata, ground_truth=gt)
    with pytest.raises(PSDSEvalError,
                       match="The detection data must be provided "
                             "in a pandas.DataFrame"):
        psds_eval.add_operating_point(det)

def test_two_operating_points_second_has_filtered_out_gtc():
    """Tests a case where the gt coverage df becomes empty for the second op"""
    gt = pd.read_csv(join(DATADIR, "test_1.gt"), sep="\t")
    metadata = pd.read_csv(join(DATADIR, "test.metadata"), sep="\t")
    psds_eval = PSDSEval(1, 1, 1, ground_truth=gt, metadata=metadata)
    det = pd.read_csv(join(DATADIR, "test_1.det"), sep="\t")
    det2 = pd.read_csv(join(DATADIR, "test_1a.det"), sep="\t")
    psds_eval.add_operating_point(det)
    psds_eval.add_operating_point(det2)
    assert psds_eval.psds(0.0, 0.0, 100.0).value == pytest.approx(0.0), \
        "PSDS value was calculated incorrectly"

def test_two_operating_points_one_with_no_detections():
    """Tests a case where the DTC and GTC dataframes are empty for the
    second operating point"""
    gt = pd.read_csv(join(DATADIR, "test_1.gt"), sep="\t")
    metadata = pd.read_csv(join(DATADIR, "test.metadata"), sep="\t")
    psds_eval = PSDSEval(ground_truth=gt, metadata=metadata)
    det = pd.read_csv(join(DATADIR, "test_1.det"), sep="\t")
    det2 = pd.read_csv(join(DATADIR, "test_4.det"), sep="\t")
    psds_eval.add_operating_point(det)
    psds_eval.add_operating_point(det2)
    assert psds_eval.psds(0.0, 0.0, 100.0).value == \
        pytest.approx(0.9142857142857143), \
        "PSDS value was calculated incorrectly"
Example #15
def compute_psds_from_operating_points(list_predictions,
                                       groundtruth_df,
                                       meta_df,
                                       dtc_threshold=0.5,
                                       gtc_threshold=0.5,
                                       cttc_threshold=0.3):
    psds = PSDSEval(dtc_threshold,
                    gtc_threshold,
                    cttc_threshold,
                    ground_truth=groundtruth_df,
                    metadata=meta_df)
    for prediction_df in list_predictions:
        psds.add_operating_point(prediction_df)
    return psds
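
# A usage sketch (hypothetical file list, not from the original source):
#   predictions = [pd.read_csv(f, sep="\t") for f in prediction_files]
#   psds_obj = compute_psds_from_operating_points(predictions, gt_df, meta_df)
#   print(psds_obj.psds(alpha_ct=0, alpha_st=0, max_efpr=100).value)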
Example #16
def test_add_operating_point_with_info_using_column_names():
    """Check for non-permitted keys in the info"""
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det1 = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    info1 = {"counts": 0, "threshold1": 1}
    psds_eval = PSDSEval(dtc_threshold=0.5, gtc_threshold=0.5,
                         cttc_threshold=0.3, ground_truth=gt,
                         metadata=metadata)

    with pytest.raises(PSDSEvalError,
                       match="the 'info' cannot contain the keys 'id', "
                             "'counts', 'tpr', 'fpr' or 'ctr'"):
        psds_eval.add_operating_point(det1, info=info1)
Example #17
def test_unknown_class_constraint_check():
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det1 = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    info1 = {"name": "test_1", "threshold1": 1}
    psds_eval = PSDSEval(dtc_threshold=0.5, gtc_threshold=0.5,
                         cttc_threshold=0.3, ground_truth=gt,
                         metadata=metadata)
    psds_eval.add_operating_point(det1, info=info1)
    constraints = pd.DataFrame([
        {"class_name": "class1", "constraint": "tpr", "value": 1.}])

    with pytest.raises(PSDSEvalError,
                       match="Unknown class: class1"):
        psds_eval.select_operating_points_per_class(constraints,
                                                    alpha_ct=1., beta=1.)
Example #18
def test_delete_ops():
    """Perform deletion of ops"""
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    det_2 = pd.read_csv(os.path.join(DATADIR, "test_1a.det"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(dtc_threshold=0.5, gtc_threshold=0.5,
                         cttc_threshold=0.3, ground_truth=gt,
                         metadata=metadata)

    assert psds_eval.operating_points.empty
    psds_eval.add_operating_point(det)
    psds_eval.add_operating_point(det_2)
    assert psds_eval.num_operating_points() == 2

    psds_eval.clear_all_operating_points()
    assert psds_eval.operating_points.empty
Example #19
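# table_name and raise_error are presumably supplied by a
# pytest.mark.parametrize decorator not shown in this excerpt.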
def test_add_operating_points_with_overlapping_events(table_name, raise_error):
    """Detections with overlapping events must raise an error"""
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det = pd.read_csv(os.path.join(DATADIR, table_name), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    if raise_error:
        with pytest.raises(
                PSDSEvalError,
                match="The detection dataframe provided has intersecting "
                "events/labels for the same class."):
            psds_eval.add_operating_point(det)
    else:
        psds_eval.add_operating_point(det)
        assert psds_eval.num_operating_points() == 1
Example #20
def test_impossible_constraint_check():
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det1 = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    info1 = {"name": "test_1", "threshold1": 1}
    psds_eval = PSDSEval(dtc_threshold=0.5, gtc_threshold=0.5,
                         cttc_threshold=0.3, ground_truth=gt,
                         metadata=metadata)
    psds_eval.add_operating_point(det1, info=info1)
    constraints = pd.DataFrame([
        {"class_name": "c2", "constraint": "fpr", "value": 11.},
        {"class_name": "c1", "constraint": "tpr", "value": 1.1}])
    chosen_op_points = \
        psds_eval.select_operating_points_per_class(constraints, alpha_ct=1.,
                                                    beta=1.)
    assert np.isnan(chosen_op_points.TPR[0]), \
        "NaN value is not returned for 0, 0 operating point"
    assert np.isnan(chosen_op_points.TPR[1]), \
        "NaN value is not returned for non-existing operating point"
Example #21
def test_add_same_operating_point_with_different_info():
    """Check the use of conflicting info for the same operating point"""
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det1 = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    info1 = {"name": "test_1", "threshold1": 1}
    info2 = {"name": "test_1_2", "threshold2": 0}
    psds_eval = PSDSEval(dtc_threshold=0.5, gtc_threshold=0.5,
                         cttc_threshold=0.3, ground_truth=gt,
                         metadata=metadata)
    psds_eval.add_operating_point(det1, info=info1)
    psds_eval.add_operating_point(det1, info=info2)
    assert psds_eval.num_operating_points() == 1
    assert psds_eval.operating_points.name[0] == "test_1", \
        "The info name is not correctly reported."
    assert psds_eval.operating_points.threshold1[0] == 1, \
        "The info threshold1 is not correctly reported."
    assert "threshold2" not in psds_eval.operating_points.columns, \
        "The info of ignored operating point modified the operating " \
        "points table."
Example #22
def test_full_dcase_validset():
    """Run PSDSEval on all the example data from DCASE"""
    det = pd.read_csv(join(DATADIR, "baseline_validation_AA_0.005.csv"),
                      sep="\t")
    gt = pd.read_csv(join(DATADIR, "baseline_validation_gt.csv"), sep="\t")
    metadata = pd.read_csv(join(DATADIR, "baseline_validation_metadata.csv"),
                           sep="\t")
    # Record the checksums of the incoming data
    meta_hash = pd.util.hash_pandas_object(metadata).values
    gt_hash = pd.util.hash_pandas_object(gt).values
    det_hash = pd.util.hash_pandas_object(det).values

    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    # matrix (n_class, n_class) last col/row is world (for FP)
    exp_counts = np.array(
        [[269, 9, 63, 41, 120, 13, 7, 18, 128, 2, 302],
         [5, 59, 4, 45, 29, 31, 35, 46, 86, 58, 416],
         [54, 17, 129, 19, 105, 13, 14, 16, 82, 20, 585],
         [37, 43, 8, 164, 56, 9, 63, 63, 87, 7, 1100],
         [45, 10, 79, 73, 278, 7, 24, 51, 154, 22, 1480],
         [14, 22, 11, 24, 30, 41, 51, 26, 62, 43, 386],
         [3, 20, 12, 136, 96, 35, 87, 103, 97, 27, 840],
         [8, 41, 13, 119, 93, 48, 135, 127, 185, 32, 662],
         [89, 120, 74, 493, 825, 203, 403, 187, 966, 89, 1340],
         [0, 83, 1, 12, 58, 27, 46, 46, 120, 67, 390],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts)
    psds1 = psds_eval.psds(0.0, 0.0, 100.0)
    # Check that all the psds metrics match
    assert psds1.value == pytest.approx(0.0044306914546640595), \
        "PSDS value was calculated incorrectly"
    # Check that the data has not been messed about with
    assert np.all(pd.util.hash_pandas_object(gt).values == gt_hash)
    assert np.all(pd.util.hash_pandas_object(metadata).values == meta_hash)
    assert np.all(pd.util.hash_pandas_object(det).values == det_hash)
Example #23
def test_retrieve_desired_operating_point():
    """Check if operating points can be found with requested constraints"""
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det1 = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    det2 = pd.read_csv(os.path.join(DATADIR, "test_2.det"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    info1 = {"name": "test_1", "threshold1": 1}
    info2 = {"name": "test_2", "threshold2": 0}
    psds_eval = PSDSEval(dtc_threshold=0.5, gtc_threshold=0.5,
                         cttc_threshold=0.3, ground_truth=gt,
                         metadata=metadata)
    psds_eval.add_operating_point(det1, info=info1)
    psds_eval.add_operating_point(det2, info=info2)
    constraints = pd.DataFrame([
        {"class_name": "c1", "constraint": "tpr", "value": 1.},
        {"class_name": "c1", "constraint": "tpr", "value": 0.8},
        {"class_name": "c2", "constraint": "fpr", "value": 13.},
        {"class_name": "c3", "constraint": "efpr", "value": 240.},
        {"class_name": "c3", "constraint": "efpr", "value": 26.},
        {"class_name": "c1", "constraint": "fscore", "value": np.nan}])
    chosen_op_points = \
        psds_eval.select_operating_points_per_class(constraints, alpha_ct=1.,
                                                    beta=1.)
    assert chosen_op_points.name[0] == "test_1", \
        "Correct operating point is not chosen for tpr criteria with equality"
    assert chosen_op_points.name[1] == "test_1", \
        "Correct operating point is not chosen for tpr criteria with " \
        "inequality"
    assert chosen_op_points.name[2] == "test_1", \
        "Correct operating point is not chosen for fpr criteria with " \
        "inequality"
    assert chosen_op_points.name[3] == "test_1", \
        "Correct operating point is not chosen for efpr criteria with " \
        "equality"
    assert chosen_op_points.name[4] == "test_1", \
        "Correct operating point is not chosen for efpr criteria with " \
        "inequality"
    assert chosen_op_points.name[5] == "test_1", \
        "Correct operating point is not chosen for fscore criteria"
    assert chosen_op_points.Fscore[5] == pytest.approx(2./3.), \
        "Correct operating point is not chosen for fscore criteria"
Example #24
def test_full_psds():
    """Run a full example of the PSDSEval and test the result"""
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    det = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)

    # matrix (n_class, n_class) last col/row is world (for FP)
    exp_counts = np.array([[1, 0, 0, 1], [0, 1, 0, 1], [1, 0, 1, 0],
                           [0, 0, 0, 0]])

    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts), \
        "Expected counts do not match"
    psds = psds_eval.psds(0.0, 0.0, 100.0)
    assert psds.value == pytest.approx(0.9142857142857143), \
        "PSDS was calculated incorrectly"
Example #25
def psds_results(predictions, gtruth_df, gtruth_durations):
    try:
        dtc_threshold = 0.5
        gtc_threshold = 0.5
        cttc_threshold = 0.3
        # Instantiate PSDSEval
        psds = PSDSEval(dtc_threshold,
                        gtc_threshold,
                        cttc_threshold,
                        ground_truth=gtruth_df,
                        metadata=gtruth_durations)

        psds.add_operating_point(predictions)
        psds_score = psds.psds(alpha_ct=0, alpha_st=0, max_efpr=100)
        print(f"\nPSD-Score (0, 0, 100): {psds_score.value:.5f}")
        psds_score = psds.psds(alpha_ct=1, alpha_st=0, max_efpr=100)
        print(f"\nPSD-Score (1, 0, 100): {psds_score.value:.5f}")
        psds_score = psds.psds(alpha_ct=0, alpha_st=1, max_efpr=100)
        print(f"\nPSD-Score (0, 1, 100): {psds_score.value:.5f}")
    except psds_eval.psds.PSDSEvalError as e:
        logger.error("psds did not work ....")
        logger.error(e)
Example #26
def test_example_2_paper_icassp(metadata):
    """Run PSDSEval on some sample data from the ICASSP paper"""
    det = pd.read_csv(join(DATADIR, "test_2.det"), sep="\t")
    gt = pd.read_csv(join(DATADIR, "test_2.gt"), sep="\t")
    # Record the checksums of the incoming data
    gt_hash = pd.util.hash_pandas_object(gt).values
    det_hash = pd.util.hash_pandas_object(det).values
    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    exp_counts = np.array([[0, 0, 1, 1], [1, 0, 1, 0], [0, 0, 1, 1],
                           [0, 0, 0, 0]])

    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts)
    psds1 = psds_eval.psds(0.0, 0.0, 100.0)
    assert psds1.value == pytest.approx(0.29047619047619044), \
        "PSDS value was calculated incorrectly"
    # Check that the data has not been messed about with
    assert np.all(pd.util.hash_pandas_object(gt).values == gt_hash)
    assert np.all(pd.util.hash_pandas_object(det).values == det_hash)
Example #27
def test_adding_shuffled_operating_points():
    """Avoid the addition of the same operating point after shuffling"""
    det = pd.read_csv(os.path.join(DATADIR, "test_1.det"), sep="\t")
    metadata = pd.read_csv(os.path.join(DATADIR, "test.metadata"), sep="\t")
    gt = pd.read_csv(os.path.join(DATADIR, "test_1.gt"), sep="\t")
    psds_eval = PSDSEval(metadata=metadata, ground_truth=gt)
    psds_eval.add_operating_point(det)
    det_shuffled = det.copy(deep=True)
    det_shuffled = det_shuffled.sample(frac=1.).reset_index(drop=True)
    psds_eval.add_operating_point(det_shuffled)
    det_shuffled2 = det.copy(deep=True)
    det_shuffled2 = det_shuffled2[["onset", "event_label", "offset",
                                   "filename"]]
    psds_eval.add_operating_point(det_shuffled2)
    assert psds_eval.num_operating_points() == 1
    assert psds_eval.operating_points["id"][0] == \
        "423089ce6d6554174881f69f9d0e57a8be9f5bc682dfce301462a8753aa6ec5f"

def test_full_dcase_validset():
    """Run PSDSEval on all the example data from DCASE"""
    det = pd.read_csv(join(DATADIR, "baseline_validation_AA_0.005.csv"),
                      sep="\t")
    gt = pd.read_csv(join(DATADIR, "baseline_validation_gt.csv"), sep="\t")
    metadata = pd.read_csv(join(DATADIR, "baseline_validation_metadata.csv"),
                           sep="\t")
    # Record the checksums of the incoming data
    meta_hash = pd.util.hash_pandas_object(metadata).values
    gt_hash = pd.util.hash_pandas_object(gt).values
    det_hash = pd.util.hash_pandas_object(det).values

    psds_eval = PSDSEval(dtc_threshold=0.5,
                         gtc_threshold=0.5,
                         cttc_threshold=0.3,
                         ground_truth=gt,
                         metadata=metadata)
    # matrix (n_class, n_class): axis 0 = gt, axis 1 = det
    exp_counts = np.array([
        [269, 5, 54, 37, 45, 14, 3, 8, 89, 0, 302],
        [9, 59, 17, 43, 10, 22, 20, 41, 120, 83, 416],
        [63, 4, 129, 8, 79, 11, 12, 13, 74, 1, 585],
        [41, 45, 19, 141, 73, 24, 139, 119, 495, 12, 1103],
        [120, 29, 105, 56, 278, 30, 96, 93, 825, 58, 1480],
        [13, 31, 13, 9, 7, 41, 35, 48, 203, 27, 386],
        [7, 35, 14, 63, 24, 51, 87, 135, 403, 46, 840],
        [18, 46, 16, 63, 51, 26, 103, 127, 187, 46, 662],
        [128, 86, 82, 87, 154, 62, 97, 185, 966, 120, 1340],
        [2, 58, 20, 7, 22, 43, 27, 32, 89, 67, 390],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ])
    tpr = np.array([
        0.64047619, 0.62105263, 0.37829912, 0.25044405, 0.4877193, 0.63076923,
        0.92553191, 0.53586498, 0.55105533, 0.72826087
    ])
    fpr = np.array([
        93.08219178, 128.21917808, 180.30821918, 339.96575342, 456.16438356,
        118.97260274, 258.90410959, 204.04109589, 413.01369863, 120.20547945
    ])
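    # Note (inferred, not from the original source): tpr appears to be
    # diag(counts) / per-class ground-truth totals (e.g. 269 / 420 =
    # 0.64047619), and fpr the world-column count per hour of audio
    # (e.g. 302 FPs over roughly 3.24 h gives 93.08219178).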
    ctr = np.array([
        [np.nan, 36.63950594, 412.06956854, 378.4295175, 201.60236547,
         100.2921207, 13.91555321, 23.16073227, 122.18868771, 0.],
        [39.38051054, np.nan, 129.72560491, 439.79646629, 44.80052566,
         157.60190396, 92.77035472, 118.69875286, 164.74879241, 382.88155531],
        [275.66357376, 29.31160475, np.nan, 81.82259838, 353.92415271,
         78.80095198, 55.66221283, 37.63618993, 101.59508866, 4.61303079],
        [179.40010356, 329.75555343, 144.98744078, np.nan, 327.04383731,
         171.92934977, 644.75396529, 344.51589244, 679.58876871, 55.35636944],
        [525.07347382, 212.50913443, 801.24638326, 572.75818865, np.nan,
         214.91168722, 445.29770265, 269.24351258, 1132.64794785,
         267.55578564],
        [56.88295966, 227.16493681, 99.20193317, 92.05042318, 31.36036796,
         np.nan, 162.34812076, 138.96439359, 278.7000405, 124.55183125],
        [30.62928597, 256.47654156, 106.8328511, 644.35296223, 107.52126158,
         365.34986827, np.nan, 390.83735697, 553.28136119, 212.1994162],
        [78.76102107, 337.08345462, 122.09468697, 644.35296223, 228.48268086,
         186.25679559, 477.7673268, np.nan, 256.73353485, 212.1994162],
        [560.07837208, 630.19950211, 625.73527074, 889.82075737, 689.92809516,
         444.15082025, 449.93622038, 535.59193363, np.nan, 553.56369442],
        [8.75122456, 425.01826886, 152.61835872, 71.59477358, 98.56115645,
         308.04008501, 125.23997887, 92.64292906, 122.18868771, np.nan]])
    psds_eval.add_operating_point(det)
    assert np.all(psds_eval.operating_points.counts[0] == exp_counts)
    np.testing.assert_allclose(psds_eval.operating_points.tpr[0], tpr)
    np.testing.assert_allclose(psds_eval.operating_points.fpr[0], fpr)
    np.testing.assert_allclose(psds_eval.operating_points.ctr[0], ctr)
    psds1 = psds_eval.psds(0.0, 0.0, 100.0)
    # Check that all the psds metrics match
    assert psds1.value == pytest.approx(0.0044306914546640595), \
        "PSDS value was calculated incorrectly"
    # Check that the data has not been messed about with
    assert np.all(pd.util.hash_pandas_object(gt).values == gt_hash)
    assert np.all(pd.util.hash_pandas_object(metadata).values == meta_hash)
    assert np.all(pd.util.hash_pandas_object(det).values == det_hash)
Example #29
    dtc_threshold = 0.5
    gtc_threshold = 0.5
    cttc_threshold = 0.3
    alpha_ct = 0.0
    alpha_st = 0.0
    max_efpr = 100

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    ground_truth_csv = os.path.join(data_dir, "dcase2019t4_gt.csv")
    metadata_csv = os.path.join(data_dir, "dcase2019t4_meta.csv")
    gt_table = pd.read_csv(ground_truth_csv, sep="\t")
    meta_table = pd.read_csv(metadata_csv, sep="\t")

    # Instantiate PSDSEval
    psds_eval = PSDSEval(dtc_threshold, gtc_threshold, cttc_threshold,
                         ground_truth=gt_table, metadata=meta_table)

    # Add the operating points, with the attached information
    for i, th in enumerate(np.arange(0.1, 1.1, 0.1)):
        csv_file = os.path.join(data_dir, f"baseline_{th:.1f}.csv")
        det_t = pd.read_csv(csv_file, sep="\t")
        info = {"name": f"Op {i + 1}", "threshold": th}
        psds_eval.add_operating_point(det_t, info=info)
        print(f"\rOperating point {i+1} added", end=" ")

    # Calculate the PSD-Score
    psds = psds_eval.psds(alpha_ct, alpha_st, max_efpr)
    print(f"\nPSD-Score: {psds.value:.5f}")

    # Plot the PSD-ROC
    plot_psd_roc(psds)

    # Plot per class tpr vs fpr/efpr/ctr
    tpr_vs_fpr, _, tpr_vs_efpr = psds_eval.psd_roc_curves(alpha_ct=1.)
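    # psd_roc_curves presumably returns per-class (TPR-vs-FPR, TPR-vs-CTR,
    # TPR-vs-eFPR) curves; the CTR curve is unused here.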
    plot_per_class_psd_roc(tpr_vs_fpr, psds_eval.class_names,
                           title="Per-class TPR-vs-FPR PSDROC",
                           xlabel="FPR")
    plot_per_class_psd_roc(tpr_vs_efpr, psds_eval.class_names,
                           title="Per-class TPR-vs-eFPR PSDROC",
                           xlabel="eFPR")
Example #30
    dtc_threshold = 0.5
    gtc_threshold = 0.5
    cttc_threshold = 0.3
    alpha_ct = 0.0
    alpha_st = 0.0
    max_efpr = 100

    # Load metadata and ground truth tables
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    ground_truth_csv = os.path.join(data_dir, "dcase2019t4_gt.csv")
    metadata_csv = os.path.join(data_dir, "dcase2019t4_meta.csv")
    gt_table = pd.read_csv(ground_truth_csv, sep="\t")
    meta_table = pd.read_csv(metadata_csv, sep="\t")

    # Instantiate PSDSEval
    psds_eval = PSDSEval(dtc_threshold, gtc_threshold, cttc_threshold,
                         ground_truth=gt_table, metadata=meta_table)

    # Add the operating points
    for i, th in enumerate(np.arange(0.1, 1.1, 0.1)):
        csv_file = os.path.join(data_dir, f"baseline_{th:.1f}.csv")
        det_t = pd.read_csv(csv_file, sep="\t")
        psds_eval.add_operating_point(det_t)
        print(f"\rOperating point {i+1} added", end=" ")

    # Calculate the PSD-Score
    psds = psds_eval.psds(alpha_ct, alpha_st, max_efpr)
    print(f"\nPSD-Score: {psds.value:.5f}")

    # Plot the PSD-ROC
    plot_psd_roc(psds)