def test_optional_outputs(self):
        """Check print_report and save_to_dataframe for ObjectAccuracy."""
        y_true, y_pred = _sample1(10, 10, 30, 30, True)

        base_cols = [
            'n_pred', 'n_true', 'correct_detections', 'missed_detections',
            'gained_detections', 'missed_det_from_merge',
            'gained_det_from_split', 'true_det_in_catastrophe',
            'pred_det_in_catastrophe', 'merge', 'split', 'catastrophe',
            'jaccard', 'precision', 'recall', 'f1'
        ]

        # Default case: report has the base columns only
        acc = metrics.ObjectAccuracy(y_true, y_pred)
        acc.print_report()
        frame = acc.save_to_dataframe()
        assert isinstance(frame, pd.DataFrame)
        assert np.array_equal(sorted(base_cols), sorted(list(frame.columns)))

        # seg=True adds a 'seg' column to the exported dataframe
        acc = metrics.ObjectAccuracy(y_true, y_pred, seg=True)
        acc.print_report()
        frame = acc.save_to_dataframe()
        assert np.array_equal(sorted(base_cols + ['seg']),
                              sorted(list(frame.columns)))
    def test_linear_assignment(self):
        """Verify _linear_assignment populates the per-category attributes."""
        y_true, y_pred = _sample1(10, 10, 30, 30, True)

        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        acc._calc_iou()
        acc._modify_iou(force_event_links=False)
        acc._make_matrix()
        acc._linear_assignment()

        expected_attrs = [
            'n_pred', 'n_true', 'correct_detections', 'missed_detections',
            'gained_detections', 'missed_det_from_merge',
            'gained_det_from_split', 'true_det_in_catastrophe',
            'pred_det_in_catastrophe', 'merge', 'split', 'catastrophe'
        ]
        assert all(hasattr(acc, name) for name in expected_attrs)

        # Repeat with seg=True, which adds segmentation-specific attributes
        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True, seg=True)
        acc._calc_iou()
        acc._modify_iou(force_event_links=False)
        acc._make_matrix()
        acc._linear_assignment()

        assert all(hasattr(acc, name)
                   for name in ('results', 'cm_res', 'seg_score'))
    def test_calc_iou_3D(self):
        """Check IOU computation on 3D data plus ndim/shape validation."""
        y_true, y_pred = _sample1_3D(10, 10, 30, 30, True, 8)
        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True, is_3d=True)
        acc._calc_iou()

        # iou attribute exists and has nonzero entries after calculation
        assert hasattr(acc, 'iou')
        assert np.count_nonzero(acc.iou) != 0

        # seg=True should also create the seg_thresh matrix
        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True, seg=True)
        acc._calc_iou()
        assert hasattr(acc, 'seg_thresh')

        m = metrics.Metrics('test', is_3d=True)

        # Inputs with the wrong number of dimensions must raise
        for bad_shape in [(10, 15, 11), (10, 15, 15, 10, 15)]:
            bad = np.zeros(shape=bad_shape)
            with pytest.raises(ValueError):
                m.calc_object_stats(bad, bad)

        # Mismatched shapes between true and pred must also raise
        with pytest.raises(ValueError):
            m.calc_object_stats(np.zeros(shape=(2, 3, 5, 2)),
                                np.zeros(shape=(1, 4, 11, 2)))
    def test_init(self):
        """Validate ObjectAccuracy construction and ndim error handling."""
        y_true, _ = _sample1(10, 10, 30, 30, True)

        acc = metrics.ObjectAccuracy(y_true, y_true, test=True)

        # Object counts are plain ints; an identical pair is not empty
        assert isinstance(acc.n_true, int)
        assert isinstance(acc.n_pred, int)
        testing.assert_equal(acc.empty_frame, False)

        # 2D mode rejects 1D and 4D inputs
        for bad_shape in [(10,), (10, 5, 5, 5)]:
            bad = np.zeros(shape=bad_shape)
            with pytest.raises(ValueError):
                acc = metrics.ObjectAccuracy(bad, bad, test=True)

        # 3D mode rejects 2D and 4D inputs
        for bad_shape in [(10, 15), (10, 15, 15, 10)]:
            bad = np.zeros(shape=bad_shape)
            with pytest.raises(ValueError):
                acc = metrics.ObjectAccuracy(bad, bad, test=True, is_3d=True)
    def test_init_emptyframe(self):
        """Frames that are entirely background are flagged via empty_frame."""
        y_true, y_empty = _sample1(10, 10, 30, 30, True)

        # Zero out one frame to simulate a blank annotation/prediction
        y_empty[:, :] = 0
        y_empty = y_empty.astype('int')

        # Empty prediction vs. empty ground truth are reported differently
        testing.assert_equal(
            metrics.ObjectAccuracy(y_true, y_empty).empty_frame, 'n_pred')
        testing.assert_equal(
            metrics.ObjectAccuracy(y_empty, y_true).empty_frame, 'n_true')
    def test_init_noloners(self):
        """Loner assignment must not run when pred and true are identical."""
        # The split-error prediction returned by _sample1 is not used here;
        # the same single-cell image serves as both true and pred.
        y_true, _ = _sample1(10, 10, 30, 30, False)

        o = metrics.ObjectAccuracy(y_true, y_true)

        # `n_pred2` is only created by the loner-assignment path; its
        # presence would indicate assign_loners ran incorrectly.
        testing.assert_equal(hasattr(o, 'n_pred2'), False)
    def test_classify_graph(self):
        """Run full classification across graph degrees and error types."""
        # Complete run-through on a standard merge sample should succeed
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        _ = metrics.ObjectAccuracy(y_true, y_pred)

        # Zero-degree graphs: a loner cell on either side
        for gain in (True, False):
            y_true, y_pred = _sample4_loner(10, 10, 30, 30, gain)
            _ = metrics.ObjectAccuracy(y_true, y_pred)

        # One-degree graph containing a split
        y_true, y_pred = _sample1(10, 10, 30, 30, False)
        _ = metrics.ObjectAccuracy(y_true, y_pred)

        # Catastrophic error sample
        y_true, y_pred = _sample3(10, 10, 30, 30)
        _ = metrics.ObjectAccuracy(y_true, y_pred)
    def test_modify_iou(self):
        """_modify_iou should produce the iou_modified attribute."""
        y_true, y_pred = _sample1(10, 10, 30, 30, True)

        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        acc._calc_iou()
        acc._modify_iou(force_event_links=False)

        # The modified IOU matrix is created by _modify_iou
        assert hasattr(acc, 'iou_modified')
    def test_assign_loners(self):
        """_assign_loners should build the binary loner cost matrix."""
        y_true, y_pred = _sample1(10, 10, 30, 30, True)

        # Run the full pipeline up to and including loner assignment
        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        acc._calc_iou()
        acc._modify_iou(force_event_links=False)
        acc._make_matrix()
        acc._linear_assignment()
        acc._assign_loners()

        assert hasattr(acc, 'cost_l_bin')
    # NOTE(review): "Example #10" / "0" below were extraction artifacts
    # (code-sample site separators); converted to comments so the file parses.
    # Example #10
    def test_make_matrix(self):
        """_make_matrix should create a populated cost matrix `cm`."""
        y_true, y_pred = _sample1(10, 10, 30, 30, True)

        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        acc._calc_iou()
        acc._modify_iou(force_event_links=False)
        acc._make_matrix()

        # cm exists and contains nonzero entries
        assert hasattr(acc, 'cm')
        assert np.count_nonzero(acc.cm) != 0
    # NOTE(review): extraction-artifact separator, commented out.
    # Example #11
    def test_calc_iou(self):
        """_calc_iou fills the iou matrix; seg=True also adds seg_thresh."""
        y_true, y_pred = _sample1(10, 10, 30, 30, True)

        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        acc._calc_iou()

        # iou exists and is no longer all zeros
        assert hasattr(acc, 'iou')
        assert np.count_nonzero(acc.iou) != 0

        # seg=True creates the seg_thresh matrix
        acc = metrics.ObjectAccuracy(y_true, y_pred, test=True, seg=True)
        acc._calc_iou()
        assert hasattr(acc, 'seg_thresh')

        # An image with no background pixels must also be handled
        y_true, y_pred = _dense_sample()
        acc = metrics.ObjectAccuracy(y_true=y_true, y_pred=y_pred, test=False)
        acc._calc_iou()
    # NOTE(review): extraction-artifact separator, commented out.
    # Example #12
    def test_save_error_ids(self):
        """Check that save_error_ids maps each error class to the right labels.

        Each scenario builds a (y_true, y_pred) pair with a known error and
        asserts that the returned label_dict buckets ('correct', 'merges',
        'splits', 'gains', 'misses', 'catastrophes') contain exactly the
        expected cell labels for each side.
        """

        # cell 1 in assigned correctly, cells 2 and 3 have been merged
        y_true, y_pred = _sample1(10, 10, 30, 30, merge=True)
        o = metrics.ObjectAccuracy(y_true, y_pred)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert set(label_dict['merges']['y_true']) == {2, 3}
        assert label_dict['merges']['y_pred'] == [2]

        # cell 1 in assigned correctly, cell 2 has been split
        y_true, y_pred = _sample1(10, 10, 30, 30, merge=False)
        o = metrics.ObjectAccuracy(y_true, y_pred)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert set(label_dict['splits']['y_pred']) == {2, 3}
        assert label_dict['splits']['y_true'] == [2]

        # gained cell in predictions
        y_true, y_pred = _sample4_loner(10, 10, 30, 30, gain=True)
        o = metrics.ObjectAccuracy(y_true, y_pred, cutoff1=0.2, cutoff2=0.1)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert label_dict['gains']['y_pred'] == [2]

        # missed cell in true
        y_true, y_pred = _sample4_loner(10, 10, 30, 30, gain=False)
        o = metrics.ObjectAccuracy(y_true, y_pred, cutoff1=0.2, cutoff2=0.1)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert label_dict['misses']['y_true'] == [2]

        # catastrophe between 3 cells
        # NOTE: every nonzero label on both sides is expected in the
        # catastrophe bucket for this sample.
        y_true, y_pred = _sample3(10, 10, 30, 30)
        o = metrics.ObjectAccuracy(y_true, y_pred, cutoff1=0.2, cutoff2=0.1)
        label_dict, _, _ = o.save_error_ids()
        assert set(label_dict['catastrophes']['y_true']) == set(
            np.unique(y_true[y_true > 0]))
        assert set(label_dict['catastrophes']['y_pred']) == set(
            np.unique(y_pred[y_pred > 0]))

        # The tests below are more stochastic, and should be run multiple times
        for _ in range(10):

            # 3 cells merged together, with forced event links to ensure accurate assignment
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=True,
                                        similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       force_event_links=True,
                                       cutoff1=0.2,
                                       cutoff2=0.1)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert set(label_dict['merges']['y_true']) == {2, 3, 4}
            assert label_dict['merges']['y_pred'] == [2]

            # 3 cells merged together, without forced event links. Cells must be similar size
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=True,
                                        similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       force_event_links=False,
                                       cutoff1=0.2,
                                       cutoff2=0.1)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert set(label_dict['merges']['y_true']) == {2, 3, 4}
            assert label_dict['merges']['y_pred'] == [2]

            # 2 of 3 cells merged together, with forced event links to ensure accurate assignment
            # _sample2_2 also returns the expected label sets for each bucket.
            y_true, y_pred, y_true_merge, y_true_correct, y_pred_merge, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=True)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['merges']['y_true']) == y_true_merge
            assert set(label_dict['merges']['y_pred']) == y_pred_merge

            # 2 of 3 cells merged together, without forced event links. Cells must be similar size
            y_true, y_pred, y_true_merge, y_true_correct, y_pred_merge, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=False)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['merges']['y_true']) == y_true_merge
            assert set(label_dict['merges']['y_pred']) == y_pred_merge

            # 1 cell split into three pieces, with forced event links to ensure accurate assignment
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=False,
                                        similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=True)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert label_dict['splits']['y_true'] == [2]
            assert set(label_dict['splits']['y_pred']) == {2, 3, 4}

            # 1 cell split in three pieces, without forced event links. Cells must be similar size
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=False,
                                        similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=False)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert label_dict['splits']['y_true'] == [2]
            assert set(label_dict['splits']['y_pred']) == {2, 3, 4}

            # 1 cell split into two pieces, one small accurate cell, with forced event links
            y_true, y_pred, y_true_split, y_true_correct, y_pred_split, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, merge=False, similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=True)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['splits']['y_true']) == y_true_split
            assert set(label_dict['splits']['y_pred']) == y_pred_split

            # 1 cell split into two pieces, one small accurate cell, without forced event links
            y_true, y_pred, y_true_split, y_true_correct, y_pred_split, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, merge=False, similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=False)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['splits']['y_true']) == y_true_split
            assert set(label_dict['splits']['y_pred']) == y_pred_split