Example #1
    def test_optional_outputs(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        o = metrics.ObjectAccuracy(y_true, y_pred)

        o.print_report()

        df = o.save_to_dataframe()
        self.assertIsInstance(df, pd.DataFrame)

        columns = [
            'n_pred', 'n_true', 'correct_detections', 'missed_detections',
            'jaccard', 'missed_det_from_merge', 'gained_det_from_split',
            'true_det_in_catastrophe', 'pred_det_in_catastrophe', 'merge',
            'split', 'catastrophe', 'gained_detections'
        ]
        self.assertItemsEqual(columns, list(df.columns))

        # Check seg True case
        o = metrics.ObjectAccuracy(y_true, y_pred, seg=True)
        o.print_report()
        df = o.save_to_dataframe()
        columns = [
            'n_pred', 'n_true', 'correct_detections', 'missed_detections',
            'seg', 'jaccard', 'missed_det_from_merge', 'gained_det_from_split',
            'true_det_in_catastrophe', 'pred_det_in_catastrophe', 'merge',
            'split', 'catastrophe', 'gained_detections'
        ]
        self.assertItemsEqual(columns, list(df.columns))
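
For reference, the same reporting workflow can be run outside the test helpers on hand-built label masks. The sketch below is an assumption about usage based on the calls above (`ObjectAccuracy`, `print_report`, `save_to_dataframe`); the import path is a guess and may differ in your install.

import numpy as np
from deepcell import metrics  # assumed import path

# Two toy 30x30 label images with two roughly matching objects each
y_true = np.zeros((30, 30), dtype='int')
y_true[2:10, 2:10] = 1
y_true[15:25, 15:25] = 2
y_pred = np.zeros_like(y_true)
y_pred[3:11, 3:11] = 1       # slightly shifted copies of the true objects
y_pred[16:26, 16:26] = 2

o = metrics.ObjectAccuracy(y_true, y_pred)
o.print_report()
df = o.save_to_dataframe()   # one-row DataFrame with the columns listed above
print(df[['n_true', 'n_pred', 'correct_detections']])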
Example #2
    def test_linear_assignment(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        o._calc_iou()
        o._modify_iou(force_event_links=False)
        o._make_matrix()

        o._linear_assignment()

        cols = [
            'n_pred', 'n_true', 'correct_detections', 'missed_detections',
            'gained_detections', 'missed_det_from_merge',
            'gained_det_from_split', 'true_det_in_catastrophe',
            'pred_det_in_catastrophe', 'merge', 'split', 'catastrophe'
        ]

        for obj in cols:
            self.assertTrue(hasattr(o, obj))

        # Test condition where seg = True
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True, seg=True)
        o._calc_iou()
        o._modify_iou(force_event_links=False)
        o._make_matrix()
        o._linear_assignment()

        for obj in ['results', 'cm_res', 'seg_score']:
            self.assertTrue(hasattr(o, obj))
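
`_linear_assignment` is exercised here as an internal step. As a general illustration only (not the library's implementation), matching predicted to true objects by linear assignment can be done with SciPy's Hungarian solver on a cost matrix such as 1 - IoU:

import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy IoU matrix: rows are true objects, columns are predicted objects
iou = np.array([[0.9, 0.1, 0.0],
                [0.2, 0.7, 0.1],
                [0.0, 0.1, 0.8]])

cost = 1 - iou                                   # low cost = good match
true_idx, pred_idx = linear_sum_assignment(cost)
for t, p in zip(true_idx, pred_idx):
    print('true %d -> pred %d (IoU %.1f)' % (t, p, iou[t, p]))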
Example #3
    def test_init_emptyframe(self):
        y_true, y_empty = _sample1(10, 10, 30, 30, True)

        # Check handling of empty frames
        y_empty[:, :] = 0
        y_empty = y_empty.astype('int')

        oempty = metrics.ObjectAccuracy(y_true, y_empty)
        self.assertEqual(oempty.empty_frame, 'n_pred')
        oempty = metrics.ObjectAccuracy(y_empty, y_true)
        self.assertEqual(oempty.empty_frame, 'n_true')
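
A stand-alone version of the empty-frame check, using a hand-built mask instead of `_sample1` (import path assumed, as above):

import numpy as np
from deepcell import metrics  # assumed import path

y_true = np.zeros((30, 30), dtype='int')
y_true[2:8, 2:8] = 1               # one labeled object
y_empty = np.zeros_like(y_true)    # nothing predicted

# An empty prediction is flagged via the empty_frame attribute rather than raising
o = metrics.ObjectAccuracy(y_true, y_empty)
print(o.empty_frame)               # 'n_pred', as asserted in the test above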
Example #4
    def test_calc_iou(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True)

        o._calc_iou()

        # Check that iou was created
        self.assertTrue(hasattr(o, 'iou'))

        # Check that it is not equal to initial value
        self.assertNotEqual(np.count_nonzero(o.iou), 0)

        # Test seg_thresh creation
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True, seg=True)
        o._calc_iou()

        self.assertTrue(hasattr(o, 'seg_thresh'))
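
For context, the quantity `_calc_iou` is named after can be computed directly with NumPy. This is only an illustration of pairwise IoU between labeled objects, not the library's code:

import numpy as np

def pairwise_iou(y_true, y_pred):
    # Return an (n_true, n_pred) matrix of IoU values between labeled objects
    true_labels = np.unique(y_true[y_true > 0])
    pred_labels = np.unique(y_pred[y_pred > 0])
    iou = np.zeros((len(true_labels), len(pred_labels)))
    for i, t in enumerate(true_labels):
        t_mask = y_true == t
        for j, p in enumerate(pred_labels):
            p_mask = y_pred == p
            intersection = np.logical_and(t_mask, p_mask).sum()
            union = np.logical_or(t_mask, p_mask).sum()
            iou[i, j] = intersection / union if union else 0.0
    return iou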
Example #5
    def test_classify_graph(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        # Test that a complete run-through is successful
        _ = metrics.ObjectAccuracy(y_true, y_pred)

        # Test for 0 degree graph
        y_true, y_pred = _sample4_loner(10, 10, 30, 30, True)
        _ = metrics.ObjectAccuracy(y_true, y_pred)
        y_true, y_pred = _sample4_loner(10, 10, 30, 30, False)
        _ = metrics.ObjectAccuracy(y_true, y_pred)

        # Test for splits in 1 degree graph
        y_true, y_pred = _sample1(10, 10, 30, 30, False)
        _ = metrics.ObjectAccuracy(y_true, y_pred)

        # Test for catastrophic errors
        y_true, y_pred = _sample_catastrophe(10, 10, 30, 30)
        _ = metrics.ObjectAccuracy(y_true, y_pred)
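
The cases above exercise different graph structures: isolated (degree-0) nodes, simple one-degree links, splits, and densely connected "catastrophe" components. As a loose illustration of that idea only (node naming and any classification rules here are assumptions, not the library's logic), the true/pred overlaps can be viewed as a bipartite graph and grouped by connected component:

import networkx as nx

# One node per true/pred object, with an edge wherever the pair overlaps strongly
G = nx.Graph()
G.add_nodes_from(['true_1', 'pred_1', 'true_2', 'pred_2', 'pred_3', 'true_4'])
G.add_edges_from([('true_1', 'pred_1'),    # one-to-one overlap
                  ('true_2', 'pred_2'),
                  ('true_2', 'pred_3')])   # one true cell linked to two preds: a split
# 'true_4' has no edges: an isolated node, i.e. a candidate missed detection

for component in nx.connected_components(G):
    degrees = {n: G.degree[n] for n in component}
    print(sorted(component), degrees)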
Example #6
    def test_assign_loners(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        o._calc_iou()
        o._modify_iou(force_event_links=False)
        o._make_matrix()
        o._linear_assignment()

        o._assign_loners()
        self.assertTrue(hasattr(o, 'cost_l_bin'))
Example #7
    def test_modify_iou(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True)

        o._calc_iou()
        o._modify_iou(force_event_links=False)

        # Check that modified_iou was created
        self.assertTrue(hasattr(o, 'iou_modified'))
Example #8
    def test_make_matrix(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        o._calc_iou()
        o._modify_iou(force_event_links=False)

        o._make_matrix()

        self.assertTrue(hasattr(o, 'cm'))

        self.assertNotEqual(np.count_nonzero(o.cm), 0)
Example #9
    def test_init(self):
        y_true, _ = _sample1(10, 10, 30, 30, True)

        # Test basic initialization
        o = metrics.ObjectAccuracy(y_true, y_true, test=True)

        # Check that object numbers are integers
        self.assertIsInstance(o.n_true, int)
        self.assertIsInstance(o.n_pred, int)

        self.assertEqual(o.empty_frame, False)
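
The integer checks here amount to counting labeled objects. As a plain-NumPy illustration of what `n_true`/`n_pred` represent (the exact internals are not shown in this test):

import numpy as np

y_true = np.zeros((30, 30), dtype='int')
y_true[2:8, 2:8] = 1
y_true[15:20, 5:15] = 2

# Number of distinct nonzero labels, i.e. the number of objects in the mask
n_true = len(np.unique(y_true[y_true > 0]))
print(n_true)   # 2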
Example #10
    def test_array_to_graph(self):
        y_true, y_pred = _sample1(10, 10, 30, 30, True)
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True)
        o._calc_iou()
        o._modify_iou(force_event_links=False)
        o._make_matrix()
        o._linear_assignment()
        o._assign_loners()

        o._array_to_graph()
        self.assertTrue(hasattr(o, 'G'))
Example #11
    def test_save_error_ids(self):

        # cell 1 is assigned correctly, cells 2 and 3 have been merged
        y_true, y_pred = _sample1(10, 10, 30, 30, merge=True)
        o = metrics.ObjectAccuracy(y_true, y_pred)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert set(label_dict['merges']['y_true']) == {2, 3}
        assert label_dict['merges']['y_pred'] == [2]

        # cell 1 is assigned correctly, cell 2 has been split
        y_true, y_pred = _sample1(10, 10, 30, 30, merge=False)
        o = metrics.ObjectAccuracy(y_true, y_pred)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert set(label_dict['splits']['y_pred']) == {2, 3}
        assert label_dict['splits']['y_true'] == [2]

        # gained cell in predictions
        y_true, y_pred = _sample4_loner(10, 10, 30, 30, gain=True)
        o = metrics.ObjectAccuracy(y_true, y_pred, cutoff1=0.2, cutoff2=0.1)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert label_dict['gains']['y_pred'] == [2]

        # missed cell in true
        y_true, y_pred = _sample4_loner(10, 10, 30, 30, gain=False)
        o = metrics.ObjectAccuracy(y_true, y_pred, cutoff1=0.2, cutoff2=0.1)
        label_dict, _, _ = o.save_error_ids()
        assert label_dict['correct']['y_true'] == [1]
        assert label_dict['correct']['y_pred'] == [1]
        assert label_dict['misses']['y_true'] == [2]

        # catastrophe between 3 cells
        y_true, y_pred = _sample3(10, 10, 30, 30)
        o = metrics.ObjectAccuracy(y_true, y_pred, cutoff1=0.2, cutoff2=0.1)
        label_dict, _, _ = o.save_error_ids()
        assert set(label_dict['catastrophes']['y_true']) == set(
            np.unique(y_true[y_true > 0]))
        assert set(label_dict['catastrophes']['y_pred']) == set(
            np.unique(y_pred[y_pred > 0]))

        # The tests below are more stochastic, and should be run multiple times
        for _ in range(10):

            # 3 cells merged together, with forced event links to ensure accurate assignment
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=True,
                                        similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       force_event_links=True,
                                       cutoff1=0.2,
                                       cutoff2=0.1)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert set(label_dict['merges']['y_true']) == {2, 3, 4}
            assert label_dict['merges']['y_pred'] == [2]

            # 3 cells merged together, without forced event links. Cells must be similar size
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=True,
                                        similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       force_event_links=False,
                                       cutoff1=0.2,
                                       cutoff2=0.1)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert set(label_dict['merges']['y_true']) == {2, 3, 4}
            assert label_dict['merges']['y_pred'] == [2]

            # 2 of 3 cells merged together, with forced event links to ensure accurate assignment
            y_true, y_pred, y_true_merge, y_true_correct, y_pred_merge, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=True)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['merges']['y_true']) == y_true_merge
            assert set(label_dict['merges']['y_pred']) == y_pred_merge

            # 2 of 3 cells merged together, without forced event links. Cells must be similar size
            y_true, y_pred, y_true_merge, y_true_correct, y_pred_merge, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=False)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['merges']['y_true']) == y_true_merge
            assert set(label_dict['merges']['y_pred']) == y_pred_merge

            # 1 cell split into three pieces, with forced event links to ensure accurate assignment
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=False,
                                        similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=True)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert label_dict['splits']['y_true'] == [2]
            assert set(label_dict['splits']['y_pred']) == {2, 3, 4}

            # 1 cell split into three pieces, without forced event links. Cells must be similar size
            y_true, y_pred = _sample2_3(10,
                                        10,
                                        30,
                                        30,
                                        merge=False,
                                        similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=False)
            label_dict, _, _ = o.save_error_ids()
            assert label_dict['correct']['y_true'] == [1]
            assert label_dict['correct']['y_pred'] == [1]
            assert label_dict['splits']['y_true'] == [2]
            assert set(label_dict['splits']['y_pred']) == {2, 3, 4}

            # 1 cell split into two pieces, one small accurate cell, with forced event links
            y_true, y_pred, y_true_split, y_true_correct, y_pred_split, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, merge=False, similar_size=False)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=True)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['splits']['y_true']) == y_true_split
            assert set(label_dict['splits']['y_pred']) == y_pred_split

            # 1 cell split into two pieces, one small accurate cell, without forced event links
            y_true, y_pred, y_true_split, y_true_correct, y_pred_split, y_pred_correct = \
                _sample2_2(10, 10, 30, 30, merge=False, similar_size=True)
            o = metrics.ObjectAccuracy(y_true,
                                       y_pred,
                                       cutoff1=0.2,
                                       cutoff2=0.1,
                                       force_event_links=False)
            label_dict, _, _ = o.save_error_ids()
            assert set(label_dict['correct']['y_true']) == y_true_correct
            assert set(label_dict['correct']['y_pred']) == y_pred_correct
            assert set(label_dict['splits']['y_true']) == y_true_split
            assert set(label_dict['splits']['y_pred']) == y_pred_split
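
Outside of the sample helpers, `save_error_ids` can be called the same way on hand-built masks. The geometry below mirrors the first merge case in this test (true cells 2 and 3 covered by one prediction); the import path is assumed, and whether the merge is flagged depends on the default cutoffs:

import numpy as np
from deepcell import metrics  # assumed import path

y_true = np.zeros((30, 30), dtype='int')
y_pred = np.zeros((30, 30), dtype='int')
y_true[2:8, 2:8] = 1                # cell 1: predicted correctly
y_pred[2:8, 2:8] = 1
y_true[15:20, 5:15] = 2             # cells 2 and 3 in the ground truth ...
y_true[15:20, 16:26] = 3
y_pred[15:20, 5:26] = 2             # ... covered by a single predicted cell

o = metrics.ObjectAccuracy(y_true, y_pred)
label_dict, _, _ = o.save_error_ids()
print(label_dict['correct'])        # should mirror {'y_true': [1], 'y_pred': [1]}
print(label_dict['merges'])         # true cells 2 and 3 merged into pred cell 2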