Example #1
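These snippets are methods of a test class and rely on imports and module-level helpers (_generate_stack_3d, _generate_stack_4d, _generate_df, _sample1_3D) that are not shown here. A minimal shared setup might look like the sketch below; the import path for the metrics module (assumed here to be deepcell.metrics; in some releases it lives in deepcell_toolbox.metrics) is an assumption rather than part of the examples.

    # Assumed shared setup for the snippets below (import paths are an assumption).
    import datetime
    import json
    import os

    import numpy as np
    import pytest
    from numpy import testing
    from skimage.measure import label

    from deepcell import metrics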
    def test_calc_object_stats(self):
        y_true = label(_generate_stack_3d())
        y_pred = label(_generate_stack_3d())

        m = metrics.Metrics('test')

        # test that metrics are generated
        object_metrics = m.calc_object_stats(y_true, y_pred)
        # each row of metrics corresponds to a batch
        assert len(object_metrics) == len(y_true)

        object_metrics = m.calc_object_stats(
            np.zeros_like(y_true), np.zeros_like(y_pred))

        # test accuracy of metrics with blank predictions
        assert object_metrics['precision'].sum() == 0
        assert object_metrics['recall'].sum() == 0

        # Raise input size error
        with testing.assert_raises(ValueError):
            m.calc_object_stats(np.random.rand(10, 10), np.random.rand(10, 10))

        # Raise error if y_pred.shape != y_true.shape
        with testing.assert_raises(ValueError):
            m.calc_object_stats(np.random.rand(10, 10), np.random.rand(10,))

        # data that needs to be relabeled raises a warning
        with pytest.warns(UserWarning):
            y_pred[0, 0, 0] = 40
            m.calc_object_stats(y_true, y_pred)

        # seg is deprecated (TODO: this will be removed)
        with pytest.warns(DeprecationWarning):
            _ = metrics.Metrics('test', seg=True)
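The _generate_stack_3d() helper used above is not included in the snippet. Purely as an illustration (an assumed stand-in, not the project's actual helper), it can be read as returning a small (batch, x, y) stack of sparse binary masks, which label() then turns into instance labels:

    def _generate_stack_3d(batch=2, size=32, seed=0):
        # Hypothetical stand-in: a (batch, x, y) stack of sparse binary masks.
        rng = np.random.default_rng(seed)
        return (rng.random((batch, size, size)) > 0.9).astype(int)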
Example #2
    def test_calc_iou_3D(self):
        y_true, y_pred = _sample1_3D(10, 10, 30, 30, True, 8)
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True, is_3d=True)

        o._calc_iou()

        # Check that iou was created
        assert hasattr(o, 'iou')

        # Check that it is not equal to initial value
        assert np.count_nonzero(o.iou) != 0

        # Test seg_thresh creation
        o = metrics.ObjectAccuracy(y_true, y_pred, test=True, seg=True)
        o._calc_iou()

        assert hasattr(o, 'seg_thresh')

        m = metrics.Metrics('test', is_3d=True)

        # test errors thrown for improper ndim inputs
        y_true = np.zeros(shape=(10, 15, 11))
        with pytest.raises(ValueError):
            m.calc_object_stats(y_true, y_true)

        y_true = np.zeros(shape=(10, 15, 15, 10, 15))
        with pytest.raises(ValueError):
            m.calc_object_stats(y_true, y_true)

        y_true = np.zeros(shape=(2, 3, 5, 2))
        y_pred = np.zeros(shape=(1, 4, 11, 2))
        with pytest.raises(ValueError):
            m.calc_object_stats(y_true, y_pred)
Example #3
    def test_save_to_json(self, tmpdir):
        name = 'test'
        tmpdir = str(tmpdir)
        m = metrics.Metrics(name, outdir=tmpdir)

        # Create test list to save
        L = []
        for i in range(10):
            L.append(dict(name=i, value=i, feature='test', stat_type='output'))

        m.save_to_json(L)
        todays_date = datetime.datetime.now().strftime('%Y-%m-%d')
        outfilename = os.path.join(tmpdir, name + '_' + todays_date + '.json')

        # Check that file exists
        testing.assert_equal(os.path.isfile(outfilename), True)

        # Check that it can be opened
        with open(outfilename) as json_file:
            data = json.load(json_file)

        # Check data types from loaded data
        assert isinstance(data, dict)
        assert np.array_equal(sorted(list(data.keys())),
                              ['metadata', 'metrics'])
        assert isinstance(data['metrics'], list)
        assert isinstance(data['metadata'], dict)
Example #4
    def test_confusion_matrix(self):
        y_true = _generate_stack_4d()
        y_pred = _generate_stack_4d()

        m = metrics.Metrics('test')

        cm = m.calc_pixel_confusion_matrix(y_true, y_pred)
        testing.assert_equal(cm.shape[0], y_true.shape[-1])
Example #5
    def test_run_all(self, tmpdir):
        tmpdir = str(tmpdir)
        y_true = label(_generate_stack_3d())
        y_pred = label(_generate_stack_3d())

        name = 'test'
        m = metrics.Metrics(name, outdir=tmpdir)

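        # Smoke test: run_all should complete end-to-end without raising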
        m.run_all(y_true, y_pred)
Example #6
    def test_df_to_dict(self):
        m = metrics.Metrics('test')
        df = _generate_df()

        L = m.pixel_df_to_dict(df)

        # Check output types
        assert len(L) != 0
        assert isinstance(L, list)
        assert isinstance(L[0], dict)
Example #7
    def test_calc_pixel_stats(self):
        m = metrics.Metrics('test')

        y_true = _generate_stack_4d()
        y_pred = _generate_stack_4d()

        pixel_stats = m.calc_pixel_stats(y_true, y_pred)

        for stat in pixel_stats:
            assert 'name' in stat
            assert 'stat_type' in stat
Example #8
    def test_calc_object_stats_3d(self):
        y_true = _generate_stack_4d()
        y_pred = _generate_stack_4d()

        m = metrics.Metrics('test', is_3d=True)

        # test that metrics are generated
        object_metrics = m.calc_object_stats(y_true, y_pred)
        # each row of metrics corresponds to a batch
        assert len(object_metrics) == len(y_true)

        # test accuracy of metrics with blank predictions
        object_metrics = m.calc_object_stats(
            np.zeros_like(y_true), np.zeros_like(y_pred))

        assert object_metrics['precision'].sum() == 0
        assert object_metrics['recall'].sum() == 0

        # Raise error if is_3d and ndim != 4
        with testing.assert_raises(ValueError):
            m3d = metrics.Metrics('test', is_3d=True)
            m3d.calc_object_stats(np.random.random((32, 32, 1)),
                                  np.random.random((32, 32, 1)))
Example #9
    def test_all_pixel_stats(self):
        m = metrics.Metrics('test')

        before = len(m.output)

        y_true = _generate_stack_4d()
        y_pred = _generate_stack_4d()

        m.all_pixel_stats(y_true, y_pred)

        # Check that items were added to output
        assert before != len(m.output)

        # Check mismatch error
        testing.assert_raises(ValueError, m.all_pixel_stats,
                              np.ones((10, 10, 10, 1)), np.ones((5, 5, 5, 1)))
Example #10
    def test_metric_object_stats(self):
        y_true = label(_generate_stack_3d())
        y_pred = label(_generate_stack_3d())

        m = metrics.Metrics('test')
        before = len(m.output)

        m.calc_object_stats(y_true, y_pred)

        # Check data added to output
        assert before != len(m.output)

        # Raise input size error
        with testing.assert_raises(ValueError):
            m.calc_object_stats(np.random.rand(10, 10), np.random.rand(10, 10))

        # data that needs to be relabeled raises a warning
        with pytest.warns(UserWarning):
            y_pred[0, 0, 0] = 40
            m.calc_object_stats(y_true, y_pred)
Example #11
    def test_run_all(self, tmpdir):
        tmpdir = str(tmpdir)
        y_true_lbl = label(_generate_stack_3d())
        y_pred_lbl = label(_generate_stack_3d())
        y_true_unlbl = _generate_stack_4d()
        y_pred_unlbl = _generate_stack_4d()

        name = 'test'
        for seg in [True, False]:
            m = metrics.Metrics(name, outdir=tmpdir, seg=seg)

            before = len(m.output)

            m.run_all(y_true_lbl, y_pred_lbl, y_true_unlbl, y_pred_unlbl)

            # Assert that data was added to output
            assert len(m.output) != before

            # Check output file
            todays_date = datetime.datetime.now().strftime('%Y-%m-%d')
            outname = os.path.join(tmpdir, name + '_' + todays_date + '.json')
            testing.assert_equal(os.path.isfile(outname), True)
Example #12
    def test_Metrics_init(self):
        m = metrics.Metrics('test')

        testing.assert_equal(hasattr(m, 'output'), True)
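Taken together, the examples cover object-level statistics, pixel-level statistics, confusion matrices, and JSON export. A minimal end-to-end sketch along the same lines (reusing the assumed setup above; the name 'demo' and the array shapes are illustrative only):

    # Small random instance-label stacks, built the same way as in the tests.
    y_true = label(np.random.random((2, 64, 64)) > 0.9)
    y_pred = label(np.random.random((2, 64, 64)) > 0.9)

    m = metrics.Metrics('demo', outdir='.')

    # One row of object-level stats per batch element.
    object_df = m.calc_object_stats(y_true, y_pred)
    print(object_df['precision'].sum(), object_df['recall'].sum())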