Example No. 1
    def test_save_to_json(self):
        name = 'test'
        outdir = self.get_temp_dir()
        m = metrics.Metrics(name, outdir=outdir)

        # Create test list to save
        L = []
        for i in range(10):
            L.append(dict(
                name=i,
                value=i,
                feature='test',
                stat_type='output'
            ))

        m.save_to_json(L)
        todays_date = datetime.datetime.now().strftime('%Y-%m-%d')
        outfilename = os.path.join(outdir, name + '_' + todays_date + '.json')

        # Check that file exists
        self.assertTrue(os.path.isfile(outfilename))

        # Check that it can be opened
        with open(outfilename) as json_file:
            data = json.load(json_file)

        # Check data types from loaded data
        self.assertIsInstance(data, dict)
        self.assertCountEqual(list(data.keys()), ['metrics', 'metadata'])
        self.assertIsInstance(data['metrics'], list)
        self.assertIsInstance(data['metadata'], dict)
Example No. 2
    def test_confusion_matrix(self):
        y_true = _generate_stack_4d()
        y_pred = _generate_stack_4d()

        m = metrics.Metrics('test')

        # The confusion matrix should have one row per feature channel
        cm = m.calc_pixel_confusion_matrix(y_true, y_pred)
        self.assertEqual(cm.shape[0], y_true.shape[-1])
Example No. 3
    def test_df_to_dict(self):
        m = metrics.Metrics('test')
        df = _generate_df()

        L = m.pixel_df_to_dict(df)

        # Check output types
        self.assertNotEqual(len(L), 0)
        self.assertIsInstance(L, list)
        self.assertIsInstance(L[0], dict)
Example No. 4
    def test_metric_object_stats(self):
        y_true = label(_generate_stack_3d())
        y_pred = label(_generate_stack_3d())

        m = metrics.Metrics('test')
        before = len(m.output)

        m.calc_object_stats(y_true, y_pred)

        # Check data added to output
        self.assertNotEqual(before, len(m.output))
Example No. 5
    def test_all_pixel_stats(self):
        m = metrics.Metrics('test')

        before = len(m.output)

        y_true = _generate_stack_4d()
        y_pred = _generate_stack_4d()

        m.all_pixel_stats(y_true, y_pred)

        # Check that items were added to output
        self.assertNotEqual(before, len(m.output))

        # Check that shape-mismatched inputs raise a ValueError
        self.assertRaises(ValueError, m.all_pixel_stats,
                          np.ones((10, 10, 10, 1)),
                          np.ones((5, 5, 5, 1)))
Example No. 6
    def test_run_all(self):
        y_true_lbl = label(_generate_stack_3d())
        y_pred_lbl = label(_generate_stack_3d())
        y_true_unlbl = _generate_stack_4d()
        y_pred_unlbl = _generate_stack_4d()

        name = 'test'
        outdir = self.get_temp_dir()
        m = metrics.Metrics(name, outdir=outdir)

        before = len(m.output)

        m.run_all(y_true_lbl, y_pred_lbl, y_true_unlbl, y_pred_unlbl)

        # Assert that data was added to output
        self.assertNotEqual(len(m.output), before)

        # Check output file
        todays_date = datetime.datetime.now().strftime('%Y-%m-%d')
        outname = os.path.join(outdir, name + '_' + todays_date + '.json')
        self.assertTrue(os.path.isfile(outname))
Example No. 7
    def test_Metrics_init(self):
        m = metrics.Metrics('test')

        # Metrics instances should expose an output attribute for results
        self.assertTrue(hasattr(m, 'output'))
Example No. 8
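# Assumes the surrounding notebook context: numpy as np,
# matplotlib.pyplot as plt, and skimage.measure's label are imported,
# and y_test, predict, and predict_lbl were produced by earlier cells.
# Pick a random frame from the test set for a visual spot check.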
i = np.random.randint(0, high=1440)
print(i)

y_true = label(y_test[i:i + 1, :, :, 0].astype('int'))
y_pred = label(predict_lbl[i:i + 1])

fig, ax = plt.subplots(1, 3, figsize=(10, 8))
ax[0].imshow(predict[i, :, :, 1])
ax[0].set_title('Prediction')
# Labeling was applied per frame above so label numbers stay in range for display
ax[1].imshow(y_pred[0], cmap='jet')
ax[1].set_title('Labeled Prediction')
ax[2].imshow(y_true[0], cmap='jet')
ax[2].set_title('Labeled Truth')

m = metrics.Metrics('singleton', seg=True)
m.calc_object_stats(y_true, y_pred)

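# Object-level stats over the full test stack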
m = metrics.Metrics('fgbg', seg=True)

y_true_lbl = y_test[:, :, :, 0].astype('int')
y_pred_lbl = predict_lbl

m.calc_object_stats(y_true_lbl, y_pred_lbl)

# Note: pixel-based statistics are also available from `m.all_pixel_stats`,
# but they require that the ground truth annotation be transformed to match
# the features output by the model prediction.
# Additionally, the final metric report can be saved using
# `m.save_to_json(m.output)` after object and/or pixel statistics have been
# generated.
# Currently this dataset has frame sizes small enough to loop over in
# `m.calc_object_stats` without too much of a problem. For datasets with
# larger frame sizes,
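The comments above mention `m.all_pixel_stats` and `m.save_to_json(m.output)` without showing them in use. Below is a minimal sketch of that follow-up step, assuming the `fgbg` Metrics object `m`, `y_test`, and `predict` from the snippet above are still in scope, and assuming a one-hot transform (here via `tensorflow.keras.utils.to_categorical`) is a suitable way to match the ground truth to the model's output features:

from tensorflow.keras.utils import to_categorical

# One-hot encode the integer ground truth so its channel axis lines up with
# the model's per-feature predictions (an assumption; adjust the transform
# to match how your model's output channels are defined).
n_features = predict.shape[-1]
y_true_onehot = to_categorical(y_test[:, :, :, 0].astype('int'),
                               num_classes=n_features)

# Pixel-level statistics across the whole stack
m.all_pixel_stats(y_true_onehot, predict)

# Write the accumulated object and pixel stats to a dated JSON report
m.save_to_json(m.output)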