    def test_compute_ignore_class(self):
        class_config = ClassConfig(names=['one', 'two'])
        class_config.update()
        class_config.ensure_null_class()
        null_class_id = class_config.get_null_class_id()

        # The only disagreement between GT and prediction is at (0, 0), which
        # is labeled with the null class in the GT and is therefore ignored,
        # so class 0 and the average should both score a perfect F1.
        gt_array = np.zeros((4, 4, 1), dtype=np.uint8)
        gt_array[0, 0, 0] = 2
        gt_raster = MockRasterSource([0], 1)
        gt_raster.set_raster(gt_array)
        gt_label_source = SemanticSegmentationLabelSource(
            gt_raster, null_class_id)

        pred_array = np.zeros((4, 4, 1), dtype=np.uint8)
        pred_array[0, 0, 0] = 1
        pred_raster = MockRasterSource([0], 1)
        pred_raster.set_raster(pred_array)
        pred_label_source = SemanticSegmentationLabelSource(
            pred_raster, null_class_id)

        eval = SemanticSegmentationEvaluation(class_config)
        eval.compute(gt_label_source.get_labels(),
                     pred_label_source.get_labels())
        self.assertAlmostEqual(1.0, eval.class_to_eval_item[0].f1)
        self.assertAlmostEqual(1.0, eval.avg_item.f1)
    def test_compute(self):
        class_config = ClassConfig(names=['one', 'two'])
        class_config.update()
        class_config.ensure_null_class()
        null_class_id = class_config.get_null_class_id()

        gt_array = np.zeros((4, 4, 1), dtype=np.uint8)
        gt_array[2, 2, 0] = 1
        gt_array[0, 0, 0] = 2
        gt_raster = MockRasterSource([0], 1)
        gt_raster.set_raster(gt_array)
        gt_label_source = SemanticSegmentationLabelSource(
            gt_raster, null_class_id)

        p_array = np.zeros((4, 4, 1), dtype=np.uint8)
        p_array[1, 1, 0] = 1
        p_raster = MockRasterSource([0], 1)
        p_raster.set_raster(p_array)
        p_label_source = SemanticSegmentationLabelSource(
            p_raster, null_class_id)

        eval = SemanticSegmentationEvaluation(class_config)
        eval.compute(gt_label_source.get_labels(),
                     p_label_source.get_labels())

        # Expected metrics for class 0. (An illustrative sketch of this
        # tp/fp/fn arithmetic appears after these tests.)
        tp0 = 16 - 3  # 4*4 - 3 = 13 true positives for class 0
        fp0 = 1  # one false positive at (2, 2); (0, 0) is the null class and is ignored
        fn0 = 1  # one false negative at (1, 1)
        precision0 = float(tp0) / (tp0 + fp0)
        recall0 = float(tp0) / (tp0 + fn0)
        f10 = 2 * float(precision0 * recall0) / (precision0 + recall0)

        # Expected metrics for class 1.
        tp1 = 0  # no true positives for class 1
        fn1 = 1  # one false negative at (2, 2)
        precision1 = 0  # float(tp1) / (tp1 + fp1) where fp1 == 1
        recall1 = float(tp1) / (tp1 + fn1)
        f11 = None  # undefined since precision and recall are both 0

        self.assertAlmostEqual(precision0,
                               eval.class_to_eval_item[0].precision)
        self.assertAlmostEqual(recall0, eval.class_to_eval_item[0].recall)
        self.assertAlmostEqual(f10, eval.class_to_eval_item[0].f1)

        self.assertEqual(precision1, eval.class_to_eval_item[1].precision)
        self.assertAlmostEqual(recall1, eval.class_to_eval_item[1].recall)
        self.assertAlmostEqual(f11, eval.class_to_eval_item[1].f1)

        avg_conf_mat = np.array([
            [0, 0, 0],
            [13., 1, 0],
            [1, 0, 0],
        ])
        avg_recall = (14 / 15) * recall0 + (1 / 15) * recall1
        self.assertTrue(np.array_equal(avg_conf_mat, eval.avg_item.conf_mat))
        self.assertEqual(avg_recall, eval.avg_item.recall)
    def test_get_labels_rgb(self):
        data = np.zeros((10, 10, 3), dtype=np.uint8)
        data[7:, 7:, :] = [1, 1, 1]
        null_class_id = 2
        raster_source = MockRasterSource([0, 1, 2], 3)
        raster_source.set_raster(data)
        rgb_class_config = ClassConfig(names=['a'], colors=['#010101'])
        rgb_class_config.ensure_null_class()
        label_source = SemanticSegmentationLabelSource(
            raster_source, null_class_id, rgb_class_config=rgb_class_config)
        with label_source.activate():
            window = Box.make_square(7, 7, 3)
            labels = label_source.get_labels(window=window)
            label_arr = labels.get_label_arr(window)
            expected_label_arr = np.zeros((3, 3))
            np.testing.assert_array_equal(label_arr, expected_label_arr)
    def test_vector_compute(self):
        class_config = ClassConfig(names=['one', 'two'])
        class_config.update()
        class_config.ensure_null_class()

        gt_uri = data_file_path('2-gt-polygons.geojson')
        pred_uri = data_file_path('2-pred-polygons.geojson')

        eval = SemanticSegmentationEvaluation(class_config)
        eval.compute_vector(gt_uri, pred_uri, 'polygons', 0)

        # NOTE: The two GeoJSON files referenced above contain three unique
        # geometries in total: each file contains two geometries, and one
        # geometry is shared between them. This gives one true positive, one
        # false positive, and one false negative.
        tp = 1.0
        fp = 1.0
        fn = 1.0
        precision = float(tp) / (tp + fp)
        recall = float(tp) / (tp + fn)

        self.assertAlmostEqual(precision,
                               eval.class_to_eval_item[0].precision)
        self.assertAlmostEqual(recall, eval.class_to_eval_item[0].recall)
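
# The tp/fp/fn arithmetic used in test_compute and test_vector_compute above
# follows the standard precision/recall/F1 definitions. The helper below is a
# minimal, standalone sketch of that arithmetic; it is not part of the
# evaluation API under test, and its name and the guard for an undefined F1
# are assumptions made for illustration only.
def _sketch_precision_recall_f1(tp, fp, fn):
    """Return (precision, recall, f1) for one class; f1 is None if undefined."""
    precision = tp / (tp + fp) if (tp + fp) > 0 else None
    recall = tp / (tp + fn) if (tp + fn) > 0 else None
    if not precision or not recall:
        # Mirrors the tests' expectation that F1 is None when there are no
        # true positives (precision and recall both 0 or undefined).
        return precision, recall, None
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1


# For the class-0 counts in test_compute (tp=13, fp=1, fn=1) this yields
# precision = recall = f1 = 13/14, matching the hand-computed values asserted
# there; for class 1 (tp=0, fp=1, fn=1) it yields (0.0, 0.0, None).
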
class TestSemanticSegmentationEvaluator(unittest.TestCase):
    def setUp(self):
        self.tmp_dir = rv_config.get_tmp_dir()
        self.class_config = ClassConfig(names=['one', 'two'])
        self.class_config.update()
        self.class_config.ensure_null_class()
        self.null_class_id = self.class_config.get_null_class_id()

    def tearDown(self):
        self.tmp_dir.cleanup()

    def get_scene(self, class_id):
        # Make a scene where the ground truth is all set to class_id and the
        # predictions are half 0's and half 1's.
        scene_id = str(class_id)
        rs = MockRasterSource(channel_order=[0, 1, 2], num_channels=3)
        rs.set_raster(np.zeros((10, 10, 3)))

        gt_rs = MockRasterSource(channel_order=[0], num_channels=1)
        gt_arr = np.full((10, 10, 1), class_id)
        gt_rs.set_raster(gt_arr)
        gt_ls = SemanticSegmentationLabelSource(gt_rs, self.null_class_id)

        pred_rs = MockRasterSource(channel_order=[0], num_channels=1)
        pred_arr = np.zeros((10, 10, 1))
        pred_arr[5:10, :, :] = 1
        pred_rs.set_raster(pred_arr)
        pred_ls = SemanticSegmentationLabelSource(pred_rs, self.null_class_id)

        return Scene(scene_id, rs, gt_ls, pred_ls)

    def test_evaluator(self):
        output_uri = join(self.tmp_dir.name, 'out.json')
        scenes = [self.get_scene(0), self.get_scene(1)]
        evaluator = SemanticSegmentationEvaluator(self.class_config,
                                                  output_uri, None)
        evaluator.process(scenes, self.tmp_dir.name)
        eval_json = file_to_json(output_uri)
        exp_eval_json = file_to_json(data_file_path('expected-eval.json'))
        self.assertDictEqual(eval_json, exp_eval_json)

    def get_vector_scene(self, class_id, use_aoi=False):
        gt_uri = data_file_path('{}-gt-polygons.geojson'.format(class_id))
        pred_uri = data_file_path('{}-pred-polygons.geojson'.format(class_id))

        scene_id = str(class_id)
        rs = MockRasterSource(channel_order=[0, 1, 3], num_channels=3)
        rs.set_raster(np.zeros((10, 10, 3)))

        crs_transformer = IdentityCRSTransformer()
        extent = Box.make_square(0, 0, 360)

        config = RasterizedSourceConfig(
            vector_source=GeoJSONVectorSourceConfig(
                uri=gt_uri, default_class_id=0),
            rasterizer_config=RasterizerConfig(background_class_id=1))
        gt_rs = config.build(self.class_config, crs_transformer, extent)
        gt_ls = SemanticSegmentationLabelSource(gt_rs, self.null_class_id)

        config = RasterizedSourceConfig(
            vector_source=GeoJSONVectorSourceConfig(
                uri=pred_uri, default_class_id=0),
            rasterizer_config=RasterizerConfig(background_class_id=1))
        pred_rs = config.build(self.class_config, crs_transformer, extent)
        pred_ls = SemanticSegmentationLabelSource(pred_rs, self.null_class_id)
        pred_ls.vector_output = [
            PolygonVectorOutputConfig(
                uri=pred_uri, denoise=0, class_id=class_id)
        ]

        if use_aoi:
            aoi_uri = data_file_path('{}-aoi.geojson'.format(class_id))
            aoi_geojson = file_to_json(aoi_uri)
            aoi_polygons = [shape(aoi_geojson['features'][0]['geometry'])]
            return Scene(scene_id, rs, gt_ls, pred_ls, aoi_polygons)

        return Scene(scene_id, rs, gt_ls, pred_ls)

    def test_vector_evaluator(self):
        output_uri = join(self.tmp_dir.name, 'raster-out.json')
        vector_output_uri = join(self.tmp_dir.name, 'vector-out.json')
        scenes = [self.get_vector_scene(0), self.get_vector_scene(1)]
        evaluator = SemanticSegmentationEvaluator(self.class_config,
                                                  output_uri,
                                                  vector_output_uri)
        evaluator.process(scenes, self.tmp_dir.name)

        vector_eval_json = file_to_json(vector_output_uri)
        exp_vector_eval_json = file_to_json(
            data_file_path('expected-vector-eval.json'))

        # NOTE: The precision and recall values in expected-vector-eval.json
        # are fractions of the form (n-1)/n for n <= 7, which have been
        # manually verified to be correct.
        self.assertDictEqual(vector_eval_json, exp_vector_eval_json)

    def test_vector_evaluator_with_aoi(self):
        output_uri = join(self.tmp_dir.name, 'raster-out.json')
        vector_output_uri = join(self.tmp_dir.name, 'vector-out.json')
        scenes = [self.get_vector_scene(0, use_aoi=True)]
        evaluator = SemanticSegmentationEvaluator(self.class_config,
                                                  output_uri,
                                                  vector_output_uri)
        evaluator.process(scenes, self.tmp_dir.name)

        vector_eval_json = file_to_json(vector_output_uri)
        exp_vector_eval_json = file_to_json(
            data_file_path('expected-vector-eval-with-aoi.json'))

        # NOTE: The precision and recall values in
        # expected-vector-eval-with-aoi.json are fractions of the form
        # (n-1)/n for n <= 7, which have been manually verified to be correct.
        self.assertDictEqual(vector_eval_json, exp_vector_eval_json)
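
# get_vector_scene above builds its AOI by converting the first GeoJSON
# feature into a shapely geometry via shapely.geometry.shape. The snippet
# below is a self-contained sketch of that conversion using an inline
# FeatureCollection with made-up coordinates; it is not part of the tests.
from shapely.geometry import shape as _shape_sketch

_example_aoi_geojson = {
    'type': 'FeatureCollection',
    'features': [{
        'type': 'Feature',
        'properties': {},
        'geometry': {
            'type': 'Polygon',
            'coordinates': [[[0, 0], [0, 5], [5, 5], [5, 0], [0, 0]]],
        },
    }],
}
# A list of shapely polygons like this is what Scene accepts as aoi_polygons.
_example_aoi_polygons = [
    _shape_sketch(feat['geometry'])
    for feat in _example_aoi_geojson['features']
]
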
def get_config(runner, root_uri, data_uri=None, full_train=False,
               nochip=False):
    def get_path(part):
        if full_train:
            return join(data_uri, part)
        else:
            return join(dirname(__file__), part)

    class_config = ClassConfig(
        names=['red', 'green'], colors=['red', 'green'])
    class_config.ensure_null_class()

    def make_scene(id, img_path, label_path):
        raster_source = RasterioSourceConfig(
            channel_order=[0, 1, 2], uris=[img_path])
        label_source = SemanticSegmentationLabelSourceConfig(
            rgb_class_config=class_config,
            raster_source=RasterioSourceConfig(uris=[label_path]))
        label_store = SemanticSegmentationLabelStoreConfig(
            rgb=True,
            vector_output=[
                PolygonVectorOutputConfig(class_id=0),
                BuildingVectorOutputConfig(class_id=1)
            ])
        return SceneConfig(
            id=id,
            raster_source=raster_source,
            label_source=label_source,
            label_store=label_store)

    chip_sz = 300
    img_sz = chip_sz

    scenes = [
        make_scene('test-scene', get_path('scene/image.tif'),
                   get_path('scene/labels.tif')),
        make_scene('test-scene2', get_path('scene/image2.tif'),
                   get_path('scene/labels2.tif'))
    ]
    scene_dataset = DatasetConfig(
        class_config=class_config,
        train_scenes=scenes,
        validation_scenes=scenes)

    chip_options = SemanticSegmentationChipOptions(
        window_method=SemanticSegmentationWindowMethod.sliding,
        stride=chip_sz)

    if nochip:
        window_opts = GeoDataWindowConfig(
            method=GeoDataWindowMethod.sliding,
            stride=chip_options.stride,
            size=chip_sz)
        data = SemanticSegmentationGeoDataConfig(
            scene_dataset=scene_dataset,
            window_opts=window_opts,
            img_sz=img_sz,
            augmentors=[])
    else:
        data = SemanticSegmentationImageDataConfig(
            img_sz=img_sz, augmentors=[])

    if full_train:
        model = SemanticSegmentationModelConfig(backbone=Backbone.resnet50)
        solver = SolverConfig(
            lr=1e-4,
            num_epochs=300,
            batch_sz=8,
            one_cycle=True,
            sync_interval=300)
    else:
        pretrained_uri = (
            'https://github.com/azavea/raster-vision-data/releases/download/v0.12/'
            'semantic-segmentation.pth')
        model = SemanticSegmentationModelConfig(
            backbone=Backbone.resnet50, init_weights=pretrained_uri)
        solver = SolverConfig(
            lr=1e-9,
            num_epochs=1,
            batch_sz=2,
            one_cycle=True,
            sync_interval=200)

    backend = PyTorchSemanticSegmentationConfig(
        data=data,
        model=model,
        solver=solver,
        log_tensorboard=False,
        run_tensorboard=False)

    return SemanticSegmentationConfig(
        root_uri=root_uri,
        dataset=scene_dataset,
        backend=backend,
        train_chip_sz=chip_sz,
        predict_chip_sz=chip_sz,
        chip_options=chip_options)
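
# get_config is the kind of entry point the Raster Vision CLI looks up in a
# config module. The block below is only an illustrative sketch showing that
# the function can also be called directly to build and inspect the pipeline
# config; the 'local' runner string and the /tmp output path are placeholder
# assumptions, not values used by the integration test itself.
if __name__ == '__main__':
    cfg = get_config(runner='local', root_uri='/tmp/rv-ss-test-output')
    # The returned config exposes the chip sizes and backend configured above;
    # printing a couple of fields is a cheap sanity check.
    print(cfg.train_chip_sz, cfg.predict_chip_sz)
    print(type(cfg.backend).__name__)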