def get_vector_scene(self, class_id, use_aoi=False):
    """Build a test Scene whose labels are rasterized from GeoJSON fixtures.

    Args:
        class_id: id used to locate the ground-truth/prediction fixture
            files and used as the scene id
        use_aoi: if True, attach the AOI polygon fixture to the scene

    Returns:
        Scene wired with a mock raster source, a rasterized ground-truth
        label source, and a rasterized prediction label source (with
        vector_output metadata attached).
    """
    gt_uri = data_file_path('{}-gt-polygons.geojson'.format(class_id))
    pred_uri = data_file_path('{}-pred-polygons.geojson'.format(class_id))

    scene_id = str(class_id)
    raster_source = MockRasterSource(channel_order=[0, 1, 3], num_channels=3)
    raster_source.set_raster(np.zeros((10, 10, 3)))

    crs_transformer = IdentityCRSTransformer()
    extent = Box.make_square(0, 0, 360)

    def rasterize(uri):
        # Rasterize the polygons in the GeoJSON file over the scene extent.
        return RasterizedSource(
            GeoJSONVectorSource(uri, crs_transformer),
            RasterizedSourceConfig.RasterizerOptions(2), extent,
            crs_transformer)

    gt_label_source = SemanticSegmentationLabelSource(source=rasterize(gt_uri))
    pred_label_source = SemanticSegmentationLabelSource(
        source=rasterize(pred_uri))
    # Attach vector output metadata so the vector evaluation can locate the
    # prediction polygons.
    pred_label_source.vector_output = [{
        'uri': pred_uri,
        'denoise': 0,
        'mode': 'polygons',
        'class_id': class_id
    }]

    if not use_aoi:
        return Scene(scene_id, raster_source, gt_label_source,
                     pred_label_source)

    aoi_uri = data_file_path('{}-aoi.geojson'.format(class_id))
    aoi_geojson = json.loads(file_to_str(aoi_uri))
    aoi_polygons = [shape(aoi_geojson['features'][0]['geometry'])]
    return Scene(scene_id, raster_source, gt_label_source, pred_label_source,
                 aoi_polygons)
def process(self, scenes, tmp_dir):
    """Compute raster (and, when available, vector) evaluations over scenes.

    For every scene, compares ground truth labels against predicted labels
    (restricting both to the scene's AOI polygons when present), merges the
    per-scene results into an aggregate evaluation, and saves the aggregate
    to self.output_uri. If the label source/store expose vector data
    (a `source.vector_source` on the label source and `vector_output` on the
    label store), a parallel vector evaluation is computed and saved to
    self.vector_output_uri.

    Args:
        scenes: iterable of Scene objects to evaluate
        tmp_dir: temporary directory (unused here; kept for interface
            compatibility with the caller)
    """
    evaluation = self.create_evaluation()
    vect_evaluation = self.create_evaluation()
    for scene in scenes:
        log.info('Computing evaluation for scene {}...'.format(scene.id))
        label_source = scene.ground_truth_label_source
        label_store = scene.prediction_label_store
        # Both sources must be activated before labels can be read.
        with ActivateMixin.compose(label_source, label_store):
            ground_truth = label_source.get_labels()
            predictions = label_store.get_labels()

            if scene.aoi_polygons:
                # Filter labels based on AOI.
                ground_truth = ground_truth.filter_by_aoi(
                    scene.aoi_polygons)
                predictions = predictions.filter_by_aoi(scene.aoi_polygons)
            scene_evaluation = self.create_evaluation()
            scene_evaluation.compute(ground_truth, predictions)
            evaluation.merge(scene_evaluation, scene_id=scene.id)

            # Vector evaluation is only possible when the ground truth came
            # from a vector source AND the prediction store declares vector
            # outputs; the hasattr chain guards both conditions.
            if hasattr(label_source, 'source') and hasattr(
                    label_source.source, 'vector_source') and hasattr(
                        label_store, 'vector_output'):
                gt_geojson = label_source.source.vector_source.get_geojson()
                for vo in label_store.vector_output:
                    pred_geojson_uri = vo['uri']
                    mode = vo['mode']
                    class_id = vo['class_id']
                    pred_geojson_source = GeoJSONVectorSource(
                        pred_geojson_uri,
                        scene.raster_source.get_crs_transformer())
                    pred_geojson = pred_geojson_source.get_geojson()
                    if scene.aoi_polygons:
                        # NOTE(review): gt_geojson is rebound here, so it is
                        # re-filtered on each vector_output iteration —
                        # presumably idempotent; verify.
                        gt_geojson = filter_geojson_by_aoi(
                            gt_geojson, scene.aoi_polygons)
                        pred_geojson = filter_geojson_by_aoi(
                            pred_geojson, scene.aoi_polygons)
                    vect_scene_evaluation = self.create_evaluation()
                    vect_scene_evaluation.compute_vector(
                        gt_geojson, pred_geojson, mode, class_id)
                    vect_evaluation.merge(
                        vect_scene_evaluation, scene_id=scene.id)

    if not evaluation.is_empty():
        evaluation.save(self.output_uri)
    if not vect_evaluation.is_empty():
        vect_evaluation.save(self.vector_output_uri)
def create_scene(self, task_config: TaskConfig, tmp_dir: str) -> Scene:
    """Create this scene.

    Args:
        task_config: TaskConfig used when constructing label sources/stores
        tmp_dir: temporary directory to use

    Returns:
        Scene assembled from this config's raster source, optional label
        source/store, and optional AOI polygons.
    """
    raster_source = self.raster_source.create_source(tmp_dir)
    extent = raster_source.get_extent()
    crs_transformer = raster_source.get_crs_transformer()

    # Label source and store are both optional.
    label_source = None
    if self.label_source:
        label_source = self.label_source.create_source(
            task_config, extent, crs_transformer, tmp_dir)

    label_store = None
    if self.label_store:
        label_store = self.label_store.create_store(
            task_config, extent, crs_transformer, tmp_dir)

    aoi_polygons = None
    if self.aoi_uris:
        # Collect every feature geometry from every AOI file.
        aoi_polygons = [
            shape(feature['geometry']) for uri in self.aoi_uris for feature in
            GeoJSONVectorSource(uri, crs_transformer).get_geojson()['features']
        ]

    return Scene(self.id, raster_source, label_source, label_store,
                 aoi_polygons)
def save_image_crop(image_uri,
                    image_crop_uri,
                    label_uri=None,
                    label_crop_uri=None,
                    size=600,
                    min_features=10,
                    vector_labels=True):
    """Save a crop of an image to use for testing.

    If label_uri is set, the crop needs to cover >= min_features.

    Args:
        image_uri: URI of original image
        image_crop_uri: URI of cropped image to save
        label_uri: optional URI of label file
        label_crop_uri: optional URI of cropped labels to save
        size: height and width of crop
        min_features: minimum number of label features a usable crop must
            intersect (only checked when label_uri and vector_labels are set)
        vector_labels: if True, labels are GeoJSON polygons; if False,
            labels are raster files cropped with crop_image

    Raises:
        ValueError if cannot find a crop satisfying min_features constraint.
    """
    if file_exists(image_crop_uri):
        return

    print('Saving test crop to {}...'.format(image_crop_uri))
    old_environ = os.environ.copy()
    try:
        # Temporarily enable requester-pays so S3 reads work; restored in
        # the finally block.
        request_payer = S3FileSystem.get_request_payer()
        if request_payer == 'requester':
            os.environ['AWS_REQUEST_PAYER'] = request_payer

        im_dataset = rasterio.open(image_uri)
        h, w = im_dataset.height, im_dataset.width
        extent = Box(0, 0, h, w)
        windows = extent.get_windows(size, size)

        if label_uri and vector_labels:
            crs_transformer = RasterioCRSTransformer.from_dataset(im_dataset)
            vs = GeoJSONVectorSource(label_uri, crs_transformer)
            geojson = vs.get_geojson()
            geoms = [shape(f['geometry']) for f in geojson['features']]
            # Spatial index for fast "which polygons fall in this window".
            tree = STRtree(geoms)

        def p2m(x, y, z=None):
            # Transform pixel coords back to map coords for saved labels.
            return crs_transformer.pixel_to_map((x, y))

        # BUG FIX: initialize so an empty window list raises ValueError
        # below instead of NameError. Loop variable renamed from `w`
        # (which shadowed the image width above) to `window`.
        use_window = False
        for window in windows:
            use_window = True
            if label_uri and vector_labels:
                w_polys = tree.query(window.to_shapely())
                use_window = len(w_polys) >= min_features
                if use_window and label_crop_uri is not None:
                    print('Saving test crop labels to {}...'.format(
                        label_crop_uri))
                    label_crop_features = [
                        mapping(shapely.ops.transform(p2m, wp))
                        for wp in w_polys
                    ]
                    label_crop_json = {
                        'type': 'FeatureCollection',
                        'features': [{
                            'geometry': f
                        } for f in label_crop_features]
                    }
                    json_to_file(label_crop_json, label_crop_uri)
            if use_window:
                crop_image(image_uri, window, image_crop_uri)
                if not vector_labels and label_uri and label_crop_uri:
                    crop_image(label_uri, window, label_crop_uri)
                break

        if not use_window:
            # BUG FIX: string literal was broken across two lines in the
            # original, which was a syntax error.
            raise ValueError('Could not find a good crop.')
    finally:
        os.environ.clear()
        os.environ.update(old_environ)
def save_image_crop(image_uri, crop_uri, label_uri=None, size=600,
                    min_features=10):
    """Save a crop of an image to use for testing.

    If label_uri is set, the crop needs to cover >= min_features.

    Args:
        image_uri: URI of original image
        crop_uri: URI of cropped image to save
        label_uri: optional URI of GeoJSON file
        size: height and width of crop

    Raises:
        ValueError if cannot find a crop satisfying min_features constraint.
    """
    if file_exists(crop_uri):
        return

    print('Saving test crop to {}...'.format(crop_uri))
    saved_environ = os.environ.copy()
    try:
        # Temporarily enable requester-pays for S3 reads; restored below.
        request_payer = S3FileSystem.get_request_payer()
        if request_payer == 'requester':
            os.environ['AWS_REQUEST_PAYER'] = request_payer

        im_dataset = rasterio.open(image_uri)
        height, width = im_dataset.height, im_dataset.width
        candidate_windows = Box(0, 0, height, width).get_windows(size, size)

        tree = None
        if label_uri is not None:
            crs_transformer = RasterioCRSTransformer.from_dataset(im_dataset)
            geojson = GeoJSONVectorSource(label_uri,
                                          crs_transformer).get_geojson()
            # Spatial index over all label geometries.
            tree = STRtree([shape(f['geometry']) for f in geojson['features']])

        for window in candidate_windows:
            use_window = True
            if tree is not None:
                hits = tree.query(window.to_shapely())
                use_window = len(hits) >= min_features
            if use_window:
                rio_window = window.rasterio_format()
                im = im_dataset.read(window=rio_window)
                # Skip windows that are mostly (>= 90%) zero/nodata pixels.
                if np.mean(np.sum(im, axis=2).ravel() == 0) < 0.9:
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        crop_path = get_local_path(crop_uri, tmp_dir)
                        make_dir(crop_path, use_dirname=True)

                        meta = im_dataset.meta
                        meta['width'], meta['height'] = size, size
                        meta['transform'] = rasterio.windows.transform(
                            rio_window, im_dataset.transform)

                        with rasterio.open(crop_path, 'w', **meta) as dst:
                            dst.colorinterp = im_dataset.colorinterp
                            dst.write(im)
                        upload_or_copy(crop_path, crop_uri)
                    break

        if not use_window:
            raise ValueError('Could not find a good crop.')
    finally:
        os.environ.clear()
        os.environ.update(saved_environ)