def img_to_tracks(fname):
    """Convert tracking results saved in an OME-TIFF image to a dict of tracks.

    Parameters
    ----------
    fname: str
        Path of the OME-TIFF file containing the labelled tracking mask.

    Returns
    -------
    dict
        Maps each non-zero voxel value (track identifier) to a list of
        [T, X, Y, Z] coordinates; dimensions absent from the image's axis
        order are filled with 0.
    """
    img_data, order, _ = imread(fname, return_order=True)
    track_dict = defaultdict(list)
    where = np.where(img_data > 0)
    # Map each dimension letter to its axis position in the loaded array.
    order_idx = {d: i for i, d in enumerate(order)}
    for val, point in zip(img_data[where], zip(*where)):
        # Idiom fix: the original used order_idx.get(d, -1) behind a
        # `d in order_idx` guard, making the -1 default dead code.
        track_dict[val].append([
            point[order_idx[d]] if d in order_idx else 0
            for d in "TXYZ"
        ])
    return track_dict
def extract_tiled_annotations(in_tiles, out_path, nj, label_merging=False):
    """Extract annotations from tiled segmentation masks and merge them per image.

    Parameters
    ----------
    in_tiles: iterable
        List of BiaflowsTile
    out_path: str
        Path of output tiles
    nj: BiaflowsJob
        A BIAflows job object
    label_merging: bool
        True for merging only polygons having the same label. False for merging
        based on geometry only

    Returns
    -------
    AnnotationCollection
        One annotation per merged polygon.
    """
    # regroup tiles by original images
    grouped_tiles = defaultdict(list)
    for in_tile in in_tiles:
        grouped_tiles[in_tile.in_image.original_filename].append(in_tile)

    default_tile_builder = DefaultTileBuilder()
    annotations = AnnotationCollection()
    for tiles in grouped_tiles.values():
        # recreate the topology
        in_image = tiles[0].in_image
        topology = BiaflowsSldcImage(in_image, is_2d=True).tile_topology(
            default_tile_builder,
            max_width=nj.flags["tile_width"],
            max_height=nj.flags["tile_height"],
            overlap=nj.flags["tile_overlap"])

        # extract polygons for each tile
        ids, polygons, labels = list(), list(), list()
        label = -1
        for tile in tiles:
            out_tile_path = os.path.join(out_path, tile.filename)
            slices = mask_to_objects_2d(imread(out_tile_path),
                                        offset=tile.tile.abs_offset[::-1])
            ids.append(tile.tile.identifier)
            polygons.append([s.polygon for s in slices])
            labels.append([s.label for s in slices])
            # save label for use after merging
            # Bug fix: store the slice's label, not the slice object itself,
            # so AnnotationSlice(p, label) below receives an actual label.
            if len(slices) > 0:
                label = slices[0].label

        # merge
        merged = SemanticMerger(tolerance=1).merge(
            ids, polygons, topology,
            labels=labels if label_merging else None)
        if label_merging:
            # merge(...) returns (polygons, labels) when labels are given.
            merged = merged[0]
        annotations.extend([
            create_annotation_from_slice(
                _slice=AnnotationSlice(p, label),
                id_image=in_image.object.id,
                image_height=in_image.object.height,
                id_project=nj.project.id,
            ) for p in merged
        ])
    return annotations
def extract_annotations_prttrk(out_path, in_image, project_id, track_prefix, **kwargs):
    """Extract particle-tracking tracks and annotations from a 3D (2D+T) mask.

    Parameters:
    -----------
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
        Prefix for the created track names ("-particle" is appended).
    kwargs: dict

    Returns
    -------
    (TrackCollection, AnnotationCollection)

    Raises
    ------
    ValueError
        If the mask dimensionality is not exactly 3.
    """
    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim != 3:
        # Message fix: the check rejects any dimensionality other than 3,
        # not only masks with more than 3 dims.
        raise ValueError(
            "Annotation extraction for object tracking requires masks with "
            "exactly 3 dims (got {}).".format(ndim)
        )

    # One slice group per track (labels are assumed unique across time).
    slices = mask_to_points_3d(data, time=True, assume_unique_labels=True)
    time_to_image = get_depth_to_slice(image)

    tracks = TrackCollection()
    annotations = AnnotationCollection()
    for slice_group in slices:
        curr_tracks, curr_annots = create_tracking_from_slice_group(
            image, slice_group,
            slice2point=lambda _slice: _slice.polygon,
            depth2slice=time_to_image, id_project=project_id,
            upload_object=False, track_prefix=track_prefix + "-particle",
            upload_group_id=True)
        tracks.extend(curr_tracks)
        annotations.extend(curr_annots)

    return tracks, annotations
def extract_annotations_pixcla(out_path, in_image, project_id, track_prefix, **kwargs):
    """Extract pixel-classification annotations from a labelled output mask.

    Parameters
    ----------
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """
    # Load the result mask together with its dimension order.
    result_path = os.path.join(out_path, in_image.filename)
    mask_data, dim_order, _ = imread(result_path, return_order=True)

    # Groups are only uploaded for masks with more than two dimensions.
    is_multidim = get_dimensionality(dim_order) > 2

    def _objects_3d(volume):
        # 3D extraction without assuming labels are unique across slices.
        return mask_to_objects_3d(volume, background=0, assume_unique_labels=False)

    return mask_convert(
        mask_data, in_image.object, project_id,
        mask_2d_fn=mask_to_objects_2d,
        mask_3d_fn=_objects_3d,
        track_prefix=track_prefix + "-object",
        upload_group_id=is_multidim)
def extract_annotations_objdet(out_path, in_image, project_id, track_prefix, is_csv=False,
                               generate_mask=False, result_file_suffix=".tif",
                               has_headers=False, parse_fn=None, **kwargs):
    """Extract object-detection points from a CSV file or a labelled mask.

    Parameters:
    -----------
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    is_csv: bool
        True if the output data are stored in a csv file
    generate_mask: bool
        If result is in a CSV, True for generating a mask based on the points
        in the csv. Ignored if is_csv is False. The mask file is generated in
        out_path with the name "{in_image.id}.png".
    result_file_suffix: str
        Suffix of the result filename (prefix being the image id).
    has_headers: bool
        True if the csv contains some headers (ignored if is_csv is False)
    parse_fn: callable
        A function for extracting coordinates from the csv file (already
        separated) line.
    track_prefix: str
    kwargs: dict

    Returns
    -------
    (TrackCollection, AnnotationCollection)
    """
    image = in_image.object
    file = str(image.id) + result_file_suffix
    path = os.path.join(out_path, file)
    tracks = TrackCollection()
    annotations = AnnotationCollection()

    if not os.path.isfile(path):
        print("No output file at '{}' for image with id:{}.".format(path, image.id),
              file=sys.stderr)
        # Bug fix: return the same (tracks, annotations) pair as the normal
        # path so callers can always unpack two values.
        return tracks, annotations

    # whether the points are stored in a csv or a mask
    if is_csv:
        # only 2d
        if parse_fn is None:
            raise ValueError("parse_fn shouldn't be 'None' when result file is a CSV.")
        points = csv_to_points(path, has_headers=has_headers, parse_fn=parse_fn)
        annotations.extend([
            create_annotation_from_slice(point, image.id, image.height, project_id,
                                         upload_group_id=False)
            for point in points
        ])
        if generate_mask:
            # Load the input image only to get the array shape and axis order.
            # Renamed from `image` to avoid shadowing the Cytomine image object.
            img_data, dim_order, _ = imread(in_image.filepath, return_order=True)
            mask = slices_to_mask(points, img_data.shape).squeeze()
            imwrite_ome(os.path.join(out_path, in_image.filename), mask,
                        dim_order=dim_order)
    else:
        # points stored in a mask
        # Bug fix: return_order=True is required for imread to return the
        # (data, dim_order, _) triple unpacked here (as in every other call).
        data, dim_order, _ = imread(path, return_order=True)
        tracks, annotations = mask_convert(
            data, image, project_id,
            mask_2d_fn=mask_to_points_2d,
            mask_3d_fn=lambda m: mask_to_points_3d(m, time=False,
                                                   assume_unique_labels=False),
            track_prefix=track_prefix + "-object",
            upload_group_id=get_dimensionality(dim_order) > 2)

    return tracks, annotations