Example #1
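These snippets are excerpts from BIAflows (NEUBIAS-WG5) job utilities built on the Cytomine Python client. They assume surrounding imports along these lines (a representative, assumed set; exact module paths may vary between versions):

import os
import sys
import warnings
from collections import defaultdict
from multiprocessing import cpu_count

from shapely.geometry import Polygon
from cytomine.models import (Annotation, AnnotationCollection, Property,
                             Track, TrackCollection)
# Tiling and mask/object helpers come from the BIAflows utility packages
# and sldc (assumed locations), e.g.:
# from sldc import SemanticMerger, DefaultTileBuilder
# from neubiaswg5.exporter import mask_to_objects_2d, mask_to_points_2d, ...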
    def upload_annotation(self, predicted_data, project_id):
        self.cj.job.update(progress=95, statusComment="Uploading annotations")

        annotations = AnnotationCollection()
        components = ObjectFinder(predicted_data).find_components()
        locations = []
        for component in components:
            location = Polygon(component[0], component[1])

            if location.is_valid:
                locations.append(location)
            else:
                fixed = fix_geometry(location)

                if fixed.is_valid and not fixed.is_empty:
                    locations.append(fixed)

        for idx, loc in enumerate(locations):
            if not loc.is_valid:
                fixed = fix_geometry(loc)
                if fixed.is_valid and not fixed.is_empty:
                    locations[idx] = fixed

        annotations.extend([
            create_annotation_from_location(loc, self.image_instance.id,
                                            self.image_instance.height,
                                            project_id) for loc in locations
        ])

        annotations.save(chunk=20)
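fix_geometry is not defined in this snippet. A common Shapely repair idiom, shown here as a minimal assumed sketch (the zero-width buffer trick), behaves the way the code above expects:

def fix_geometry(geometry):
    """Attempt to repair an invalid Shapely geometry (assumed sketch)."""
    if geometry.is_valid:
        return geometry
    # buffer(0) rebuilds the geometry and resolves most self-intersections
    fixed = geometry.buffer(0)
    if fixed.is_empty and not geometry.is_empty:
        # fall back to the convex hull if buffering collapsed the shape
        fixed = geometry.convex_hull
    return fixed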

Example #2
def extract_tiled_annotations(in_tiles, out_path, nj, label_merging=False):
    """
    in_tiles: iterable
        List of BiaflowsTile
    out_path: str
        Path of output tiles
    nj: BiaflowsJob
        A BIAflows job object
    label_merging: bool
        If True, merge only polygons sharing the same label; if False, merge based on geometry alone
    """
    # regroup tiles by original images
    grouped_tiles = defaultdict(list)
    for in_tile in in_tiles:
        grouped_tiles[in_tile.in_image.original_filename].append(in_tile)

    default_tile_builder = DefaultTileBuilder()
    annotations = AnnotationCollection()
    for tiles in grouped_tiles.values():
        # recreate the topology
        in_image = tiles[0].in_image
        topology = BiaflowsSldcImage(in_image, is_2d=True).tile_topology(
            default_tile_builder,
            max_width=nj.flags["tile_width"],
            max_height=nj.flags["tile_height"],
            overlap=nj.flags["tile_overlap"])

        # extract polygons for each tile
        ids, polygons, labels = list(), list(), list()
        label = -1
        for tile in tiles:
            out_tile_path = os.path.join(out_path, tile.filename)
            slices = mask_to_objects_2d(imread(out_tile_path),
                                        offset=tile.tile.abs_offset[::-1])
            ids.append(tile.tile.identifier)
            polygons.append([s.polygon for s in slices])
            labels.append([s.label for s in slices])
            # save a label for reuse after merging
            if len(slices) > 0:
                label = slices[0].label

        # merge
        merged = SemanticMerger(tolerance=1).merge(
            ids, polygons, topology, labels=labels if label_merging else None)
        if label_merging:
            # merge() returns (polygons, labels) when labels are given;
            # keep only the polygons
            merged = merged[0]
        annotations.extend([
            create_annotation_from_slice(
                _slice=AnnotationSlice(p, label),
                id_image=in_image.object.id,
                image_height=in_image.object.height,
                id_project=nj.project.id,
            ) for p in merged
        ])
    return annotations
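SemanticMerger (from the sldc package) stitches polygons that were cut by tile borders back into single objects. The underlying idea, shown here as a minimal standalone Shapely sketch (not the actual merger implementation), is to union fragments that touch at a tile boundary:

from shapely.geometry import box
from shapely.ops import unary_union

# two halves of one object, split at the tile border x = 256
left = box(200, 100, 256, 180)
right = box(256, 100, 310, 180)
merged = unary_union([left, right])  # one polygon spanning both tiles
print(merged.area == left.area + right.area)  # True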

Example #3
def mask_convert(mask,
                 image,
                 project_id,
                 mask_2d_fn,
                 mask_3d_fn,
                 track_prefix,
                 upload_group_id=False):
    """Generic function to convert a mask into an annotation collection

    Parameters
    ----------
    mask: ndarray
        The mask to convert (2D or 3D)
    image: ImageInstance
        Image instance the mask was generated for
    project_id: int
        Identifier of the target project
    mask_2d_fn: callable
        Function extracting annotation slices from a 2D mask
    mask_3d_fn: callable
        Function extracting grouped annotation slices from a 3D mask
    track_prefix: str
        Prefix for the names of the created tracks (3D masks only)
    upload_group_id: bool
        True to attach the object's group/label identifier to the uploaded annotations

    Returns
    -------
    tracks: TrackCollection
        Tracks that have been saved
    annotations: AnnotationCollection
        Annotations that have NOT been saved
    """
    tracks = TrackCollection()
    annotations = AnnotationCollection()
    if mask.ndim == 2:
        slices = mask_2d_fn(mask)
        annotations.extend([
            create_annotation_from_slice(s,
                                         image.id,
                                         image.height,
                                         project_id,
                                         upload_group_id=upload_group_id)
            for s in slices
        ])
    elif mask.ndim == 3:
        slices = mask_3d_fn(mask)
        depth_to_slice = get_depth_to_slice(image)
        for obj_id, obj in enumerate(slices):
            track, curr_annotations = create_track_from_slices(
                image,
                obj,
                label=obj_id,
                depth2slice=depth_to_slice,
                track_prefix=track_prefix,
                id_project=project_id,
                upload_group_id=upload_group_id)
            tracks.append(track)
            annotations.extend(curr_annotations)
    else:
        raise ValueError("Only supports 2D or 3D output images...")
    return tracks, annotations
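As used by extract_annotations_objdet further down, a typical call looks like this (a sketch; mask, image and nj are assumed to be in scope, and mask_to_points_2d/mask_to_points_3d come from the same utility package):

tracks, annotations = mask_convert(
    mask,                      # labeled ndarray read from the output file
    image,                     # cytomine ImageInstance
    project_id=nj.project.id,
    mask_2d_fn=mask_to_points_2d,
    mask_3d_fn=lambda m: mask_to_points_3d(m, time=False,
                                           assume_unique_labels=False),
    track_prefix=str(nj.job.id) + "-object",
    upload_group_id=False)
annotations.save(chunk=20)  # tracks are saved inside, annotations are not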

Example #4
def extract_annotations_prttrk(out_path, in_image, project_id, track_prefix,
                               **kwargs):
    """
    Parameters
    ----------
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """

    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim != 3:
        raise ValueError(
            "Annotation extraction for particle tracking requires a 3D mask "
            "(got {} dimension(s))...".format(ndim))

    slices = mask_to_points_3d(data, time=True, assume_unique_labels=True)
    time_to_image = get_depth_to_slice(image)

    tracks = TrackCollection()
    annotations = AnnotationCollection()
    for slice_group in slices:
        curr_tracks, curr_annots = create_tracking_from_slice_group(
            image,
            slice_group,
            slice2point=lambda _slice: _slice.polygon,
            depth2slice=time_to_image,
            id_project=project_id,
            upload_object=False,
            track_prefix=track_prefix + "-particle",
            upload_group_id=True)
        tracks.extend(curr_tracks)
        annotations.extend(curr_annots)

    return tracks, annotations

Example #5
def upload_data(problemclass,
                nj,
                inputs,
                out_path,
                monitor_params=None,
                **kwargs):
    """Upload annotations or any other related results to the server.

    Parameters
    ----------
    problemclass: str
        The problem class
    nj: CytomineJob|BiaflowsJob
        The CytomineJob or BiaflowsJob object
    inputs: list
        Input data, as returned by prepare_data
    out_path: str
        Output path
    monitor_params: dict|None
        A dictionary of parameters to be passed to the data upload loop monitor.
    kwargs: dict
        Additional parameters for:
        * ObjDet/SptCnt: see function 'extract_annotations_objdet'
        * ObjSeg: see function 'extract_annotations_objseg'
    """
    if "is_2d" in kwargs:
        warnings.warn(
            "As of version 0.9.3, the 'is_2d' parameter is not needed anymore in function 'upload_data' and "
            "is now ignored.", DeprecationWarning)
    if not nj.flags["do_upload_annotations"]:
        return
    if nj.flags["tiling"] and (problemclass != CLASS_OBJSEG
                               and problemclass != CLASS_PIXCLA):
        print(
            "Annot. upload is only supported for one of {ObjSeg, PixCla} in 2D when tiling is enabled.. skipping !"
        )
        return
    if monitor_params is None:
        monitor_params = dict()

    annotations = AnnotationCollection()

    if nj.flags["tiling"]:
        annotations.extend(
            extract_tiled_annotations(
                inputs,
                out_path,
                nj,
                label_merging=problemclass == CLASS_PIXCLA))
    else:
        if problemclass == CLASS_OBJSEG:
            extract_fn = extract_annotations_objseg
        elif problemclass == CLASS_PIXCLA:
            extract_fn = extract_annotations_pixcla
        elif problemclass in (CLASS_OBJDET, CLASS_SPTCNT, CLASS_LNDDET):
            extract_fn = extract_annotations_objdet
        elif problemclass in (CLASS_LOOTRC, CLASS_TRETRC):
            extract_fn = extract_annotations_lootrc
        elif problemclass == CLASS_PRTTRK:
            extract_fn = extract_annotations_prttrk
        elif problemclass == CLASS_OBJTRK:
            extract_fn = extract_annotations_objtrk
        else:
            raise NotImplementedError(
                "Upload data does not support problem class '{}' yet.".format(
                    problemclass))

        tracks = TrackCollection()
        monitor_params["prefix"] = "Extract masks/points/... from output data"
        for in_image in nj.monitor(inputs, **monitor_params):
            curr_tracks, curr_annots = extract_fn(out_path,
                                                  in_image,
                                                  nj.project.id,
                                                  track_prefix=str(nj.job.id),
                                                  **kwargs)
            tracks.extend(curr_tracks)
            annotations.extend(curr_annots)

    nj.job.update(
        statusComment="Upload extracted annotations (total: {})".format(
            len(annotations)))
    annotations.save(chunk=20, n_workers=min(4, cpu_count() * 2))
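upload_data is typically the final step of a BIAflows workflow script. A sketch of the surrounding driver code, under assumed names (prepare_data and its exact return values are not shown on this page, and run_segmentation is a placeholder for the actual analysis):

with BiaflowsJob.from_cli(sys.argv[1:]) as nj:
    nj.job.update(statusComment="Initialisation...")
    # prepare_data (assumed signature) downloads the input images and
    # creates the in/gt/out/tmp working directories
    in_images, gt_images, in_path, gt_path, out_path, tmp_path = \
        prepare_data(CLASS_OBJSEG, nj)

    run_segmentation(in_images, out_path)  # placeholder analysis step

    upload_data(CLASS_OBJSEG, nj, in_images, out_path,
                monitor_params={"start": 60, "end": 90, "period": 0.1})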

Example #6
def extract_annotations_objtrk(out_path, in_image, project_id, track_prefix,
                               **kwargs):
    """
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """
    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim < 3:
        raise ValueError(
            "Object tracking should be at least 3D (only {} spatial dimension(s) found)"
            .format(ndim))

    tracks = TrackCollection()
    annotations = AnnotationCollection()

    if ndim == 3:
        slices = mask_to_objects_3d(data, time=True, assume_unique_labels=True)
        time_to_image = get_depth_to_slice(image)

        for slice_group in slices:
            curr_tracks, curr_annots = create_tracking_from_slice_group(
                image,
                slice_group,
                slice2point=lambda _slice: _slice.polygon.centroid,
                depth2slice=time_to_image,
                id_project=project_id,
                upload_object=True,
                upload_group_id=True,
                track_prefix=track_prefix + "-object")
            tracks.extend(curr_tracks)
            annotations.extend(curr_annots)
    elif ndim == 4:
        objects = mask_to_objects_3dt(mask=data)
        depths_to_image = get_depth_to_slice(image, depth=("time", "depth"))
        # TODO add tracking lines one way or another
        for time_steps in objects:
            label = time_steps[0][0].label
            track = Track(name="{}-{}".format(track_prefix, label),
                          id_image=image.id,
                          color=DEFAULT_COLOR).save()
            Property(track, key="label", value=label).save()
            annotations.extend([
                Annotation(location=change_referential(
                    p=_slice.polygon, height=image.height).wkt,
                           id_image=image.id,
                           id_project=project_id,
                           id_tracks=[track.id],
                           slice=depths_to_image[(_slice.time, _slice.depth)].id)
                for group in time_steps for _slice in group
            ])

            tracks.append(track)

    else:
        raise ValueError(
            "Annotation extraction for object tracking does not support masks with more than 4 dims..."
        )

    return tracks, annotations
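change_referential flips mask coordinates (top-left origin) into Cytomine's cartesian referential (bottom-left origin). It is not shown on this page; a minimal sketch of the idea using Shapely's affine_transform:

from shapely.affinity import affine_transform

def change_referential(p, height):
    # x' = x, y' = height - y: mirror the geometry vertically so it
    # lands in the bottom-left-origin referential used by Cytomine
    return affine_transform(p, [1, 0, 0, -1, 0, height])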

Example #7
def extract_annotations_objdet(out_path,
                               in_image,
                               project_id,
                               track_prefix,
                               is_csv=False,
                               generate_mask=False,
                               result_file_suffix=".tif",
                               has_headers=False,
                               parse_fn=None,
                               **kwargs):
    """
    Parameters
    ----------
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    is_csv: bool
        True if the output data are stored in a CSV file
    generate_mask: bool
        If the result is a CSV, True for generating a mask based on the points in the CSV (ignored if is_csv is False).
        The mask file is generated in out_path with the name "{in_image.id}.png".
    result_file_suffix: str
        Suffix of the result filename (the prefix being the image id).
    has_headers: bool
        True if the CSV contains headers (ignored if is_csv is False)
    parse_fn: callable
        A function extracting coordinates from an (already split) line of the CSV file.
    track_prefix: str
    kwargs: dict
    """
    image = in_image.object
    file = str(image.id) + result_file_suffix
    path = os.path.join(out_path, file)

    tracks = TrackCollection()
    annotations = AnnotationCollection()
    if not os.path.isfile(path):
        print("No output file at '{}' for image with id:{}.".format(
            path, image.id),
              file=sys.stderr)
        # callers unpack a (tracks, annotations) pair: return both collections
        return tracks, annotations

    # whether the points are stored in a csv or a mask
    if is_csv:  # only 2d
        if parse_fn is None:
            raise ValueError(
                "parse_fn shouldn't be 'None' when result file is a CSV.")
        points = csv_to_points(path,
                               has_headers=has_headers,
                               parse_fn=parse_fn)
        annotations.extend([
            create_annotation_from_slice(point,
                                         image.id,
                                         image.height,
                                         project_id,
                                         upload_group_id=False)
            for point in points
        ])

        if generate_mask:
            img_data, dim_order, _ = imread(in_image.filepath, return_order=True)
            mask = slices_to_mask(points, img_data.shape).squeeze()
            imwrite_ome(os.path.join(out_path, in_image.filename),
                        mask,
                        dim_order=dim_order)
    else:
        # points stored in a mask
        data, dim_order, _ = imread(path, return_order=True)
        tracks, annotations = mask_convert(
            data,
            image,
            project_id,
            mask_2d_fn=mask_to_points_2d,
            mask_3d_fn=lambda m: mask_to_points_3d(
                m, time=False, assume_unique_labels=False),
            track_prefix=track_prefix + "-object",
            upload_group_id=get_dimensionality(dim_order) > 2)

    return tracks, annotations
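parse_fn receives one already-split line of the CSV and must return the point's coordinates. A plausible hook and call, assuming a simple x,y column layout (the actual CSV format depends on the workflow):

def parse_xy(row):
    # row is one CSV line, already split into fields
    return float(row[0]), float(row[1])

tracks, annotations = extract_annotations_objdet(
    out_path, in_image, project_id,
    track_prefix=str(nj.job.id),
    is_csv=True, has_headers=True,
    parse_fn=parse_xy, result_file_suffix=".csv")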