def testTwoObjectsOneSpanning(self):
        # Object with label 100 spans all three frames and grows on the last
        # one; object with label 200 appears on frame 1 only.
        image = np.zeros([3, 100, 200], dtype=np.uint8)
        image[0, :, :] = draw_square_by_corner(image[0, :, :], 10, (5, 10),
                                               100)
        image[1, :, :] = draw_square_by_corner(image[1, :, :], 10, (5, 10),
                                               100)
        image[2, :, :] = draw_square_by_corner(image[2, :, :], 12, (5, 10),
                                               100)
        image[1, :, :] = draw_square_by_corner(image[1, :, :], 25, (45, 40),
                                               200)
        slices = mask_to_objects_3d(image, assume_unique_labels=True)

        self.assertIsInstance(slices, list)
        self.assertEqual(len(slices), 2, msg="found 2 objects")

        fslice1 = [sl for sl in slices if len(sl) > 1]
        self.assertEqual(
            len(fslice1),
            1,
            msg="there is exactly one object with more than one slice")
        slice1 = fslice1[0]
        self.assertEqual(slice1[0].label, 100)
        self.assertEqual(slice1[0].depth, 0)
        self.assertTrue(slice1[0].polygon.equals(box(10, 5, 21, 16)))
        self.assertEqual(slice1[1].label, 100)
        self.assertEqual(slice1[1].depth, 1)
        self.assertTrue(slice1[1].polygon.equals(box(10, 5, 21, 16)))
        self.assertEqual(slice1[2].label, 100)
        self.assertEqual(slice1[2].depth, 2)
        self.assertTrue(slice1[2].polygon.equals(box(10, 5, 23, 18)))

        fslice2 = [sl for sl in slices if len(sl) == 1]
        self.assertEqual(len(fslice2),
                         1,
                         msg="there is exactly one object with one slice")
        slice2 = fslice2[0]
        self.assertEqual(len(slice2), 1)
        self.assertEqual(slice2[0].label, 200)
        self.assertEqual(slice2[0].depth, 1)
        self.assertTrue(slice2[0].polygon.equals(box(40, 45, 66, 71)))
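
The draw_square_by_corner helper used above is not part of the snippet. Below is a minimal sketch consistent with the asserted boxes; the name, the argument order and the (row, col) corner convention are inferred from the calls, so the real helper may differ.

def draw_square_by_corner(plane, side, corner, value):
    # Hypothetical test helper: label a square of (side + 1) pixels per edge
    # whose top-left corner sits at (row, col). The extra pixel makes the
    # traced polygons match the boxes asserted above, e.g. side 10 at (5, 10)
    # yields a region whose polygon equals box(10, 5, 21, 16).
    row, col = corner
    out = plane.copy()
    out[row:row + side + 1, col:col + side + 1] = value
    return out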
Example #2
def extract_annotations_pixcla(out_path, in_image, project_id, track_prefix,
                               **kwargs):
    """
    Parameters
    ----------
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """
    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    return mask_convert(data,
                        image,
                        project_id,
                        mask_2d_fn=mask_to_objects_2d,
                        mask_3d_fn=lambda m: mask_to_objects_3d(
                            m, background=0, assume_unique_labels=False),
                        track_prefix=track_prefix + "-object",
                        upload_group_id=get_dimensionality(dim_order) > 2)
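
mask_convert itself is not shown here. Judging from the call site, it dispatches on the dimensionality of the mask; the sketch below mirrors that call with the same signature, but it only returns the raw extraction result, whereas the real BIAFLOWS helper also builds the Cytomine annotations and tracks.

def mask_convert_sketch(mask, image, project_id, mask_2d_fn, mask_3d_fn,
                        track_prefix, upload_group_id=False):
    # Hypothetical dispatcher: image, project_id, track_prefix and
    # upload_group_id are kept only to mirror the call above.
    if mask.ndim == 2:
        return mask_2d_fn(mask)
    if mask.ndim == 3:
        return mask_3d_fn(mask)
    raise ValueError("unsupported mask dimensionality: {}".format(mask.ndim))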
Example #3
def skeleton_mask_to_objects_3d(mask,
                                background=0,
                                offset=None,
                                assume_unique_labels=False,
                                time=False,
                                projection=0):
    """Process a 3d skeleton mask

    Parameters:
    -----------
    mask: ndarray
    background: int
    offset: tuple
    assume_unique_labels: bool
    time: bool
    projection: int
        Number of nearby frames to project in the current one. -1 for projecting from the whole volume.
        0 for no projection at all (default)
    """
    # Flat (in-plane) footprint: the skeleton is dilated frame by frame.
    selem = np.ones([1, 3, 3], dtype=bool)  # np.bool was removed in NumPy 1.24
    selem[0, :, :] = morphology.disk(1)
    dilated = morphology.dilation(mask, footprint=selem)  # `selem=` before scikit-image 0.19

    # projection of skeleton from nearby frames
    if projection != 0:
        projected = np.zeros(dilated.shape, dtype=dilated.dtype)
        z_dims = dilated.shape[0]
        for z in range(z_dims):
            z_start = 0 if projection == -1 else max(0, z - projection)
            z_end = z_dims if projection == -1 else min(
                z_dims, z + projection + 1)
            projected[z, :, :] = np.max(dilated[z_start:z_end, :, :], axis=0)
        dilated = projected

    return mask_to_objects_3d(dilated,
                              background=background,
                              offset=offset,
                              time=time,
                              assume_unique_labels=assume_unique_labels)
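
A toy call illustrating the projection parameter, assuming skeleton_mask_to_objects_3d is importable and that mask_to_objects_3d only connects voxels across adjacent frames:

import numpy as np

mask = np.zeros((5, 32, 32), dtype=np.uint8)
mask[1, 10, 5:20] = 1  # skeleton segment on frame 1
mask[3, 10, 5:20] = 1  # same segment on frame 3; frame 2 is empty

# Without projection the two dilated segments are separated by the empty
# frame 2 and come out as two objects. With projection=1 every frame also
# receives the dilated skeleton of its direct neighbours, so frame 2 is
# bridged and a single spanning object is found (the projection also leaks
# the segment into frames 0 and 4).
objects = skeleton_mask_to_objects_3d(mask, projection=1)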
Example #4
def extract_annotations_objtrk(out_path, in_image, project_id, track_prefix,
                               **kwargs):
    """
    out_path: str
    in_image: BiaflowsCytomineInput
    project_id: int
    track_prefix: str
    kwargs: dict
    """
    image = in_image.object
    path = os.path.join(out_path, in_image.filename)
    data, dim_order, _ = imread(path, return_order=True)
    ndim = get_dimensionality(dim_order)

    if ndim < 3:
        raise ValueError(
            "Object tracking should be at least 3D (only {} spatial dimension(s) found)"
            .format(ndim))

    tracks = TrackCollection()
    annotations = AnnotationCollection()

    if ndim == 3:
        slices = mask_to_objects_3d(data, time=True, assume_unique_labels=True)
        time_to_image = get_depth_to_slice(image)

        for slice_group in slices:
            curr_tracks, curr_annots = create_tracking_from_slice_group(
                image,
                slice_group,
                slice2point=lambda _slice: _slice.polygon.centroid,
                depth2slice=time_to_image,
                id_project=project_id,
                upload_object=True,
                upload_group_id=True,
                track_prefix=track_prefix + "-object")
            tracks.extend(curr_tracks)
            annotations.extend(curr_annots)
    elif ndim == 4:
        objects = mask_to_objects_3dt(mask=data)
        depths_to_image = get_depth_to_slice(image, depth=("time", "depth"))
        # TODO add tracking lines one way or another
        for time_steps in objects:
            label = time_steps[0][0].label
            track = Track(name="{}-{}".format(track_prefix, label),
                          id_image=image.id,
                          color=DEFAULT_COLOR).save()
            Property(track, key="label", value=label).save()
            annotations.extend([
                Annotation(location=change_referential(
                    p=_slice.polygon, height=image.height).wkt,
                           id_image=image.id,
                           id_project=project_id,
                           id_tracks=[track.id],
                           slice=depths_to_image[(_slice.time,
                                                  _slice.depth)].id)
                for time_slices in time_steps for _slice in time_slices
            ])

            tracks.append(track)

    else:
        raise ValueError(
            "Annotation extraction for object tracking does not support "
            "masks with more than 4 dimensions (got {}).".format(ndim))

    return tracks, annotations
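
change_referential, used in the comprehension above, converts polygon coordinates from the mask referential (origin at the top-left of the image) to the Cytomine referential (origin at the bottom-left). A minimal Shapely sketch of that flip, assuming a vertical mirror is all the helper does:

from shapely.affinity import affine_transform

def change_referential_sketch(p, height):
    # Vertical flip: (x, y) -> (x, height - y). The 6-tuple is Shapely's
    # [a, b, d, e, x_off, y_off] affine matrix.
    return affine_transform(p, [1, 0, 0, -1, 0, height])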