Example #1
def shapes_to_segs(
    drawn_shapes_data, image_display_top_figure, image_display_side_figure,
):
    # img, seg_img, seg, hwscales and DEBUG_MASK are not defined in this
    # snippet; they are module-level globals of the surrounding app
    masks = np.zeros_like(img)
    for j, (graph_figure, (hscale, wscale)) in enumerate(
        zip([image_display_top_figure, image_display_side_figure], hwscales)
    ):
        fig = go.Figure(**graph_figure)
        # we use the width and the height of the first layout image (this will be
        # one of the images of the brain) to get the bounding box of the SVG that we
        # want to rasterize
        width, height = [fig.layout.images[0][sz] for sz in ["sizex", "sizey"]]
        for i in range(seg_img.shape[j]):
            shape_args = [
                dict(width=width, height=height, shape=s)
                for s in drawn_shapes_data[j][i]
            ]
            if len(shape_args) > 0:
                mask = shape_utils.shapes_to_mask(
                    shape_args,
                    # we only have one label class, so the mask is given value 1
                    1,
                )
                # TODO: Maybe there's a more elegant way to downsample the mask?
                np.moveaxis(masks, 0, j)[i, :, :] = mask[::hscale, ::wscale]
    found_segs_tensor = np.zeros_like(img)
    if DEBUG_MASK:
        found_segs_tensor[masks == 1] = 1
    else:
        # find the labels of the segments lying beneath the drawn mask
        labels = set(seg[masks == 1])
        # for each label found, select every pixel of the segment with that label
        for label in labels:
            found_segs_tensor[seg == label] = 1
    return found_segs_tensor
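shape_utils.shapes_to_mask is a helper of the surrounding app and is not shown on this page. Purely as a hedged illustration, a rasterizer with the same calling convention could be sketched as below, assuming each drawn shape is a closed SVG path of the form "M x0,y0 L x1,y1 ... Z" as produced by Plotly's drawing tools; the function name and the path format are assumptions, not the app's actual implementation.

import re
import numpy as np
from PIL import Image, ImageDraw

def shapes_to_mask_sketch(shape_args, label):
    # hypothetical stand-in for shape_utils.shapes_to_mask: burn every closed
    # SVG path into a single uint8 mask, writing `label` inside each polygon
    width = int(shape_args[0]["width"])
    height = int(shape_args[0]["height"])
    canvas = Image.new("L", (width, height), 0)
    draw = ImageDraw.Draw(canvas)
    for arg in shape_args:
        path = arg["shape"]["path"]  # e.g. "M12.5,3.2L40.1,8.7L25.0,30.9Z"
        vertices = [
            (float(x), float(y))
            for x, y in re.findall(r"([-+]?\d*\.?\d+),([-+]?\d*\.?\d+)", path)
        ]
        draw.polygon(vertices, outline=label, fill=label)
    return np.asarray(canvas)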
def compute_segmentations(
    shapes,
    img_path="assets/segmentation_img.jpg",
    features=None,
    shape_layers=None,
    label_to_colors_args={},
):

    # load original image
    img = img_to_ubyte_array(img_path)

    # convert shapes to mask
    shape_args = [{
        "width": img.shape[1],
        "height": img.shape[0],
        "shape": shape
    } for shape in shapes]
    if (shape_layers is None) or (len(shape_layers) != len(shapes)):
        shape_layers = [(n + 1) for n, _ in enumerate(shapes)]
    mask = shape_utils.shapes_to_mask(shape_args, shape_layers)

    # do segmentation and return this
    t1 = time()
    clf = RandomForestClassifier(
        n_estimators=50, n_jobs=-1, max_depth=8, max_samples=0.05
    )
    seg, clf = fit_segmenter(mask, features, clf)
    t2 = time()
    # report how long the random-forest segmentation took, in seconds
    print(t2 - t1)
    color_seg = label_to_colors(seg, **label_to_colors_args)
    # color_seg is a 3d tensor representing a colored image whereas seg is a
    # matrix whose entries represent the classes
    return (color_seg, seg, clf)
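fit_segmenter, img_to_ubyte_array, label_to_colors and the features array are defined elsewhere in the app. The call pattern matches scikit-image's trainable-segmentation API, so, purely as a hedged usage sketch (assuming a 2D grayscale image and scikit-image >= 0.18, where these helpers live in skimage.future and skimage.feature), the training step could look like:

import numpy as np
from skimage import data, future
from skimage.feature import multiscale_basic_features
from sklearn.ensemble import RandomForestClassifier

img = data.camera()                        # stand-in 2D grayscale image
features = multiscale_basic_features(img)  # per-pixel feature stack, shape (H, W, n_features)

# sparse annotations: 0 = unlabeled, 1..K = user-drawn classes
mask = np.zeros(img.shape, dtype=np.uint8)
mask[50:100, 50:100] = 1       # e.g. a "background" scribble
mask[300:350, 300:350] = 2     # e.g. an "object" scribble

clf = RandomForestClassifier(
    n_estimators=50, n_jobs=-1, max_depth=8, max_samples=0.05
)
clf = future.fit_segmenter(mask, features, clf)   # train on labeled pixels only
seg = future.predict_segmenter(features, clf)     # label every pixel of the image

In the listing above, the app's own fit_segmenter appears to return both the full-image prediction and the trained classifier in a single call, which the two-step scikit-image version splits into fit and predict.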
Example #3
def compute_segmentations(
    shapes,
    img_path="assets/segmentation_img.jpg",
    segmenter_args={},
    shape_layers=None,
    label_to_colors_args={},
):

    # load original image
    img = img_to_ubyte_array(img_path)

    # convert shapes to mask
    shape_args = [{
        "width": img.shape[1],
        "height": img.shape[0],
        "shape": shape
    } for shape in shapes]
    if (shape_layers is None) or (len(shape_layers) != len(shapes)):
        shape_layers = [(n + 1) for n, _ in enumerate(shapes)]
    mask = shape_utils.shapes_to_mask(shape_args, shape_layers)

    # do segmentation and return this
    seg, clf = trainable_segmentation(img, mask, **segmenter_args)
    color_seg = label_to_colors(seg, **label_to_colors_args)
    # color_seg is a 3d tensor representing a colored image whereas seg is a
    # matrix whose entries represent the classes
    return (color_seg, seg, clf)
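trainable_segmentation is likewise an app-level helper that bundles feature extraction, training and prediction into one call. A hypothetical implementation with that return signature, reusing the scikit-image calls from the previous sketch (the keyword names accepted through segmenter_args are assumptions), might be:

from skimage import future
from skimage.feature import multiscale_basic_features
from sklearn.ensemble import RandomForestClassifier

def trainable_segmentation(img, mask, features_func=multiscale_basic_features, clf=None):
    # hypothetical helper: extract per-pixel features, train a classifier on the
    # labeled pixels of `mask` (0 = unlabeled), then label every pixel of `img`
    if clf is None:
        clf = RandomForestClassifier(n_estimators=50, n_jobs=-1, max_depth=8)
    features = features_func(img)
    clf = future.fit_segmenter(mask, features, clf)
    seg = future.predict_segmenter(features, clf)
    return seg, clf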
def draw_shapes_react(
    drawn_shapes_data, image_display_top_figure, image_display_side_figure
):

    if any(
        [
            e is None
            for e in [
                drawn_shapes_data,
                image_display_top_figure,
                image_display_side_figure,
            ]
        ]
    ):
        return dash.no_update
    # img, seg_img, seg, hwscales and DEBUG_MASK are again module-level
    # globals of the surrounding app, not defined in this snippet
    masks = np.zeros_like(img)
    for j, (graph_figure, (hscale, wscale)) in enumerate(
        zip([image_display_top_figure, image_display_side_figure], hwscales)
    ):
        fig = go.Figure(**graph_figure)
        # we use the width and the height of the first layout image (this will be
        # one of the images of the brain) to get the bounding box of the SVG that we
        # want to rasterize
        width, height = [fig.layout.images[0][sz] for sz in ["sizex", "sizey"]]
        for i in range(seg_img.shape[j]):
            shape_args = [
                dict(width=width, height=height, shape=s)
                for s in drawn_shapes_data[j][i]
            ]
            if len(shape_args) > 0:
                mask = shape_utils.shapes_to_mask(
                    shape_args,
                    # we only have one label class, so the mask is given value 1
                    1,
                )
                # TODO: Maybe there's a more elegant way to downsample the mask?
                np.moveaxis(masks, 0, j)[i, :, :] = mask[::hscale, ::wscale]
    found_segs_tensor = np.zeros_like(img)
    if DEBUG_MASK:
        found_segs_tensor[masks == 1] = 1
    else:
        # find the labels of the segments lying beneath the drawn mask
        labels = set(seg[masks == 1])
        # for each label found, select every pixel of the segment with that label
        for label in labels:
            found_segs_tensor[seg == label] = 1
    # convert to a colored image
    fst_colored = image_utils.label_to_colors(
        found_segs_tensor,
        colormap=["#000000", "#8A2BE2"],
        alpha=[0, 128],
        color_class_offset=0,
    )
    fstc_slices = [
        [
            array_to_data_url(np.moveaxis(fst_colored, 0, j)[i])
            for i in range(np.moveaxis(fst_colored, 0, j).shape[0])
        ]
        for j in range(NUM_DIMS_DISPLAYED)
    ]
    return fstc_slices
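draw_shapes_react reads like a Dash callback body; the @app.callback decorator and the component ids are not part of this listing. A plausible wiring, with purely illustrative ids, could be:

import dash
from dash.dependencies import Input, Output, State

app = dash.Dash(__name__)

# illustrative ids only; the real app's layout and component ids are not shown here
app.callback(
    Output("found-segs-slices", "data"),
    [Input("drawn-shapes-store", "data")],
    [
        State("image-display-graph-top", "figure"),
        State("image-display-graph-side", "figure"),
    ],
)(draw_shapes_react)

The returned fstc_slices (one list of data-URL images per displayed dimension) would then feed whichever store or figure the app uses to overlay the colored segments.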