Example #1
def make_default_figure(
    images=[DEFAULT_IMAGE_PATH],
    stroke_color=class_to_color(DEFAULT_LABEL_CLASS),
    stroke_width=DEFAULT_STROKE_WIDTH,
    shapes=[],
):
    """Return a figure with the given images as layout images, open-path
    drawing enabled, the given stroke color and width for new shapes, and any
    previously drawn shapes restored."""
    fig = plot_common.dummy_fig()
    plot_common.add_layout_images_to_fig(fig, images)
    fig.update_layout({
        "dragmode": "drawopenpath",
        "shapes": shapes,
        "newshape.line.color": stroke_color,
        "newshape.line.width": stroke_width,
        "margin": dict(l=0, r=0, b=0, t=0, pad=4),
    })
    return fig
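The figure returned by this helper is meant to be handed to a dcc.Graph. A minimal usage sketch (the app/layout wiring and the "graph" id below are assumptions for illustration, not part of the original app; Dash 2.x style imports assumed):

import dash
from dash import dcc, html

app = dash.Dash(__name__)
app.layout = html.Div(
    [
        dcc.Graph(
            id="graph",  # hypothetical id, for illustration only
            figure=make_default_figure(),
            # expose the draw/erase tools in the modebar alongside the defaults
            config={"modeBarButtonsToAdd": ["drawopenpath", "eraseshape"]},
        )
    ]
)

if __name__ == "__main__":
    app.run_server(debug=True)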
Example #2
def make_default_figure(
    images=[],
    stroke_color=DEFAULT_STROKE_COLOR,
    stroke_width=DEFAULT_STROKE_WIDTH,
    shapes=[],
    img_args=dict(layer="above"),
    width_scale=1,
    height_scale=1,
):
    """Like the figure factory above, but also scales the layout images and
    appends a blank layout image, sized to the largest one, that serves as a
    canvas for masks computed client-side later."""
    fig = plot_common.dummy_fig()
    plot_common.add_layout_images_to_fig(
        fig,
        images,
        img_args=img_args,
        width_scale=width_scale,
        height_scale=height_scale,
        update_figure_dims="height",
    )
    # add an empty image with the same size as the greatest of the already added
    # images so that we can add computed masks clientside later
    mwidth, mheight = [
        max([im[sz] for im in fig["layout"]["images"]]) for sz in ["sizex", "sizey"]
    ]
    fig.add_layout_image(
        dict(
            source="",
            xref="x",
            yref="y",
            x=0,
            y=0,
            sizex=mwidth,
            sizey=mheight,
            sizing="contain",
            layer="above",
        )
    )
    fig.update_layout(
        {
            "dragmode": "drawopenpath",
            "shapes": shapes,
            "newshape.line.color": stroke_color,
            "newshape.line.width": stroke_width,
            "margin": dict(l=0, r=0, b=0, t=0, pad=4),
        }
    )
    return fig
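This variant sizes its blank canvas image from the largest layout image already attached to the figure. The lookup works because fig["layout"]["images"] is iterable and each entry exposes its sizex/sizey. A self-contained sketch of the same idiom on plain plotly (toy sizes, no plot_common) for clarity:

import plotly.graph_objects as go

fig = go.Figure()
fig.add_layout_image(dict(source="", xref="x", yref="y", x=0, y=0, sizex=400, sizey=300))
fig.add_layout_image(dict(source="", xref="x", yref="y", x=0, y=0, sizex=640, sizey=480))

# same idiom as above: take the largest sizex/sizey over all layout images
mwidth, mheight = [
    max(im[sz] for im in fig["layout"]["images"]) for sz in ["sizex", "sizey"]
]
print(mwidth, mheight)  # the largest layout image wins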
Example #3
def test_image_figure(shape=(300, 500), color="#002EA7"):
    """ Make a figure containing an image that is just a constant color for
    testing. """
    fig = plot_common.dummy_fig()
    im = np.ones(shape, dtype="uint8")
    imc = image_utils.label_to_colors(im, ["#000000", color], alpha=255)
    imcu = plot_common.img_array_to_uri(imc)
    fig = plot_common.add_layout_images_to_fig(fig, [imcu])
    # and we make it so you can draw for fun
    fig.update_layout({
        "dragmode": "drawopenpath",
        "shapes": [],
        "newshape.line.color": "purple",
        "newshape.line.width": 5,
        "margin": dict(l=0, r=0, b=0, t=0, pad=4),
    })
    return fig
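The test relies on two app helpers, image_utils.label_to_colors and plot_common.img_array_to_uri, whose implementations are not shown here. As a rough sketch of what such an array-to-data-URI conversion typically involves (PIL plus base64 is an assumption, not the app's actual code), the constant color "#002EA7" can be encoded like this:

import base64
import io

import numpy as np
from PIL import Image

# constant RGB image in the test color #002EA7 = (0, 46, 167)
im = np.full((300, 500, 3), (0, 46, 167), dtype="uint8")
buf = io.BytesIO()
Image.fromarray(im).save(buf, format="PNG")
uri = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()
# the resulting string can be used as a layout-image source in plotly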
Example #4
def annotation_react(
    graph_relayoutData,
    any_label_class_button_value,
    stroke_width_value,
    show_segmentation_value,
    download_button_n_clicks,
    download_image_button_n_clicks,
    segmentation_features_value,
    sigma_range_slider_value,
    masks_data,
):
    classified_image_store_data = dash.no_update
    classifier_store_data = dash.no_update
    cbcontext = [p["prop_id"] for p in dash.callback_context.triggered][0]
    if cbcontext in [
            "segmentation-features.value", "sigma-range-slider.value"
    ] or (("Show segmentation" in show_segmentation_value) and
          (len(masks_data["shapes"]) > 0)):
        segmentation_features_dict = {
            "intensity": False,
            "edges": False,
            "texture": False,
        }
        for feat in segmentation_features_value:
            segmentation_features_dict[feat] = True
        t1 = time()
        features = compute_features(
            img,
            **segmentation_features_dict,
            sigma_min=sigma_range_slider_value[0],
            sigma_max=sigma_range_slider_value[1],
        )
        t2 = time()
        print(t2 - t1)  # log how long the feature computation took
    if cbcontext == "graph.relayoutData":
        if "shapes" in graph_relayoutData.keys():
            masks_data["shapes"] = graph_relayoutData["shapes"]
        else:
            return dash.no_update
    stroke_width = int(round(2**(stroke_width_value)))
    # find label class value by finding the button with the greatest n_clicks
    if any_label_class_button_value is None:
        label_class_value = DEFAULT_LABEL_CLASS
    else:
        label_class_value = max(
            enumerate(any_label_class_button_value),
            key=lambda t: 0 if t[1] is None else t[1],
        )[0]

    fig = make_default_figure(
        stroke_color=class_to_color(label_class_value),
        stroke_width=stroke_width,
        shapes=masks_data["shapes"],
    )
    # draw the segmentation overlay when requested and at least one shape exists
    if ("Show segmentation"
            in show_segmentation_value) and (len(masks_data["shapes"]) > 0):
        segimgpng = None
        try:
            feature_opts = dict(
                segmentation_features_dict=segmentation_features_dict)
            feature_opts["sigma_min"] = sigma_range_slider_value[0]
            feature_opts["sigma_max"] = sigma_range_slider_value[1]
            segimgpng, clf = show_segmentation(DEFAULT_IMAGE_PATH,
                                               masks_data["shapes"], features,
                                               feature_opts)
            if cbcontext == "download-button.n_clicks":
                classifier_store_data = clf
            if cbcontext == "download-image-button.n_clicks":
                classified_image_store_data = plot_common.pil_image_to_uri(
                    blend_image_and_classified_regions_pil(
                        PIL.Image.open(DEFAULT_IMAGE_PATH), segimgpng))
        except ValueError:
            # if segmentation fails, draw nothing
            pass
        images_to_draw = []
        if segimgpng is not None:
            images_to_draw = [segimgpng]
        fig = plot_common.add_layout_images_to_fig(fig, images_to_draw)
    fig.update_layout(uirevision="segmentation")
    return (
        fig,
        masks_data,
        "Stroke width: %d" % (stroke_width, ),
        classifier_store_data,
        classified_image_store_data,
    )
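The callback starts by asking dash.callback_context which input actually fired, then picks the label class as the button with the greatest n_clicks. A stripped-down sketch of the callback_context part (the "btn-a"/"btn-b"/"out" ids are hypothetical and only for illustration; Dash 2.x style imports assumed):

import dash
from dash import Input, Output, html

app = dash.Dash(__name__)
app.layout = html.Div(
    [html.Button("A", id="btn-a"), html.Button("B", id="btn-b"), html.Div(id="out")]
)

@app.callback(
    Output("out", "children"),
    [Input("btn-a", "n_clicks"), Input("btn-b", "n_clicks")],
)
def which_button(n_a, n_b):
    # same idiom as annotation_react: the first triggered prop_id,
    # e.g. "btn-a.n_clicks" (or "." on the initial call)
    cbcontext = [p["prop_id"] for p in dash.callback_context.triggered][0]
    return "triggered by: %s" % cbcontext

if __name__ == "__main__":
    app.run_server(debug=True)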
Example #5
def annotation_react(
    graph_relayoutData,
    any_label_class_button_value,
    stroke_width_value,
    show_segmentation_value,
    segmentation_features_value,
    sigma_range_slider_value,
    masks_data,
    segmentation_data,
):
    print(segmentation_data)  # debug: show the current segmentation cache
    classified_image_store_data = dash.no_update
    classifier_store_data = dash.no_update
    cbcontext = [p["prop_id"] for p in dash.callback_context.triggered][0]
    if cbcontext == "graph.relayoutData":
        if "shapes" in graph_relayoutData.keys():
            masks_data["shapes"] = graph_relayoutData["shapes"]
        else:
            return dash.no_update
    stroke_width = int(round(2**(stroke_width_value)))
    # find label class value by finding button with the greatest n_clicks
    if any_label_class_button_value is None:
        label_class_value = DEFAULT_LABEL_CLASS
    else:
        label_class_value = max(
            enumerate(any_label_class_button_value),
            key=lambda t: 0 if t[1] is None else t[1],
        )[0]
    fig = make_default_figure(
        stroke_color=class_to_color(label_class_value),
        stroke_width=stroke_width,
        shapes=masks_data["shapes"],
    )
    if ("Show segmentation"
            in show_segmentation_value) and (len(masks_data["shapes"]) > 0):
        # To store segmentation data in the store, we base64-encode the
        # PIL.Image and hash the set of shapes (plus the feature settings) to
        # use as the key. To retrieve the segmentation data, we base64-decode
        # back to a PIL.Image, which also gives us the image dimensions.
        sh = shapes_to_key([
            masks_data["shapes"],
            segmentation_features_value,
            sigma_range_slider_value,
        ])
        if sh in segmentation_data.keys():
            segimgpng = look_up_seg(segmentation_data, sh)
        else:
            segimgpng = None
            try:
                feature_opts = {
                    key: (key in segmentation_features_value)
                    for key in SEG_FEATURE_TYPES
                }
                feature_opts["sigma_min"] = sigma_range_slider_value[0]
                feature_opts["sigma_max"] = sigma_range_slider_value[1]
                if len(segmentation_features_value) > 0:
                    segimgpng, classifier_store_data = show_segmentation(
                        DEFAULT_IMAGE_PATH, masks_data["shapes"], feature_opts)
                    segmentation_data = store_shapes_seg_pair(
                        segmentation_data, sh, segimgpng)
                    classified_image_store_data = plot_common.pil_image_to_uri(
                        blend_image_and_classified_regions_pil(
                            PIL.Image.open(DEFAULT_IMAGE_PATH), segimgpng))
            except ValueError:
                # if segmentation fails, draw nothing
                pass
        images_to_draw = []
        if segimgpng is not None:
            images_to_draw = [segimgpng]
        fig = plot_common.add_layout_images_to_fig(fig, images_to_draw)
    return (
        fig,
        masks_data,
        segmentation_data,
        "Stroke width: %d" % (stroke_width, ),
        classifier_store_data,
        classified_image_store_data,
    )
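shapes_to_key, store_shapes_seg_pair, and look_up_seg are helpers defined elsewhere in the app. A hedged sketch of the keying idea only (JSON-serialize the shapes and feature settings, then hash the result); this is an illustration, not the app's actual implementation:

import hashlib
import json

def shapes_to_key_sketch(args):
    # sort_keys keeps the serialization stable for dicts inside the shape list
    return hashlib.sha256(
        json.dumps(args, sort_keys=True).encode("utf-8")
    ).hexdigest()

cache = {}
key = shapes_to_key_sketch(
    [[{"type": "path", "path": "M0,0L10,10"}], ["intensity"], [1, 16]]
)
cache[key] = "<base64-encoded segmentation PNG>"
# the same shapes and settings always produce the same key, so the segmentation
# does not have to be recomputed on the next callback
print(key in cache)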