Example #1
def upload_content(list_image_string, list_filenames, click, downsample,
                   n_rows, n_cols):
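    # Sort the uploaded images by filename, optionally downsample them, and tile them into one mosaic returned as a data URL.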

    downsample = int(downsample)
    if list_image_string is not None:
        order = np.argsort(list_filenames)
        image_list = [
            np.asarray(image_string_to_PILImage(list_image_string[i]))
            for i in order
        ]
        if downsample > 1:
            ratio = 1.0 / downsample
            multichannel = image_list[0].ndim > 2
            image_list = [
                transform.rescale(image,
                                  ratio,
                                  multichannel=multichannel,
                                  preserve_range=True).astype(np.uint8)
                for image in image_list
            ]
        res = tile_images(image_list, n_rows, n_cols)
        return array_to_data_url(res)
    elif click:
        res = demo_data()
        tmp = array_to_data_url(res)
        return tmp

    raise PreventUpdate
def segmentation(string, content, NDVIcontent):
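    # Decode the uploaded RGB and NDVI data URLs, build a filled mask from the drawn outline, crop the largest selected region from both images, and package the crops into a downloadable zip.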
    global ROIRGB, ROINDVI
    if string:
        data = content.encode("utf8").split(b";base64,")[1]
        NDVIdata = NDVIcontent.encode("utf8").split(b";base64,")[1]
        img = io.BytesIO()
        imgNDVI = io.BytesIO()
        img.write(base64.b64decode(data))
        imgNDVI.write(base64.b64decode(NDVIdata))
        img.seek(0)
        imgNDVI.seek(0)
        i = np.asarray(bytearray(img.read()), dtype=np.uint8)
        i = cv2.imdecode(i, cv2.IMREAD_COLOR)
        iNDVI = np.asarray(bytearray(imgNDVI.read()), dtype=np.uint8)
        iNDVI = cv2.imdecode(iNDVI, cv2.IMREAD_COLOR)
        mask = parse_jsonstring(string, (i.shape[0],i.shape[1]))
        ret,thresh = cv2.threshold(np.array(mask, dtype=np.uint8), 0, 255, cv2.THRESH_BINARY)
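        # Fill the interior of the drawn outline: flood-fill the background from the corner, invert it, and OR it with the outline so the enclosed region becomes a solid mask.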
        im_floodfill = thresh.copy()
        h, w = thresh.shape[:2]
        m = np.zeros((h+2, w+2), np.uint8)
        cv2.floodFill(im_floodfill, m, (0,0), 255)
        im_floodfill_inv = cv2.bitwise_not(im_floodfill)
        im_out = thresh | im_floodfill_inv
        RGBimg = cv2.bitwise_and(i, i, mask=im_out)
        RGBimg = cv2.cvtColor(RGBimg, cv2.COLOR_BGR2RGB)
        target_size = (RGBimg.shape[1], RGBimg.shape[0])
        iNDVI = cv2.resize(iNDVI, target_size)
        NDVIimg = cv2.bitwise_and(iNDVI, iNDVI, mask=im_out)
        NDVIimg = cv2.cvtColor(NDVIimg, cv2.COLOR_BGR2RGB)

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        cnts = sorted(contours, key=cv2.contourArea, reverse=True)  # keep only the largest drawn selection; multiple selections are not yet supported
        (x,y,w,h) = cv2.boundingRect(cnts[0])
        ROIRGB = RGBimg[y:y+h, x:x+w]
        ROINDVI = NDVIimg[y:y+h, x:x+w]
        cv2.imwrite(static_image_route+'RGB_cropped.png', cv2.cvtColor(ROIRGB, cv2.COLOR_RGB2BGR ))
        cv2.imwrite(static_image_route+'CIR_cropped.png', cv2.cvtColor(ROINDVI, cv2.COLOR_RGB2BGR ))
        with ZipFile(static_image_route+'cropped.zip', 'w') as zipObj2:
            zipObj2.write(static_image_route+'RGB_cropped.png')
            zipObj2.write(static_image_route+'CIR_cropped.png')
            location = os.path.join(static_image_route,'cropped.zip')

    else:
        raise PreventUpdate
    download_button = html.A(
        html.Button('Download',
                    style={'display': 'block',
                           'position': 'relative',
                           'top': '45%',
                           'left': '45%',
                           'fontSize': '16px',
                           'padding': '8px 12px',
                           'borderRadius': '4px',
                           'color': 'black',
                           'fontFamily': 'Times New Roman',
                           'textAlign': 'center'}),
        href=location)
    return (array_to_data_url(img_as_ubyte(ROIRGB)),
            array_to_data_url(img_as_ubyte(ROINDVI)),
            download_button)
Example #3
def lookup_callback(n_clicks, lookup_val, id_dict, prog_type):
    #time.sleep(1000)
    try:
        print("entered callback")
        print(lookup_val)
        lookup_val = int(lookup_val)
        print(id_dict)
        dataset = id_dict["dataset"]
        #method = id_dict["dataset"]
        pkl = dataset + ".pkl"
        if (prog_type == "layer-wise"):
            embeddings_dir = embeddings_dir_layer
        else:
            embeddings_dir = embeddings_dir_epoch
        img = pickle.load(open(os.path.join(embeddings_dir, pkl),
                               "rb"))[lookup_val].astype(np.uint8)
        print(img.shape)
        child = html.Img(src=array_to_data_url(img_as_ubyte(img)),
                         style={
                             "width": "300px",
                             "height": "300px"
                         })
        return ([child])
    except Exception as e:
        print(e)
        raise PreventUpdate
def interpolate(n_clicks, annotations):
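    # For each slice between the first and last annotated slices, rasterize the two nearest annotated polygons, blend them by distance and threshold at 0.5, then accumulate the enclosed volume.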
    if n_clicks is None:
        return dash.no_update
    interps = {}
    keys = np.sort([int(key) for key in annotations])
    z_min = keys[0]
    z_max = keys[-1]
    volume = 0
    for z in np.arange(z_min + 1, z_max, dtype=np.uint8):
        i = np.searchsorted(keys, z)
        t = (z - keys[i - 1]) / (keys[i] - keys[i - 1])
        path1 = path_to_indices(annotations[str(keys[i])]["path"])
        rr, cc = draw.polygon(path1[:, 1], path1[:, 0])
        poly1 = np.zeros((l_lat, l_lat))
        poly1[rr, cc] = 1
        path2 = path_to_indices(annotations[str(keys[i - 1])]["path"])
        rr, cc = draw.polygon(path2[:, 1], path2[:, 0])
        poly2 = np.zeros((l_lat, l_lat))
        poly2[rr, cc] = 1
        poly = (t * poly1 + (1 - t) * poly2) > 0.5
        interps[str(z)] = array_to_data_url(
            img_as_ubyte(
                segmentation.mark_boundaries(img_1[z], poly, mode="thick")))
        volume += poly.sum()
    volume *= np.abs(np.linalg.det(mat)) / 1000
    result = f"The volume of the object is {volume:.0f} cm3"
    return interps, result
Example #5
def modify_result(contrast, brightness, image_string):
    if image_string is None:
        raise PreventUpdate
    img = np.asarray(image_string_to_PILImage(image_string))
    img = contrast_adjust(img, contrast)
    img = brightness_adjust(img, brightness)
    return array_to_data_url(img)
Example #6
def modify_content(n_cl, n_rows, n_cols, overlap, estimate, image_string,
                   vals):
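    # Split the uploaded image string into tiles and stitch them with register_tiles, using locally estimated overlaps when available and optional blending.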
    blending = 0
    if vals is not None:
        blending = 1 in vals
    if image_string is None:
        raise PreventUpdate
    tiles = untile_images(image_string, n_rows, n_cols)
    if estimate is not None and len(estimate) > 0:
        overlap_dict = _sort_props_lines(estimate, tiles.shape[2],
                                         tiles.shape[3], n_cols)
    else:
        overlap_dict = None
    canvas = register_tiles(
        tiles,
        n_rows,
        n_cols,
        overlap_global=overlap,
        overlap_local=overlap_dict,
        pad=np.max(tiles.shape[2:]),
        blending=blending,
    )
    return array_to_data_url(canvas)
Example #7
def remove_background(json_data, image):
    if json_data:
        # ➊ If image has a value, convert it into a numpy.ndarray of the image
        if image:
            image_array = image_string_to_PILImage(image)
            image_array = np.asarray(image_array)
        # If image has no value, load the image from disk with imread
        else:
            image_array = io.imread(image_path)
        # ➋ Assign the size of the image array to the variable shape
        shape = image_array.shape[:2]

        # ➌ Parse the drawn JSON data and convert it to boolean values
        try:
            mask = parse_jsonstring(json_data, shape=shape)
        except IndexError:
            raise PreventUpdate

        if mask.sum() > 0:  # ➍
            seg = superpixel_color_segmentation(image_array, mask)
        else:
            seg = np.ones(shape)
        filled_image = np.copy(image_array)
        filled_image[np.logical_not(seg)] = np.array([255, 255, 255],
                                                     dtype="uint8")  # ➎
        return array_to_data_url(filled_image)  # ➏

    else:
        raise PreventUpdate
Example #8
def make_figure(img_array):
    img_uri = array_to_data_url(img_array)
    width = img_array.shape[1]
    height = img_array.shape[0]
    fig = go.Figure()
    # Add trace
    fig.add_trace(go.Scatter(x=[], y=[]))
    # Add images
    fig.add_layout_image(
        dict(source=img_uri,
             xref="x",
             yref="y",
             x=0,
             y=0,
             sizex=width,
             sizey=height,
             sizing="contain",
             layer="below"))
    fig.update_layout(template=None)
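    # Hide grids and ticks; the reversed y-axis range places the image origin at the top-left, and scaleanchor locks the aspect ratio to the x-axis.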
    fig.update_xaxes(showgrid=False,
                     range=(0, width),
                     showticklabels=False,
                     zeroline=False)
    fig.update_yaxes(showgrid=False,
                     scaleanchor='x',
                     range=(height, 0),
                     showticklabels=False,
                     zeroline=False)
    fig.update_xaxes(showgrid=False)
    fig.update_yaxes(showgrid=False)
    return fig
Example #9
def update_data(string):
    if string:
        mask = parse_jsonstring(string,
                                io.imread(filename, as_gray=True).shape)
    else:
        raise PreventUpdate
    return array_to_data_url((255 * mask).astype(np.uint8))
Example #10
def segmentation(string):
    if string:
        mask = parse_jsonstring(string, img.shape)
        seg = watershed_segmentation(img, mask)
        src = color.label2rgb(seg, image=img)
    else:
        raise PreventUpdate
    return array_to_data_url(img_as_ubyte(src))
def send_file(filename,mime_type=None):
    try:
        content = array_to_data_url(img_as_ubyte(ROIRGB))
        data = content.encode("utf8").split(b";base64,")[1]
        imgtowrite = io.BytesIO(base64.decodebytes(data))
        c = base64.b64encode(imgtowrite.read()).decode()
        return dict(content=c, filename=filename,mime_type=mime_type, base64=True)
    except Exception:
        pass
Example #12
def update_figure(labs):
    if labs:
        new_labels = np.array(labs)
        overlay = segmentation.mark_boundaries(img, new_labels)
        overlay = img_as_ubyte(overlay)
        return array_to_data_url(overlay)
    else:
        raise PreventUpdate
Example #13
def segmentation(string):
    if string:
        mask = parse_jsonstring(string,
                                io.imread(filename, as_gray=True).shape)
        seg = watershed_segmentation(io.imread(filename, as_gray=True), mask)
        src = color.label2rgb(seg, image=io.imread(filename, as_gray=True))
    else:
        raise PreventUpdate
    return array_to_data_url(img_as_ubyte(src))
Example #14
def load_slice(n, n_slider, full_cut, res_state):
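    # Encode the requested slice as a data URL; skip the update if that slice is already displayed at coarse resolution.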
    if n_slider is None:
        return dash.no_update
    if full_cut is not None:
        print(full_cut['index'])
    if (full_cut is not None and full_cut['index'] == n_slider
            and res_state == 'coarse'):
        return dash.no_update
    return dict(cut=array_to_data_url(img[int(n_slider)]), index=int(n_slider))
Example #15
def update_data(string):
    pprint(string)

    if string:
        mask = parse_jsonstring(string, (canvas_width, canvas_height))
    else:
        raise PreventUpdate

    return array_to_data_url((255 * mask).astype(np.uint8))
Example #16
def save_segmentation(labs, save_mode):
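    # Save the edited label array to disk; in PNG mode, also return it encoded as a data URL.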
    if labs:
        new_labels = np.array(labs)
        np.save('labels.npy', new_labels)
        if save_mode == 'png':
            color_labels = color.label2rgb(new_labels)
            uri = array_to_data_url(new_labels, dtype=np.uint8)
            return uri
    else:
        raise PreventUpdate
Example #17
    def create_images(size_distr_json, pred_json_copy):
        """Creates all the relevant base64 image strings"""
        size_distr = json.loads(size_distr_json)
        pred_copy = json.loads(pred_json_copy)

        # create black and white image
        pred = 255 * np.asarray(pred_copy['yimage_list'], dtype=np.uint8)
        encoded_pred = array_to_data_url(pred)

        # create blue and gold overlay
        lookup = np.asarray([[153, 153, 0], [45, 0, 78]], dtype=np.uint8)
        colour = lookup[pred_copy['yimage_list']]
        encoded_colour = (
            pred_copy['content_type']
            + ','
            + cu.numpy_2_b64(colour)
        )

        # create labeled image
        unique = size_distr['unique_list']
        labeled = np.asarray(size_distr['labeled_list'])
        flattened_colour_arr = np.linspace(
            0,
            256 ** 3 - 1,
            num=len(unique) + 1,
            dtype=np.int32
        )

        # represent values in flattened_color_arr as three digit number with
        # base 256 to create a color array with shape
        # (num unique values including background, 3)
        colours_labeled = np.zeros((len(unique) + 1, 3), dtype=np.uint8)
        for i in range(len(colours_labeled)):
            colours_labeled[i] = np.array([
                (flattened_colour_arr[i] // (256 ** 2)) % 256,
                (flattened_colour_arr[i] // (256)) % 256,
                flattened_colour_arr[i] % 256
            ])

        # create a lookup table using colors array and convert 2D labeled array
        # into 3D rgb array
        lookup_labeled = np.zeros((len(unique) + 1, 3), dtype=np.uint8)
        lookup_labeled[np.unique(labeled - 1)] = colours_labeled
        rgb_labeled = lookup_labeled[labeled - 1]

        # convert from numpy array to base64 image
        # rgb = np.asarray(data['rgb_pred_list'], dtype=np.uint8)
        encoded_rgb = (
            size_distr['content_type']
            + ','
            + cu.numpy_2_b64(rgb_labeled)
        )
        return [encoded_pred, encoded_colour, encoded_rgb]
Example #18
def test_canvas_undo_redo(dash_duo):
    h, w = 10, 10
    overlay = np.zeros((h, w), dtype=np.uint8)
    overlay = img_as_ubyte(overlay)

    # Set up a small app. This could probably be made into a fixture.
    app = dash.Dash(__name__)
    app.layout = html.Div([
        dcc.Store(id='cache', data=''),
        dash_canvas.DashCanvas(id="canvas",
                               width=w,
                               height=h,
                               image_content=array_to_data_url(overlay),
                               goButtonTitle="save")
    ])

    data_saved = []

    @app.callback(Output('cache', 'data'), [Input("canvas", "trigger")],
                  [State("canvas", "json_data")])
    def update_overlay(flag, data):
        data_saved.append(data)

    dash_duo.start_server(app)

    # At application startup, a black 10x10 image is shown. When we click
    # save, we expect a non-trivial JSON object representing this image. We
    # assert that we get this object, but we don't dig into it.
    btn = _get_button_by_title(dash_duo, "Save")
    btn.click()

    objs_1 = json.loads(data_saved[-1])['objects']
    assert len(objs_1) > 0

    # When we click "undo", the image disappears. We check that we get an
    # empty JSON representation back.
    btn = _get_button_by_title(dash_duo, "Undo")
    btn.click()
    btn = _get_button_by_title(dash_duo, "Save")
    btn.click()

    objs_2 = json.loads(data_saved[-1])['objects']
    assert objs_2 == []

    # When we click "redo", the original 10x10 black image is restored.
    btn = _get_button_by_title(dash_duo, "Redo")
    btn.click()
    btn = _get_button_by_title(dash_duo, "Save")
    btn.click()

    objs_3 = json.loads(data_saved[-1])['objects']
    assert objs_1 == objs_3
def segmentation(string, content, NDVIcontent):
    if string:
        data = content.encode("utf8").split(b";base64,")[1]
        NDVIdata = NDVIcontent.encode("utf8").split(b";base64,")[1]
        img = io.BytesIO()
        imgNDVI = io.BytesIO()
        img.write(base64.b64decode(data))
        imgNDVI.write(base64.b64decode(NDVIdata))
        img.seek(0)
        imgNDVI.seek(0)
        i = np.asarray(bytearray(img.read()), dtype=np.uint8)
        i = cv2.imdecode(i, cv2.IMREAD_COLOR)
        iNDVI = np.asarray(bytearray(imgNDVI.read()), dtype=np.uint8)
        iNDVI = cv2.imdecode(iNDVI, cv2.IMREAD_COLOR)
        mask = parse_jsonstring(string, (i.shape[0],i.shape[1]))
        ret,thresh = cv2.threshold(np.array(mask, dtype=np.uint8), 0, 255, cv2.THRESH_BINARY)
        im_floodfill = thresh.copy()
        h, w = thresh.shape[:2]
        m = np.zeros((h+2, w+2), np.uint8)
        cv2.floodFill(im_floodfill, m, (0,0), 255)
        im_floodfill_inv = cv2.bitwise_not(im_floodfill)
        im_out = thresh | im_floodfill_inv
        RGBimg = cv2.bitwise_and(i, i, mask=im_out)
        RGBimg = cv2.cvtColor(RGBimg, cv2.COLOR_BGR2RGB)
        target_size = (RGBimg.shape[1], RGBimg.shape[0])
        iNDVI = cv2.resize(iNDVI, target_size)
        NDVIimg = cv2.bitwise_and(iNDVI, iNDVI, mask=im_out)
        NDVIimg = cv2.cvtColor(NDVIimg, cv2.COLOR_BGR2RGB)

        contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        cnts = sorted(contours, key=cv2.contourArea, reverse=True)  # keep only the largest drawn selection; multiple selections are not yet supported
        (x,y,w,h) = cv2.boundingRect(cnts[0])
        ROIRGB = RGBimg[y:y+h, x:x+w]
        ROINDVI = NDVIimg[y:y+h, x:x+w]
    else:
        raise PreventUpdate
    return array_to_data_url(img_as_ubyte(ROIRGB)), array_to_data_url(img_as_ubyte(ROINDVI)) 
Example #20
def classify(alteration, image):
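    # Rasterize the drawn annotation into a mask image; classify the uploaded image together with the mask when an annotation exists, otherwise classify the image alone.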

    prevent_update(image)

    mask = parse_jsonstring(alteration)
    with open('bug.txt', 'w') as f:
        f.write(str(mask))
    image2 = array_to_data_url((255 * mask).astype(np.uint8))

    if alteration:
        return str(
            google_classify(
                combine(convert_base46(image), convert_base46(image2))))

    return str(google_classify(convert_base46(image)))
Example #21
def make_empty_found_segments():
    """ fstc_slices is initialized to a bunch of images containing nothing (clear pixels) """
    found_segs_tensor = np.zeros_like(img)
    # convert to a colored image (but it will just be colored "clear")
    fst_colored = image_utils.label_to_colors(
        found_segs_tensor,
        colormap=["#000000", "#8A2BE2"],
        alpha=[0, 128],
        color_class_offset=0,
    )
    fstc_slices = [[
        array_to_data_url(np.moveaxis(fst_colored, 0, j)[i])
        for i in range(np.moveaxis(fst_colored, 0, j).shape[0])
    ] for j in range(NUM_DIMS_DISPLAYED)]
    return fstc_slices
Example #22
def show_draw(string):
    if not string:
        raise PreventUpdate

    # Convert image string to displayable image
    mask = parse_jsonstring(string, shape)
    mask = (255 * mask).astype(np.uint8)
    mask = cv2.resize(mask, (400, 100), interpolation=cv2.INTER_AREA)
    mask = cv2.threshold(mask, 0, 255,
                         cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    captcha_img = array_to_data_url(mask)

    # Show image and save image string
    return captcha_img, string
Example #23
def draw_shapes_react(
    drawn_shapes_data,
    image_display_top_figure,
    image_display_side_figure,
    current_render_id,
):
    if any(
        [
            e is None
            for e in [
                drawn_shapes_data,
                image_display_top_figure,
                image_display_side_figure,
            ]
        ]
    ):
        return dash.no_update
    t1 = time.time()
    found_segs_tensor = shapes_to_segs(
        drawn_shapes_data, image_display_top_figure, image_display_side_figure,
    )
    t2 = time.time()
    PRINT("Time to convert shapes to segments:", t2 - t1)
    # convert to a colored image
    fst_colored = image_utils.label_to_colors(
        found_segs_tensor,
        colormap=["#8A2BE2"],
        alpha=[128],
        # we map label 0 to the color #000000 using no_map_zero, so we start at
        # color_class 1
        color_class_offset=1,
        labels_contiguous=True,
        no_map_zero=True,
    )
    t3 = time.time()
    PRINT("Time to convert from labels to colored image:", t3 - t2)
    fstc_slices = [
        [
            array_to_data_url(s) if np.any(s != 0) else blank_seg_slices[j]
            for s in np.moveaxis(fst_colored, 0, j)
        ]
        for j in range(NUM_DIMS_DISPLAYED)
    ]
    t4 = time.time()
    PRINT("Time to convert to data URLs:", t4 - t3)
    PRINT("Total time to compute 2D annotations:", t4 - t1)
    return fstc_slices, current_render_id + 1
Example #24
    def update_figure_upload(string, image, height, width, birdeye,
                             calculation):
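        # Process the uploaded (or default) image with solve_sudoku and, when the calculation option is set, run compute() on the detected grid before returning the result as a data URL.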

        if string:
            if image is None:
                im = img_app3
            else:
                im = image_string_to_PILImage(image)
                im = np.asarray(im)

            dat, locsgrid, locs, gray, output_image = solve_sudoku(
                im,
                beyeview=birdeye,
                digit_h=(height[0], height[1]),
                digit_w=(width[0], width[1]))
            if calculation:
                dat = compute(locsgrid, locs, gray, output_image)
            return array_to_data_url(dat)
        else:
            raise PreventUpdate
Example #25
def update_data(string):
    if string:

        try:
            mask = parse_jsonstring(string,
                                    shape=(canvas_height, canvas_width))
        except:
            return "Out of Bounding Box, click clear button and try again"
        # Use np.savetxt('data.csv', mask) to save the canvas annotations as a numpy array
        # Invert True and False
        mask = (~mask.astype(bool)).astype(int)

        image_string = array_to_data_url((255 * mask).astype(np.uint8))

        # this is from canvas.utils.image_string_to_PILImage(image_string)
        img = Image.open(BytesIO(base64.b64decode(image_string[22:])))

        text = "{}".format(
            pytesseract.image_to_string(img, lang="eng", config="--psm 6"))
        return text
    else:
        raise PreventUpdate
Example #26
def update_figure_upload(string, image):
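    # Build a mask from the drawn strokes, segment the foreground with superpixel_color_segmentation, and white out everything outside the segmented region.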
    if string:
        if image is None:
            im = img_app3
        else:
            im = image_string_to_PILImage(image)
            im = np.asarray(im)
        shape = im.shape[:2]
        try:
            mask = parse_jsonstring(string, shape=shape)
        except IndexError:
            raise PreventUpdate
        if mask.sum() > 0:
            seg = superpixel_color_segmentation(im, mask)
        else:
            seg = np.ones(shape)
        fill_value = 255 * np.ones(3, dtype=np.uint8)
        dat = np.copy(im)
        dat[np.logical_not(seg)] = fill_value
        return array_to_data_url(dat)
    else:
        raise PreventUpdate
Example #27
        dat = np.load(LOAD_SUPERPIXEL)
        segl = dat["segl"]
        seg = dat["seg"]
else:
    # partition image
    segl, seg = make_seg_image(img)

if len(SAVE_SUPERPIXEL) > 0:
    np.savez(SAVE_SUPERPIXEL, segl=segl, seg=seg)
    exit(0)

seg_img = img_as_ubyte(segl)
img_slices, seg_slices = [
    [
        # top
        [array_to_data_url(im[i, :, :]) for i in range(im.shape[0])],
        # side
        [array_to_data_url(im[:, i, :]) for i in range(im.shape[1])],
    ] for im in [img, seg_img]
]
# initially no slices have been found so we don't draw anything
found_seg_slices = make_empty_found_segments()
# store encoded blank slices for each view to save recomputing them for slices
# containing no colored pixels
blank_seg_slices = [found_seg_slices[0][0], found_seg_slices[1][0]]

app = dash.Dash(__name__)
server = app.server

top_fig, side_fig = [
    make_default_figure(
Example #28
import dash
import dash_html_components as html
import numpy as np
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
from dash_canvas import DashCanvas
from dash_canvas.utils import (
    array_to_data_url,
    image_string_to_PILImage,
    parse_jsonstring,
    superpixel_color_segmentation,
)
from skimage import color, io

image_path = "img/bird2.png"
default_image = io.imread(image_path)
image_string = array_to_data_url(default_image)
app = dash.Dash(__name__)

app.layout = html.Div([
    html.Div(
        [
            DashCanvas(
                id="first-image",
                filename=image_string,
                width=500,
                lineWidth=5,
                lineColor="lime",
                goButtonTitle="remove",
            )
        ],
        style={
Example #29
def update_data(string):
    if string:
        mask = parse_jsonstring(string, shape)
    else:
        raise PreventUpdate
    return array_to_data_url((255 * mask).astype(np.uint8))
Example #30
def fill_tab(tab):
    if tab == "canvas-tab":
        return [
            dash_canvas.DashCanvas(
                id="canvas-stitch",
                width=canvas_width,
                height=canvas_height,
                scale=scale,
                lineWidth=2,
                lineColor="red",
                tool="line",
                hide_buttons=["pencil"],
                image_content=array_to_data_url(
                    np.zeros((height, width), dtype=np.uint8)),
                goButtonTitle="Estimate translation",
            ),
            html.Div(
                children=[
                    html.Div(
                        image_upload_zone("upload-stitch",
                                          multiple=True,
                                          width="100px"))
                ],
                className="upload_zone",
                id="upload",
            ),
        ]
    elif tab == "result-tab":
        return [
            dcc.Loading(
                id="loading-1",
                children=[
                    html.Img(
                        id="stitching-result",
                        src=array_to_data_url(
                            np.zeros((height, width), dtype=np.uint8)),
                        width=canvas_width,
                    )
                ],
                type="circle",
            ),
            html.Div(
                [
                    html.Label("Contrast"),
                    dcc.Slider(id="contrast-stitch",
                               min=0,
                               max=1,
                               step=0.02,
                               value=0.5),
                ],
                className="result_slider",
            ),
            html.Div(
                [
                    html.Label("Brightness"),
                    dcc.Slider(id="brightness-stitch",
                               min=0,
                               max=1,
                               step=0.02,
                               value=0.5),
                ],
                className="result_slider",
            ),
        ]
    return [
        html.Img(id="bla",
                 src=app.get_asset_url("stitch_demo.gif"),
                 width=canvas_width)
    ]