def get_level_from_server(msg, retrieval_mode="volume"):
    """Fetch the data of the annotation level named in ``msg``.

    Parameters
    ----------
    msg : dict
        Must contain ``"level_id"``, the annotation dataset name.
    retrieval_mode : str
        ``"slice"`` fetches only the current slice over HTTP,
        ``"volume_http"`` fetches the whole volume over HTTP, and
        ``"volume"`` (default) reads the whole volume directly via
        :class:`DatasetManager`.

    Returns
    -------
    tuple
        ``(src_arr, src_annotations_dataset)``. Both are ``None`` when the
        retrieval mode is unknown or the server returned no result.
    """
    # Initialise so an unknown mode or a failed request returns (None, None)
    # instead of raising UnboundLocalError at the final return.
    src_arr = None
    src_annotations_dataset = None

    if retrieval_mode == "slice":  # get a single slice over HTTP
        src_annotations_dataset = DataModel.g.dataset_uri(
            msg["level_id"], group="annotations"
        )
        # NOTE(review): "workpace" looks like a typo for "workspace", but it
        # is used consistently for these HTTP calls throughout the file --
        # confirm what the server endpoint actually expects before renaming.
        params = dict(
            workpace=True,
            src=src_annotations_dataset,
            slice_idx=cfg.current_slice,
            order=cfg.order,
        )
        result = Launcher.g.run("annotations", "get_slice", **params)
        if result:
            src_arr = decode_numpy(result)
    elif retrieval_mode == "volume_http":  # get the whole volume over HTTP
        src_annotations_dataset = DataModel.g.dataset_uri(
            msg["level_id"], group="annotations"
        )
        params = dict(workpace=True, src=src_annotations_dataset)
        result = Launcher.g.run("annotations", "get_volume", **params)
        if result:
            src_arr = decode_numpy(result)
    elif retrieval_mode == "volume":  # read the whole volume from disk
        src = DataModel.g.dataset_uri(msg["level_id"], group="annotations")
        with DatasetManager(src, out=None, dtype="float32", fillvalue=0) as DM:
            src_annotations_dataset = DM.sources[0][:]
            src_arr = get_array_from_dataset(src_annotations_dataset)

    return src_arr, src_annotations_dataset
 def get_crop(msg):
     """Request a fixed 100x100x100 crop of a feature and display it.

     ``msg`` must contain ``"feature_id"``; the decoded crop is added to
     the napari viewer as a new image layer named after that feature.
     Does nothing when the server returns no result.
     """
     logger.debug(f"Getting crop roi: {msg}")
     feature_uri = DataModel.g.dataset_uri(msg["feature_id"], group="features")
     crop_request = {
         "workpace": True,
         "src": feature_uri,
         "roi": (0, 0, 0, 100, 100, 100),
     }
     response = Launcher.g.run("features", "get_crop", **crop_request)
     if not response:
         return
     crop_arr = decode_numpy(response)
     viewer.add_image(crop_arr, name=msg["feature_id"])
Example #3
0
 def show_slice(self, idx):
     """Render slice ``idx`` of the workspace on the server and display it.

     Merges ``self.viz_params`` into the render request; on success the
     decoded image replaces the current viewer image. The slider event is
     accepted regardless of whether rendering succeeded.
     """
     render_args = {"slice_idx": idx, "workspace": True, "timeit": True}
     render_args.update(self.viz_params)
     response = Launcher.g.run('render', 'render_workspace', **render_args)
     if response:
         self.viewer.update_image(decode_numpy(response))
     self.slider.accept()
Example #4
0
 def slice_updated(self, idx):
     """Refresh the annotator's region overlay for the new slice ``idx``.

     When a region is selected, its slice is fetched from the server and
     handed to the annotator; when nothing is selected the overlay is
     cleared. A failed fetch leaves the overlay untouched.
     """
     super().slice_updated(idx)
     selected = self.selection['region']
     if not selected:
         self.annotator.set_region(None)
         return
     region_uri = DataModel.g.dataset_uri(selected)
     request = dict(workpace=True, src=region_uri, slice_idx=idx)
     response = Launcher.g.run('regions', 'get_slice', **request)
     if response:
         self.annotator.set_region(decode_numpy(response))
 def calc_object_stats2(self):
     """Run the server-side object-stats analyzer and plot the result image.

     Builds the request from the current feature/object selections, then
     renders the decoded result image in a new Matplotlib canvas row.
     Does nothing when the server returns no result.
     """
     dst_uri = DataModel.g.dataset_uri(self.analyzer_id, group="analyzer")
     src_uri = DataModel.g.dataset_uri(self.features_source.value())
     analyzer_params = {
         "src": src_uri,
         "dst": dst_uri,
         "modal": False,
         "workspace": DataModel.g.current_workspace,
         "feature_ids": str(self.features_source.value()[-1]),
         "object_id": str(self.objects_source.value()),
     }
     logger.debug(f"Running analyzer with params {analyzer_params}")
     response = Launcher.g.run("analyzer", "object_stats", **analyzer_params)
     if not response:
         return
     stats_img = decode_numpy(response)
     canvas = MplCanvas(self, width=5, height=4, dpi=100)
     canvas.axes.imshow(stats_img)
     self.add_row(canvas, max_height=300)
    def calc_object_stats(self):
        """Run the server-side object-stats analyzer and display the results.

        Sends the current feature/object/statistic selections to the
        ``analyzer.object_stats`` endpoint, shows the returned summary image
        in a Matplotlib canvas, and lists the per-object values in a table
        widget (created lazily on first use). Does nothing when the server
        returns no result.
        """
        dst = DataModel.g.dataset_uri(self.analyzer_id, group="analyzer")
        src = DataModel.g.dataset_uri(self.features_source.value())
        all_params = dict(src=src, dst=dst, modal=False)
        all_params["workspace"] = DataModel.g.current_workspace
        all_params["feature_ids"] = str(self.features_source.value()[-1])
        all_params["object_id"] = str(self.objects_source.value())
        all_params["stat_name"] = self.stat_name_combo_box.value()
        logger.debug(f"Running analyzer with params {all_params}")
        result = Launcher.g.run("analyzer", "object_stats", **all_params)
        if result:
            # BUG FIX: the result was previously unpacked BEFORE this
            # truthiness check, raising TypeError whenever the launcher
            # returned None; unpack only once we know there is a result.
            (point_features, img) = result
            logger.debug(f"Object stats result table {len(point_features)}")
            # Structured array of (row index, stat value) for the table widget.
            tabledata = np.array(
                [(i, val) for i, val in enumerate(point_features)],
                dtype=[
                    ("index", int),
                    ("z", float),
                ],
            )
            src_arr = decode_numpy(img)
            sc = MplCanvas(self, width=6, height=5, dpi=80)
            sc.axes.imshow(src_arr)
            sc.axes.axis("off")
            self.add_row(sc, max_height=500)
            # Create the results table lazily, then reuse it on later runs.
            if self.table_control is None:
                self.table_control = TableWidget()
                self.add_row(self.table_control.w, max_height=500)

            self.table_control.set_data(tabledata)
            self.collapse()
            self.expand()
    def jump_to_slice(msg):
        """Handle a jump to a new slice while in slice-retrieval mode.

        Invalidates the supervoxel cache, records the new slice index from
        ``msg["frame"]``, then refreshes each currently loaded napari layer
        (feature, superregion, annotation, pipeline, analyzer) with the
        matching slice fetched from the server.
        """
        cfg.supervoxels_cached = False
        cfg.retrieval_mode = "slice"
        cfg.current_slice = int(msg["frame"])

        # Refresh the feature layer, if one is currently shown.
        existing_feature_layer = [
            v for v in viewer.layers if v.name == cfg.current_feature_name
        ]

        if existing_feature_layer:
            features_src = DataModel.g.dataset_uri(
                cfg.current_feature_name, group="features"
            )
            # NOTE(review): "workpace" looks like a typo for "workspace", but
            # it is used for all these slice requests -- confirm what the
            # server endpoint expects before renaming.
            params = dict(
                workpace=True,
                src=features_src,
                slice_idx=cfg.current_slice,
                order=cfg.order,
            )
            result = Launcher.g.run("features", "get_slice", **params)
            if result:
                src_arr = decode_numpy_slice(result)
                existing_feature_layer[0].data = src_arr.copy()

        # Refresh the superregion (supervoxel) layer as a boundary image.
        existing_regions_layer = [
            v for v in viewer.layers if v.name == cfg.current_regions_name
        ]
        if existing_regions_layer:
            regions_src = DataModel.g.dataset_uri(
                cfg.current_regions_name, group="superregions"
            )
            params = dict(
                workpace=True,
                src=regions_src,
                slice_idx=cfg.current_slice,
                order=cfg.order,
            )
            result = Launcher.g.run("superregions", "get_slice", **params)
            if result:
                src_arr = decode_numpy(result)
                # Display supervoxel boundaries rather than the raw labels.
                src_arr = find_boundaries(src_arr) * 1.0
                existing_regions_layer[0].data = src_arr.copy()
                existing_regions_layer[0].opacity = 0.3

        # Re-paint the annotation layer via the dedicated handler.
        existing_level_layer = [
            v for v in viewer.layers if v.name == cfg.current_annotation_name
        ]
        if existing_level_layer and cfg.current_annotation_name is not None:
            paint_annotations({"level_id": cfg.current_annotation_name})

        # Refresh the pipeline-output layer.
        existing_pipeline_layer = [
            v for v in viewer.layers if v.name == cfg.current_pipeline_name
        ]

        if existing_pipeline_layer:
            print(f"loading pipeline {cfg.current_pipeline_name}")
            pipeline_src = DataModel.g.dataset_uri(
                cfg.current_pipeline_name, group="pipelines"
            )
            params = dict(
                workpace=True,
                src=pipeline_src,
                slice_idx=cfg.current_slice,
                order=cfg.order,
            )
            # NOTE(review): a "pipelines" dataset is fetched through the
            # "features" plugin's get_slice -- confirm this is intentional.
            result = Launcher.g.run("features", "get_slice", **params)
            if result:
                src_arr = decode_numpy(result).astype(np.int32)
                existing_pipeline_layer[0].data = src_arr.copy()

        # Refresh the analyzer-output layer.
        existing_analyzers_layer = [
            v for v in viewer.layers if v.name == cfg.current_analyzers_name
        ]

        if existing_analyzers_layer:
            print(f"Jumping to analyzer slice {cfg.current_analyzers_name}")
            analyzers_src = DataModel.g.dataset_uri(
                cfg.current_analyzers_name, group="analyzer"
            )
            params = dict(
                workpace=True,
                src=analyzers_src,
                slice_idx=cfg.current_slice,
                order=cfg.order,
            )
            # NOTE(review): analyzer data is also routed through the
            # "features" plugin -- verify against the server API.
            result = Launcher.g.run("features", "get_slice", **params)
            if result:
                src_arr = decode_numpy(result).astype(np.int32)
                existing_analyzers_layer[0].data = src_arr.copy()
Example #8
0
def paint_strokes(
    msg,
    drag_pts,
    anno_layer,
    parent_level=None,
    parent_label_idx=None,
    viewer_order=(0, 1, 2),
):
    """Build and execute the annotation command for a mouse-drag stroke.

    Rasterises the drag path, then either annotates the touched voxels
    directly or collects the touched supervoxels and labels those, sending
    the command to the server (or applying it locally when
    ``cfg.remote_annotation`` is false).

    Parameters
    ----------
    msg : dict
        Event payload; must contain ``"level_id"``, the level to paint.
    drag_pts : list
        Points gathered during the drag; each is either ``(x, y)`` in slice
        mode or ``(z, x, y)`` in volume mode.
    anno_layer :
        The napari labels layer being painted; its mode, selected label and
        brush size are read and updated here.
    parent_level, parent_label_idx :
        Optional parent-annotation constraint forwarded to the server.
    viewer_order : tuple
        Axis permutation of the viewer relative to the dataset.
    """
    level = msg["level_id"]
    anno_layer_shape = anno_layer.data.shape
    # Permute the layer shape into dataset axis order.
    anno_layer_shape = [anno_layer_shape[i] for i in viewer_order]

    # NOTE(review): this normalisation happens AFTER viewer_order was already
    # used to index anno_layer_shape above -- confirm a 2-tuple order can
    # actually reach this point without raising in the line above.
    if len(viewer_order) == 2:
        viewer_order=(0, 1, 2)

    if len(drag_pts) == 0:
        return

    try:
        # cfg label indices are 1-based; the layer uses 0-based labels.
        sel_label = int(cfg.label_value["idx"]) - 1
        anno_layer.selected_label = sel_label
        anno_layer.brush_size = int(cfg.brush_size)

        # Erase mode paints label 0.
        if anno_layer.mode == "erase":
            sel_label = 0
            cfg.current_mode = "erase"
        else:
            cfg.current_mode = "paint"

        line_x = []
        line_y = []

        # The first drag point fixes the start position and slice index z.
        if len(drag_pts[0]) == 2:
            px, py = drag_pts[0]
            z = cfg.current_slice
            #print(f"Using z {z}")
        else:
            #logger.info(f"drag_pts[0] {drag_pts}")
            pt_data = drag_pts[0]
            pt_data = [pt_data[i] for i in viewer_order]
            z, px, py = pt_data
            #logger.debug(f"z px py {z} {px} {py}")

        # Rasterise the drag path into pixel coordinates with skimage line().
        if len(drag_pts[0]) == 2:
            # depending on the slice mode we need to handle either 2 or 3 coordinates
            for x, y in drag_pts[1:]:
                if x < anno_layer.data.shape[0] and y < anno_layer.data.shape[1]:
                    yy, xx = line(py, px, y, x)
                    line_x.extend(xx)
                    line_y.extend(yy)
                    py, px = y, x
                    # NOTE(review): anno_shape is only assigned inside this
                    # loop; a drag with no in-bounds segment leaves it unbound
                    # and the later dilate_annotations call would raise (the
                    # error being swallowed by the broad except below).
                    anno_shape = (anno_layer.data.shape[0], anno_layer.data.shape[1])

        else:  # cfg.retrieval_mode == 'volume':
            for zz, x, y in drag_pts[1:]:
                # Permute each drag point into dataset axis order.
                pt_data = [zz, x, y]
                pt_data = [pt_data[i] for i in viewer_order]
                zz, x, y = pt_data

                if x < anno_layer_shape[1] and y < anno_layer_shape[2]:
                    yy, xx = line(py, px, y, x)
                    line_x.extend(xx)
                    line_y.extend(yy)
                    py, px = y, x

            anno_shape = (anno_layer_shape[1], anno_layer_shape[2])

        line_x = np.array(line_x)
        line_y = np.array(line_y)



        if len(line_y) > 0:
            all_regions = set()
            # Check if we are painting using supervoxels, if not, annotate voxels
            # NOTE(review): `== None` should be `is None` (PEP 8) -- behaviour
            # is the same here but worth cleaning up.
            if cfg.current_supervoxels == None:
                #cfg.local_sv = False
                # Thicken the 2D stroke to the brush size before annotating.
                if not cfg.three_dim:
                    line_y, line_x = dilate_annotations(
                        line_x,
                        line_y,
                        anno_shape,
                        anno_layer.brush_size,
                    )
                params = dict(workspace=True, level=level, label=sel_label)
                # Server expects plain int coordinate lists, not numpy scalars.
                yy, xx = list(line_y), list(line_x)
                yy = [int(e) for e in yy]
                xx = [int(e) for e in xx]

                # todo: preserve existing
                params.update(
                    slice_idx=int(z),
                    yy=yy,
                    xx=xx,
                    parent_level=parent_level,
                    parent_label_idx=parent_label_idx,
                    full=False,
                    viewer_order=viewer_order,
                    three_dim = cfg.three_dim,
                    brush_size = int(cfg.brush_size),
                    centre_point = (int(z), int(px), int(py))
                )

                result = Launcher.g.run("annotations", "annotate_voxels", **params)

            # we are painting with supervoxels, so check if we have a current supervoxel cache
            # if not, get the supervoxels from the server
            else:
                # Thicken the stroke so it reliably touches whole supervoxels.
                line_x, line_y = dilate_annotations(
                    line_x,
                    line_y,
                    anno_shape,
                    anno_layer.brush_size,
                )

                # Padded bounding box around the stroke (dataset coordinates).
                supervoxel_size = cfg.supervoxel_size * 2
                bb = np.array(
                    [
                        max(0, int(z) - supervoxel_size),
                        max(0, min(line_x) - supervoxel_size),
                        max(0, min(line_y) - supervoxel_size),
                        int(z) + supervoxel_size,
                        max(line_x) + supervoxel_size,
                        max(line_y) + supervoxel_size,
                    ]
                )
                bb = bb.tolist()
                #logger.debug(f"BB: {bb}")

                # (Re)load the supervoxel array when the cache is invalid:
                # from disk in volume modes, or one slice over HTTP otherwise.
                if cfg.supervoxels_cached == False:                    
                    if cfg.retrieval_mode == 'volume' or cfg.retrieval_mode == "volume_http":
                        regions_dataset = DataModel.g.dataset_uri(
                            cfg.current_regions_name, group="superregions"
                        )
                        with DatasetManager(
                            regions_dataset,
                            out=None,
                            dtype="uint32",
                            fillvalue=0,
                        ) as DM:
                            src_dataset = DM.sources[0]
                            sv_arr = src_dataset[:]
                    else:
                        regions_dataset = DataModel.g.dataset_uri(
                            cfg.current_regions_name, group="superregions"
                        )
                        # NOTE(review): "workpace" looks like a typo for
                        # "workspace" -- confirm the server contract.
                        params = dict(
                            workpace=True,
                            src=regions_dataset,
                            slice_idx=cfg.current_slice,
                            order=cfg.order,
                        )
                        result = Launcher.g.run("superregions", "get_slice", **params)
                        if result:
                            sv_arr = decode_numpy(result)

                    #logger.debug(f"Loaded superregion array of shape {sv_arr.shape}")

                    cfg.supervoxels_cache = sv_arr
                    cfg.supervoxels_cached = True
                    cfg.current_regions_dataset = regions_dataset
                else:
                    sv_arr = cfg.supervoxels_cache

                # Bring the 3D supervoxel volume into viewer axis order.
                if cfg.retrieval_mode != 'slice':
                    viewer_order_str = "".join(map(str, viewer_order))
                    if viewer_order_str != "012" and len(viewer_order_str) == 3:
                        sv_arr = np.transpose(sv_arr, viewer_order)
                        #logger.debug(f"After viewer_order transform {sv_arr.shape}")

                # Collect the set of supervoxel ids touched by the stroke,
                # indexing 2D in slice mode and 3D otherwise.
                for x, y in zip(line_x, line_y):
                    if cfg.retrieval_mode=='slice':
                        if (
                            x >= 0
                            and x < sv_arr.shape[0]
                            and y >= 0
                            and y < sv_arr.shape[1]
                        ):
                            sv = sv_arr[x, y]
                            all_regions |= set([sv])
                    else:
                        if (
                            z >= 0
                            and z < sv_arr.shape[0]
                            and x >= 0
                            and x < sv_arr.shape[1]
                            and y >= 0
                            and y < sv_arr.shape[2]
                        ):
                            sv = sv_arr[z, x, y]
                            all_regions |= set([sv])

                # Commit annotation to server
                params = dict(workspace=True, level=level, label=sel_label)


                # Either ask the server to label the touched regions, or
                # apply the labels locally on the cached annotation volume.
                if cfg.remote_annotation:
                    params.update(
                        region=cfg.current_regions_dataset,
                        r=list(map(int, all_regions)),
                        modal=False,
                        parent_level=parent_level,
                        parent_label_idx=parent_label_idx,
                        full=False,
                        bb=bb,
                        viewer_order=viewer_order,
                    )
                    result = Launcher.g.run("annotations", "annotate_regions", **params)
                else: 
                    cfg.local_sv = True
                    _annotate_regions_local(
                                    cfg.anno_data.copy(),
                                    sv_arr,
                                    list(map(int, all_regions)),
                                    label=sel_label,
                                    parent_level=parent_level,
                                    parent_label_idx=parent_label_idx,
                                    bb=bb,
                                    viewer_order=viewer_order)

    # Broad catch: a painting failure must not crash the viewer, so errors
    # are only logged at debug level (this also hides real bugs -- see the
    # NOTE(review) comments above).
    except Exception as e:
        logger.debug(f"paint_strokes Exception: {e}")