def show_grad(file_path, name, vec_lec=1, downsample_fac=1):
    global viewer
    global res
    h5file = h5py.File(file_path, 'r')
    for key in h5file.keys():
        line_id = 0
        data = h5file[key]
        with viewer.txn() as s:
            s.layers.append(name=name + '_' + key + '_end',
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            s.layers.append(name=name + '_' + key + '_start',
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            line = s.layers[-2].annotations
            point = s.layers[-1].annotations
            for i in range(data.shape[0]):
                line.append(
                    neuroglancer.LineAnnotation(
                        id=line_id,
                        point_a=downsample_fac * data[i, 0:3],
                        point_b=downsample_fac * data[i, 0:3] +
                        vec_lec * data[i, 3:6]))
                point.append(
                    neuroglancer.PointAnnotation(id=line_id + 500000,
                                                 point=downsample_fac *
                                                 data[i, 0:3]))

                line_id += 1
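
A minimal usage sketch for show_grad, assuming it sits in a module with the usual imports (h5py, numpy, neuroglancer) and that the viewer/res globals it reads are defined there; the file name, voxel size, and N x 6 dataset layout (position columns followed by vector columns) are illustrative assumptions, not part of the original.

import neuroglancer

# Hypothetical module-level globals required by show_grad.
neuroglancer.set_server_bind_address('127.0.0.1')
viewer = neuroglancer.Viewer()
res = [6, 6, 30]  # assumed voxel size; the order must match the module's convention

# 'gradients.h5' is a placeholder; each dataset is expected to be an N x 6
# array whose first three columns are positions and last three are vectors.
show_grad('gradients.h5', 'grad', vec_lec=10, downsample_fac=2)
print(viewer)  # open the printed URL to inspect the line/point annotations
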
def show_matches_gt_npy(filepath, name):
    global viewer
    global res
    with viewer.txn() as s:
        # The file stores a pickled dict of matches per key, so allow_pickle is required.
        npf = np.load(filepath, allow_pickle=True).item()
        for key in npf.keys():
            l = np.asarray(npf[key])

            s.layers.append(name='start_' + name,
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            start = s.layers[-1].annotations

            s.layers.append(name='end_' + name,
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            end = s.layers[-1].annotations
            id = -1
            for data in l:
                id += 1
                start.append(
                    neuroglancer.LineAnnotation(id=id,
                                                point_a=data[2][::-1],
                                                point_b=data[3][::-1]))
                end.append(
                    neuroglancer.PointAnnotation(id=id, point=data[2][::-1]))
def show_grad_field(file_path,
                    name,
                    seg_file=None,
                    seg_id=None,
                    vec_lec=1,
                    upsample_fac=1,
                    sparsity=15,
                    bloat=0):
    global viewer
    global res
    h5file = h5py.File(file_path, 'r')

    if seg_file is not None:
        h5file_seg = h5py.File(seg_file, 'r')
        segs = np.asarray(h5file_seg['main'])
        h5file_seg.close()
        mask = (segs == seg_id)
    else:
        mask = None

    for key in h5file.keys():
        line_id = 0
        data = h5file[key]

        if mask is None:
            locations = np.transpose(
                np.nonzero((data[0] != 0) | (data[1] != 0) | (data[2] != 0)))
        else:
            if bloat > 0:
                mask = ndimage.binary_dilation(mask,
                                               structure=np.ones((3, 3, 3)),
                                               iterations=int(bloat))
            locations = np.transpose(np.nonzero((mask)))

        locations = locations[::sparsity]
        with viewer.txn() as s:
            s.layers.append(name=name + '_' + key + '_end',
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            s.layers.append(name=name + '_' + key + '_start',
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            line = s.layers[-2].annotations
            point = s.layers[-1].annotations
            for i in range(locations.shape[0]):
                loc = tuple(locations[i])
                line.append(
                    neuroglancer.LineAnnotation(
                        id=line_id,
                        point_a=upsample_fac * locations[i, ::-1],
                        point_b=upsample_fac * locations[i, ::-1] +
                        vec_lec * np.array([
                            data[(2, ) + loc], data[(1, ) + loc], data[
                                (0, ) + loc]
                        ])))
                point.append(
                    neuroglancer.PointAnnotation(id=line_id + 500000,
                                                 point=upsample_fac *
                                                 locations[i, ::-1]))

                line_id += 1
def add_neuron(s, df, neuron_ids=[], show_ellipsoid_annotation=False):
    if len(neuron_ids) == 0:
        neuron_ids = list(np.unique(df.neuron_id))
    for ii, neuron_id in enumerate(neuron_ids):
        color = "#%06x" % random.randint(0, 0xFFFFFF)
        pos_dic = {}
        df_neuron = df[df.neuron_id == neuron_id]
        edges = []
        for index, row in df_neuron.iterrows():
            node_id = row.id
            pos = np.array(row.position)
            pos_dic[node_id] = np.flip(pos)
            if row.parent_id:
                edges.append((node_id, row.parent_id))

        v_nodes, u_nodes, connectors = [], [], []
        print(f'Loaded {len(pos_dic)} nodes for neuron id {neuron_id}')
        for u, v in edges:
            if u in pos_dic and v in pos_dic:
                u_site = pos_dic[u]
                v_site = pos_dic[v]

                u_nodes.append(
                    neuroglancer.EllipsoidAnnotation(center=u_site,
                                                     radii=(30, 30, 30),
                                                     id=next(ngid)))
                v_nodes.append(
                    neuroglancer.EllipsoidAnnotation(center=v_site,
                                                     radii=(30, 30, 30),
                                                     id=next(ngid)))
                connectors.append(
                    neuroglancer.LineAnnotation(point_a=u_site,
                                                point_b=v_site,
                                                id=next(ngid)))

        s.layers['neuronskeleton_{}'.format(
            neuron_id)] = neuroglancer.AnnotationLayer(
                voxel_size=(1, 1, 1),
                filter_by_segmentation=False,
                annotation_color=color,
                annotations=connectors,
            )
        if show_ellipsoid_annotation:
            s.layers['node_u_{}'.format(
                neuron_id)] = neuroglancer.AnnotationLayer(
                    voxel_size=(1, 1, 1),
                    filter_by_segmentation=False,
                    annotation_color=color,
                    annotations=u_nodes,
                )
            s.layers['node_v_{}'.format(
                neuron_id)] = neuroglancer.AnnotationLayer(
                    voxel_size=(1, 1, 1),
                    filter_by_segmentation=False,
                    annotation_color=color,
                    annotations=v_nodes,
                )
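
A sketch of driving add_neuron, assuming a module-level ngid generator (the function calls next(ngid)) and a skeleton table with id, parent_id, position, and neuron_id columns; the counter, the DataFrame contents, and the legacy voxel_size-style AnnotationLayer API are assumptions carried over from the snippet above.

import itertools

import neuroglancer
import numpy as np
import pandas as pd

# add_neuron expects a module-level id generator; a plain counter is assumed here.
ngid = itertools.count(start=1)

viewer = neuroglancer.Viewer()

# Hypothetical three-node skeleton; parent_id 0 marks the root.
df = pd.DataFrame({
    'id': [1, 2, 3],
    'parent_id': [0, 1, 2],
    'position': [np.array([100, 200, 10]),
                 np.array([110, 205, 11]),
                 np.array([120, 210, 12])],
    'neuron_id': [42, 42, 42],
})

with viewer.txn() as s:
    add_neuron(s, df, neuron_ids=[42], show_ellipsoid_annotation=True)
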
Example #5
def add_trees(s, trees, node_id, name, visible=False):
    if trees is None:
        return None
    # print(f"Drawing {name} with {len(trees.nodes)} nodes")
    for i, cc_nodes in enumerate(nx.weakly_connected_components(trees)):
        # print(f"drawing cc {i} with {len(cc_nodes)} nodes")
        cc = trees.subgraph(cc_nodes)
        mst = []
        for u, v in cc.edges():
            pos_u = np.array(cc.nodes[u]["pos"]) + 0.5
            pos_v = np.array(cc.nodes[v]["pos"]) + 0.5
            mst.append(
                neuroglancer.LineAnnotation(point_a=pos_u[::-1],
                                            point_b=pos_v[::-1],
                                            id=next(node_id)))
        # print(f"adding {len(mst)} edge annotations")

        s.layers.append(
            name="{}_{}".format(name, i),
            layer=neuroglancer.AnnotationLayer(annotations=mst),
            annotationColor="#{:02X}{:02X}{:02X}".format(
                random.randint(0, 255), random.randint(0, 255),
                random.randint(0, 255)),
            visible=visible,
        )
Example #6
    def _start_flood_fill(self, pos):
        self._stop_flood_fill()
        inf_results = zarr.zeros(
            self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint8)
        inf_volume = neuroglancer.LocalVolume(
            data=inf_results, voxel_size=list(self.gt_vol.resolution))

        with self.viewer.txn() as s:
            s.layers['points'] = neuroglancer.AnnotationLayer()
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
        self.flood_fill_event = threading.Event()
        t = threading.Thread(
            target=self._do_flood_fill,
            kwargs=dict(
                initial_pos=pos,
                inf_results=inf_results,
                inf_volume=inf_volume,
                event=self.flood_fill_event,
            ))
        t.daemon = True
        t.start()
Example #7
    def display_bounding_box(self, txn):
        box = neuroglancer.AxisAlignedBoundingBoxAnnotation()
        box.point_a = self.box_coords[0]
        box.point_b = self.box_coords[1]
        box.id = "selection"
        txn.layers["selection"] = neuroglancer.AnnotationLayer(
            annotations=[box])
Example #8
def add_trees(s, trees, node_id, name, visible=False):
    if trees is None:
        return None
    trees = nx.to_directed(trees)
    ccs = list(nx.weakly_connected_components(trees))
    if len(ccs) < 10:
        for i, cc_nodes in enumerate(nx.weakly_connected_components(trees)):
            cc = trees.subgraph(cc_nodes)
            mst = []
            for u, v in cc.edges():
                pos_u = np.array(cc.nodes[u]["location"]) + 0.5
                pos_v = np.array(cc.nodes[v]["location"]) + 0.5
                mst.append(
                    neuroglancer.LineAnnotation(point_a=pos_u[::-1],
                                                point_b=pos_v[::-1],
                                                id=next(node_id)))

            s.layers.append(
                name="{}_{}".format(name, i),
                layer=neuroglancer.AnnotationLayer(annotations=mst),
                annotationColor="#{:02X}{:02X}{:02X}".format(
                    random.randint(0, 255),
                    random.randint(0, 255),
                    random.randint(0, 255),
                ),
                visible=visible,
            )
    else:
        mst = []
        for u, v in trees.edges():
            pos_u = np.array(trees.nodes[u]["location"]) + 0.5
            pos_v = np.array(trees.nodes[v]["location"]) + 0.5
            mst.append(
                neuroglancer.LineAnnotation(point_a=pos_u[::-1],
                                            point_b=pos_v[::-1],
                                            id=next(node_id)))

        s.layers.append(
            name="{}".format(name),
            layer=neuroglancer.AnnotationLayer(annotations=mst),
            annotationColor="#{:02X}{:02X}{:02X}".format(
                random.randint(0, 255), random.randint(0, 255),
                random.randint(0, 255)),
            visible=visible,
        )
Example #9
def handle_synapses(ngviewer, path):
    """Add pre/post-synapses hosted via http as a neuroglancer layer."""
    # This function adds synapses in the precomputed format hosted remotely via http to a neuroglancer instance.
    presynapsepath = 'precomputed://' + path + '/precomputed/presynapses'
    postsynapsepath = 'precomputed://' + path + '/precomputed/postsynapses'
    with ngviewer.txn() as s:
        s.layers['presynapses'] = neuroglancer.AnnotationLayer(
            source=presynapsepath,
            annotationColor='#ff0000',
            linked_segmentation_layer={'presynapses_cell': 'skeleton'},
            filter_by_segmentation=['presynapses_cell'])
        s.layers['postsynapses'] = neuroglancer.AnnotationLayer(
            source=postsynapsepath,
            annotationColor='#0000ff',
            linked_segmentation_layer={'postsynapses_cell': 'skeleton'},
            filter_by_segmentation=['postsynapses_cell'])

    return ngviewer
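
A usage sketch for handle_synapses; the server URL is a placeholder and only illustrates that the path must expose precomputed/presynapses and precomputed/postsynapses subdirectories over http.

import neuroglancer

ngviewer = neuroglancer.Viewer()
# Placeholder host serving <path>/precomputed/presynapses and .../postsynapses.
ngviewer = handle_synapses(ngviewer, 'http://localhost:8000/mydataset')
print(ngviewer)
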
Example #10
def handle_points(ngviewer, path, layer_name, annotationColor):
    """Add points hosted via http as a neuroglancer layer."""
    # This function adds points in the precomputed format hosted locally via http to a neuroglancer instance.
    pointpath = 'precomputed://' + path + '/precomputed/' + layer_name
    with ngviewer.txn() as s:
        s.layers[layer_name] = neuroglancer.AnnotationLayer(
            source=pointpath, annotationColor=annotationColor)

    return ngviewer
Example #11
    def make_initial_state(self, segment_id, base_state):
        state = copy.deepcopy(base_state)

        segments = self.get_state_segment_ids(state)
        segments.clear()
        segments.add(segment_id)
        state.layers[
            self.annotation_layer_name] = neuroglancer.AnnotationLayer()

        return state
def show_bbox(pointa, pointb, name):
    global viewer
    global res
    pointa = np.asarray(pointa)
    pointb = np.asarray(pointb)
    with viewer.txn() as s:
        s.layers.append(name='bb_' + name,
                        layer=neuroglancer.AnnotationLayer(voxelSize=res))
        s.layers[-1].annotations.append(
            neuroglancer.AxisAlignedBoundingBoxAnnotation(
                id=0, point_a=(pointa)[::-1], point_b=(pointb)[::-1]))
Example #13
    def display(self):
        if self.img_path.startswith("precomputed:"):
            img = self.img_path
        else:
            img = load_image(self.img_path, self.x0, self.x1, self.y0, self.y1,
                             self.z0, self.z1)
        if self.alt_img_path is not None:
            if self.alt_img_path.startswith("precomputed:"):
                alt_img = self.alt_img_path
            else:
                alt_img = load_image(self.alt_img_path, self.x0, self.x1,
                                     self.y0, self.y1, self.z0, self.z1)
        if self.seg_path is not None:
            seg = load_image(self.seg_path, self.x0, self.x1,
                             self.y0, self.y1, self.z0, self.z1)
        with self.viewer.txn() as txn:
            layer(txn, "image", img, gray_shader, self.multiplier,
                  self.x0, self.y0, self.z0)
            if self.alt_img_path is not None:
                layer(txn, "alt-image", alt_img, green_shader,
                      self.alt_multiplier,
                      self.x0, self.y0, self.z0)
            if self.seg_path is not None:
                seglayer(txn, "segmentation", seg, self.x0, self.y0, self.z0)
            self.display_points(txn, self.points, "annotation", COLOR_POINTS)
            if self.detected_points is not None:
                self.display_points(txn, self.detected_points, "detected",
                                    COLOR_DETECTED_POINTS)
            elif has_layer(txn, "detected"):
                del txn.layers["detected"]
            if self.deleting_points is not None:
                self.display_points(txn, self.deleting_points, "deleting",
                                    COLOR_DELETING_POINTS)
            elif has_layer(txn, "deleting"):
                del txn.layers["deleting"]

            if self.box_coords is not None:
                self.display_bounding_box(txn)
            if self.bounding_box is not None:
                box = neuroglancer.AxisAlignedBoundingBoxAnnotation()
                box.point_a = self.bounding_box[0]
                box.point_b = self.bounding_box[1]
                box.id = "bounding-box"
                txn.layers["bounding-box"] = neuroglancer.AnnotationLayer(
                    annotations=[box])
                txn.position.voxel_coordinates = \
                    [(a + b) / 2 for a, b in zip(*self.bounding_box)]

            elif has_layer(txn, "selection"):
                del txn.layers["selection"]
        if self.bounding_box is None:
            self.center()
def show_ends_junctions(points_file, name):
    global viewer
    global res
    with h5py.File(points_file, 'r') as h5file:
        with viewer.txn() as s:
            s.layers.append(name='ends-' + name,
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            ends = s.layers[-1].annotations

            s.layers.append(name='junc-' + name,
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            junctions = s.layers[-1].annotations
            id = -1
            for key in h5file.keys():
                data = np.asarray(h5file[key])
                if key[0] == 'e': point = ends
                else: point = junctions
                for j in range(data.shape[0]):
                    id += 1
                    point.append(
                        neuroglancer.PointAnnotation(point=data[j, ::-1],
                                                     id=id))
def show_lines(filepath, name):
    global viewer
    global res
    h5file = h5py.File(filepath, 'r')
    with viewer.txn() as s:
        s.layers.append(name='start_' + name,
                        layer=neuroglancer.AnnotationLayer(voxelSize=res))
        start = s.layers[-1].annotations

        s.layers.append(name='end_' + name,
                        layer=neuroglancer.AnnotationLayer(voxelSize=res))
        end = s.layers[-1].annotations
        line_id = 0
        for key in h5file.keys():
            data = np.asarray(h5file[key])
            start.append(
                neuroglancer.LineAnnotation(id=line_id,
                                            point_a=data[2::-1],
                                            point_b=data[5:2:-1]))
            end.append(
                neuroglancer.PointAnnotation(id=line_id, point=data[0:3]))
            line_id += 1
Example #16
def add_annotation_layer(viewer, layer_name, verbose=True):
    if layer_name in [l.name for l in viewer.state.layers]:
        if verbose:
            print('{} is already a layer!'.format(layer_name))
    else:
        with viewer.txn() as s:
            s.layers.append(name=layer_name,
                            layer=neuroglancer.AnnotationLayer())

        # Neuroglancer seems to need a short delay before a new layer is ready for objects to be added to it.
        # This sleep adds enough delay for python-bound viewers.
        if isinstance(viewer, neuroglancer.viewer.Viewer):
            sleep(2.5)
    return viewer
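
A quick sketch of the duplicate guard, assuming the module-level imports the helper relies on (neuroglancer and sleep) are in place; the layer name is arbitrary.

import neuroglancer

viewer = neuroglancer.Viewer()
viewer = add_annotation_layer(viewer, 'manual-annotations')
# A second call with the same name only prints 'manual-annotations is already a layer!'.
viewer = add_annotation_layer(viewer, 'manual-annotations')
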
Example #17
def _set_viewer_seeds(s, seeds):
    for inclusive in [False, True]:
        layer_name = 'inclusive-seeds' if inclusive else 'exclusive-seeds'
        s.layers[layer_name] = neuroglancer.AnnotationLayer(
            annotation_color='green' if inclusive else 'red',
            annotations=[
                dict(
                    type='point',
                    id=x['id'],
                    point=x['position'],
                    description=str(x['supervoxel_id']),
                ) for x in seeds[inclusive]
            ],
        )
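
A sketch of the seed structure _set_viewer_seeds expects: a mapping from the inclusive flag to lists of dicts with id, position, and supervoxel_id; every value below is illustrative.

import neuroglancer

viewer = neuroglancer.Viewer()

# Hypothetical seeds keyed by the boolean inclusive flag used in the loop above.
seeds = {
    True: [dict(id='in-0', position=[1000, 2000, 100], supervoxel_id=12345)],
    False: [dict(id='ex-0', position=[1010, 2010, 101], supervoxel_id=67890)],
}

with viewer.txn() as s:
    _set_viewer_seeds(s, seeds)
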
Example #18
    def __init__(self, synapse_path, top_method='min', num_top_partners=10):
        with open(synapse_path, 'r') as f:
            synapse_data = json.load(f)['data']
        self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(
            synapse_data)
        self.top_method = top_method
        self.num_top_partners = num_top_partners

        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('select-custom', self._handle_select)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['dblclick0'] = 'select-custom'
        with viewer.txn() as s:
            s.perspective_zoom = 1024
            s.perspective_orientation = [
                0.63240087, 0.01582051, 0.05692779, 0.77238464
            ]
            s.navigation.zoom_factor = 32
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['partners'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['synapses'] = neuroglancer.AnnotationLayer(
                linked_segmentation_layer='ground_truth')
            s.layout = neuroglancer.row_layout([
                neuroglancer.LayerGroupViewer(
                    layout='xy',
                    layers=['image', 'ground_truth', 'partners', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['ground_truth', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['partners', 'synapses'],
                ),
            ])

        self.selected_segments = frozenset()
        self.viewer.shared_state.add_changed_callback(
            lambda: self.viewer.defer_callback(self.on_state_changed))
def show_matches_pred_npy(filepath, name, weight_th=0.80):
    global viewer
    global res
    with viewer.txn() as s:
        # allow_pickle is needed because each row holds Python objects (arrays and scores).
        results = np.load(filepath, allow_pickle=True)

        s.layers.append(name='start_' + name,
                        layer=neuroglancer.AnnotationLayer(voxelSize=res))
        start = s.layers[-1].annotations

        s.layers.append(name='end_' + name,
                        layer=neuroglancer.AnnotationLayer(voxelSize=res))
        end = s.layers[-1].annotations
        id = -1
        for data in results:
            if float(data[5]) > weight_th:
                id += 1
                start.append(
                    neuroglancer.LineAnnotation(id=id,
                                                point_a=data[2][::-1],
                                                point_b=data[3][::-1]))
                end.append(
                    neuroglancer.PointAnnotation(id=id, point=data[2][::-1]))
Example #20
def bboxlayer(txn, name, x0, x1, y0, y1, z0, z1):
    """Add a bounding box layer

    :param txn: the neuroglancer viewer transaction context
    :param name: the name of the layer
    :param x0: the leftmost edge of the box
    :param x1: the rightmost edge of the box
    :param y0: the topmost edge of the box
    :param y1: the bottommost edge of the box
    :param z0: the shallowest edge of the box
    :param z1: the deepest edge of the box
    """
    box = neuroglancer.AxisAlignedBoundingBoxAnnotation()
    box.point_a = [x0, y0, z0]
    box.point_b = [x1, y1, z1]
    box.id = name
    txn.layers[name] = neuroglancer.AnnotationLayer(annotations=[box])
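
A minimal call to bboxlayer with arbitrary voxel-coordinate extents.

import neuroglancer

viewer = neuroglancer.Viewer()
with viewer.txn() as txn:
    # Arbitrary 512 x 512 x 64 region of interest in voxel coordinates.
    bboxlayer(txn, 'roi', 0, 512, 0, 512, 0, 64)
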
def show_points(points_file, name, resolution=None):
    global viewer
    global res
    r = resolution if resolution else res
    h5file = h5py.File(points_file, 'r')
    with viewer.txn() as s:
        for key in h5file.keys():
            s.layers.append(name=name,
                            layer=neuroglancer.AnnotationLayer(voxelSize=r))
            points_layer = s.layers[-1].annotations

            points_ar = np.array(h5file[key])
            points = [
                neuroglancer.PointAnnotation(point=points_ar[i, ::-1], id=i)
                for i in range(points_ar.shape[0])
            ]
            points_layer.extend(points)
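
A sketch of the input show_points expects, assuming each HDF5 dataset is an N x 3 array stored in the same reversed (z, y, x) order used by the other helpers here; the file name, coordinates, and voxel size are illustrative.

import h5py
import neuroglancer
import numpy as np

# Hypothetical module-level globals read by show_points.
viewer = neuroglancer.Viewer()
res = [6, 6, 30]

# Write a tiny illustrative points file: one dataset of N x 3 coordinates.
with h5py.File('points.h5', 'w') as f:
    f.create_dataset('ends', data=np.array([[10, 100, 120], [12, 140, 90]]))

show_points('points.h5', 'my-points')
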
def dataset_view(datasetname):
    dataset = DataSet.query.filter(DataSet.name == datasetname).first_or_404()
    state = neuroglancer.ViewerState()
    state.layers['img'] = neuroglancer.ImageLayer(source='precomputed://' +
                                                  dataset.image_source)
    if dataset.pychunkedgraph_viewer_source is not None:
        state.layers['seg'] = neuroglancer.SegmentationLayer(
            source='graphene://' + dataset.pychunkedgraph_viewer_source)
    else:
        state.layers['seg'] = neuroglancer.SegmentationLayer(
            source='precomputed://' + dataset.flat_segmentation_source)
    state.layers['ann'] = neuroglancer.AnnotationLayer()
    state.layout = "xy-3d"
    ng_url = neuroglancer.to_url(state,
                                 prefix=current_app.config['NEUROGLANCER_URL'])
    return render_template('dataset.html',
                           dataset=dataset,
                           ng_url=ng_url,
                           version=__version__)
Example #23
def add_trees_no_skeletonization(
    s, trees, node_id, name, dimensions, visible=False, color=None
):
    mst = []
    for u, v in trees.edges():
        pos_u = np.array(trees.nodes[u]["location"]) + 0.5
        pos_v = np.array(trees.nodes[v]["location"]) + 0.5
        mst.append(
            neuroglancer.LineAnnotation(
                point_a=pos_u[::-1], point_b=pos_v[::-1], id=next(node_id)
            )
        )

    s.layers.append(
        name="{}".format(name),
        layer=neuroglancer.AnnotationLayer(annotations=mst),
        annotationColor="#{:02X}{:02X}{:02X}".format(255, 125, 125),
        visible=visible,
    )
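
A usage sketch for add_trees_no_skeletonization with a tiny NetworkX graph; the node locations, id counter, and layer name are illustrative, and dimensions is passed through unused exactly as in the function above.

import itertools

import networkx as nx
import neuroglancer
import numpy as np

viewer = neuroglancer.Viewer()
node_id = itertools.count(start=1)

# Illustrative two-node tree with (z, y, x) locations.
trees = nx.DiGraph()
trees.add_node(1, location=np.array([10, 100, 100]))
trees.add_node(2, location=np.array([11, 110, 105]))
trees.add_edge(1, 2)

with viewer.txn() as s:
    add_trees_no_skeletonization(s, trees, node_id, 'mst', dimensions=None)
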
Example #24
    def display_points(self):
        with self.viewer.txn() as txn:
            pointlayer(txn, "points", self.points[:, 0], self.points[:, 1],
                       self.points[:, 2], self.color)
            if self.deleting_points is not None:
                pointlayer(txn, "delete-points",
                           self.deleting_points[:, 0],
                           self.deleting_points[:, 1],
                           self.deleting_points[:, 2],
                           "red")
            if self.box_coords is not None:
                box = neuroglancer.AxisAlignedBoundingBoxAnnotation()
                box.point_a = self.box_coords[0]
                box.point_b = self.box_coords[1]
                box.id = "selection"
                txn.layers["selection"] = neuroglancer.AnnotationLayer(
                    annotations=[box])
            elif "selection" in txn.layers:
                del txn.layers["selection"]
Example #25
    def display_points_txn(self, txn):
        pointlayer(txn, self.name, self.points[:, 2], self.points[:, 1],
                   self.points[:, 0], self.color,
                   voxel_size=self.voxel_size)
        if self.deleting_points is not None:
            pointlayer(txn, "delete-%s" % self.name,
                       self.deleting_points[:, 2],
                       self.deleting_points[:, 1],
                       self.deleting_points[:, 0],
                       "red",
                       voxel_size=self.voxel_size)
        if self.box_coords is not None:
            box = neuroglancer.AxisAlignedBoundingBoxAnnotation()
            box.point_a = self.box_coords[0]
            box.point_b = self.box_coords[1]
            box.id = "selection"
            txn.layers["selection"] = neuroglancer.AnnotationLayer(
                annotations=[box])
        elif "selection" in txn.layers:
            del txn.layers["selection"]
def show_vec_2(direction_vecs_path, vec_lec, name):
    global viewer
    global res
    h5file = h5py.File(direction_vecs_path, 'r')
    for key in h5file.keys():
        line_id = 0
        data = h5file[key]
        with viewer.txn() as s:
            s.layers.append(name='dir_' + key,
                            layer=neuroglancer.AnnotationLayer(voxelSize=res))
            annotations = s.layers[-1].annotations
            for i in range(data.shape[0]):
                annotations.append(
                    neuroglancer.LineAnnotation(id=line_id,
                                                point_a=data[i][2::-1],
                                                point_b=data[i][2::-1] +
                                                vec_lec * data[i][5:2:-1]))
                annotations.append(
                    neuroglancer.PointAnnotation(id=line_id + 500000,
                                                 point=data[i][2::-1] +
                                                 vec_lec * data[i][5:2:-1]))
                line_id += 1
def add_synapses(s,
                 df,
                 pre_neuron_id=None,
                 post_neuron_id=None,
                 color=None,
                 name='',
                 radius=30):
    if pre_neuron_id is not None:
        df = df[df.id_skel_pre == pre_neuron_id]
    if post_neuron_id is not None:
        df = df[df.id_skel_post == post_neuron_id]

    pre_sites, post_sites, connectors = [], [], []
    print('Displaying {} synapses'.format(len(df)))
    for index, syn in df.iterrows():
        pre_site = np.flip(syn.location_pre)
        post_site = np.flip(syn.location_post)

        pre_sites.append(
            neuroglancer.EllipsoidAnnotation(center=pre_site,
                                             radii=(radius, radius, radius),
                                             id=next(ngid)))
        post_sites.append(
            neuroglancer.EllipsoidAnnotation(center=post_site,
                                             radii=(radius, radius, radius),
                                             id=next(ngid)))
        connectors.append(
            neuroglancer.LineAnnotation(point_a=pre_site,
                                        point_b=post_site,
                                        id=next(ngid)))

    s.layers['synlinks_{}'.format(name)] = neuroglancer.AnnotationLayer(
        voxel_size=(1, 1, 1),
        filter_by_segmentation=False,
        annotation_color=color,
        annotations=connectors,
    )
Example #28
def handle_synapticdata(ngviewer, layer_kws):
    """Add the synapse predictions for the em datasets as a neuroglancer layer."""
    # This function adds synapse predictions to a neuroglancer instance.

    ngspace = _get_ngspace(layer_kws)

    for layername in ngspace['layers']:
        if ngspace['layers'][layername]['type'] == 'synapsepred':
            with ngviewer.txn() as s:
                s.layers[layername] = neuroglancer.AnnotationLayer(
                    source=ngspace['layers'][layername]['source'],
                    linked_segmentation_layer={
                        'pre_segment':
                        ngspace['layers'][layername]['linkedseg'],
                        'post_segment':
                        ngspace['layers'][layername]['linkedseg']
                    },
                    filter_by_segmentation=['post_segment', 'pre_segment'],
                    shader='''#uicontrol vec3 preColor color(default=\"blue\")
                                  # uicontrol vec3 postColor color(default=\"red\")
                                  # uicontrol float scorethr slider(min=0, max=1000)
                                  # uicontrol int showautapse slider(min=0, max=1)

                                  void main() {
                                  setColor(defaultColor());
                                  setEndpointMarkerColor(
                                  vec4(preColor, 0.5),
                                  vec4(postColor, 0.5));
                                  setEndpointMarkerSize(5.0, 5.0);
                                  setLineWidth(2.0);
                                  if (int(prop_autapse()) > showautapse) discard;
                                  if (prop_score()<scorethr) discard;
                                  }''',
                    shaderControls={"scorethr": 80},
                )

    return ngviewer
Example #29
def visualize_synapses_in_neuroglancer(s,
                                       synapses,
                                       score_thr=-1,
                                       radius=30,
                                       show_ellipsoid_annotation=False,
                                       name='',
                                       color='#00ff00'):
    pre_sites = []
    post_sites = []
    connectors = []
    below_score = 0
    neuro_id = 0
    for syn in synapses:
        if syn.score is None:
            add_synapse = True
        else:
            add_synapse = syn.score > score_thr
        if not add_synapse:
            below_score += 1
        else:
            pre_site = np.flip(syn.location_pre)
            post_site = np.flip(syn.location_post)
            description = f"id: {syn.id}, pre_seg: {syn.id_segm_pre}, post_seg: {syn.id_segm_post}, score: {syn.score}"
            pre_sites.append(
                neuroglancer.EllipsoidAnnotation(center=pre_site,
                                                 radii=(radius, radius,
                                                        radius),
                                                 id=neuro_id + 1))
            post_sites.append(
                neuroglancer.EllipsoidAnnotation(center=post_site,
                                                 radii=(radius, radius,
                                                        radius),
                                                 id=neuro_id + 2))
            connectors.append(
                neuroglancer.LineAnnotation(point_a=pre_site,
                                            point_b=post_site,
                                            id=neuro_id + 3,
                                            description=description))
            neuro_id += 3

    s.layers['connectors_{}'.format(name)] = neuroglancer.AnnotationLayer(
        voxel_size=(1, 1, 1),
        filter_by_segmentation=False,
        annotation_color=color,
        annotations=connectors,
    )
    if show_ellipsoid_annotation:
        s.layers['pre_sites'] = neuroglancer.AnnotationLayer(
            voxel_size=(1, 1, 1),
            filter_by_segmentation=False,
            annotation_color='#00ff00',
            annotations=pre_sites,
        )
        s.layers['post_sites'] = neuroglancer.AnnotationLayer(
            voxel_size=(1, 1, 1),
            filter_by_segmentation=False,
            annotation_color='#ff00ff',
            annotations=post_sites,
        )
    print('filtered out {}/{} synapses'.format(below_score, len(synapses)))
    print('displaying {} synapses'.format(len(post_sites)))
    return synapses
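
A sketch of calling visualize_synapses_in_neuroglancer with stand-in synapse objects; SimpleNamespace is used only to mimic the attributes the function reads (id, score, location_pre, location_post, id_segm_pre, id_segm_post), which in the original code presumably come from a dedicated synapse class.

from types import SimpleNamespace

import neuroglancer
import numpy as np

viewer = neuroglancer.Viewer()

# Stand-in synapses with illustrative values; the second one falls below the threshold.
synapses = [
    SimpleNamespace(id=1, score=0.9,
                    location_pre=np.array([10, 100, 100]),
                    location_post=np.array([10, 102, 104]),
                    id_segm_pre=111, id_segm_post=222),
    SimpleNamespace(id=2, score=0.2,
                    location_pre=np.array([12, 120, 90]),
                    location_post=np.array([12, 118, 94]),
                    id_segm_pre=111, id_segm_post=333),
]

with viewer.txn() as s:
    visualize_synapses_in_neuroglancer(s, synapses, score_thr=0.5,
                                       show_ellipsoid_annotation=True)
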
Example #30
                    neuroglancer.EllipsoidAnnotation(center=(x, y, z),
                                                     radii=(tuple([10] * 3)),
                                                     id=node_id,
                                                     segments=None))
            data_k["maxima"] = maxima

    prediction_views.append(data_k)

with viewer.txn() as s:
    for k, view in enumerate(prediction_views):
        for dset, dset_data in view["view_dsets"].items():
            add_layer(s, dset_data, str(k) + "_" + dset)

        s.layers['{}_maxima'.format(k)] = neuroglancer.AnnotationLayer(
            voxel_size=(1, 1, 1),
            filter_by_segmentation=False,
            annotation_color='#add8e6',
            annotations=view["maxima"])

        try:
            s.layers['{}_connectors'.format(k)] = neuroglancer.AnnotationLayer(
                voxel_size=(1, 1, 1),
                filter_by_segmentation=False,
                annotation_color='#00ff00',
                annotations=view["edge_connectors"])
        except KeyError:
            print("No edges in prediction")
    add_layer(s, prediction_views[0]["raw"], 'raw')

print(viewer)