def _start_flood_fill(self, pos):
    """Kick off a background flood fill from *pos*, replacing any running one.

    Sets up an empty zarr-backed inference volume plus the 'points' and
    'inference' viewer layers, then hands the actual fill work to a daemon
    thread driven by ``self._do_flood_fill``.
    """
    # Make sure a previously started fill is cancelled first.
    self._stop_flood_fill()

    # Chunked uint8 scratch volume covering the ground-truth bounds.
    inf_results = zarr.zeros(
        self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
    inf_volume = neuroglancer.LocalVolume(
        data=inf_results, dimensions=self.dimensions)

    with self.viewer.txn() as s:
        s.layers['points'] = neuroglancer.LocalAnnotationLayer(self.dimensions)
        s.layers['inference'] = neuroglancer.ImageLayer(
            source=inf_volume,
            shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
        )

    # The event lets _stop_flood_fill signal the worker to quit.
    self.flood_fill_event = threading.Event()
    worker = threading.Thread(
        target=self._do_flood_fill,
        kwargs=dict(
            initial_pos=pos,
            inf_results=inf_results,
            inf_volume=inf_volume,
            event=self.flood_fill_event,
        ))
    worker.daemon = True
    worker.start()
def mark_synapse(self, s, layer, add):
    """Add (or remove) a sphere of annotations at the mouse position.

    *s* is the neuroglancer action state, *layer* the annotation layer name
    ('pre' or another), and *add* selects add vs. remove.
    """
    voxel_coordinates = s.mouse_voxel_coordinates
    if voxel_coordinates is None:
        return

    block_size = self.false_merge_block_size
    level = self.cur_block_level

    # Use a distinct name for the transaction state so it does not shadow
    # the action-state argument *s*.
    with self.viewer.txn() as state:
        if state.layers.index(layer) == -1:
            # Lazily create the annotation layer on first use.
            state.layers[layer] = neuroglancer.LocalAnnotationLayer(
                dimensions=state.dimensions,
                shader='''
void main() {
  setBoundingBoxBorderWidth(0.0);
  setBoundingBoxFillColor(defaultColor());
}
''',
                annotation_color='#0f0' if layer == 'pre' else '#00f',
            )
        annotations = state.layers[layer].annotations
        mask = make_block_mask(
            annotations=annotations,
            block_size=block_size,
            max_level=self.max_block_levels)
        center = np.array([int(x) for x in voxel_coordinates])
        # Sphere radius grows with the current block level.
        mask.add_or_remove_sphere(
            center, np.array([1, 1, 1]) * 2**level, add=add)
        state.layers[layer].annotations = make_annotations_from_mask(
            mask=mask, block_size=block_size)
def __init__(self, synapse_path, top_method='min', num_top_partners=10):
    """Build a synapse-browsing viewer from the JSON file at *synapse_path*.

    *top_method* and *num_top_partners* control how partner segments are
    ranked/limited elsewhere in the class.
    """
    with open(synapse_path, 'r') as f:
        synapse_data = json.load(f)['data']
    self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(
        synapse_data)
    self.top_method = top_method
    self.num_top_partners = num_top_partners

    dimensions = neuroglancer.CoordinateSpace(
        names=['x', 'y', 'z'],
        units='nm',
        scales=[8, 8, 8],
    )

    viewer = self.viewer = neuroglancer.Viewer()
    viewer.actions.add('select-custom', self._handle_select)
    with viewer.config_state.txn() as cs:
        # Double-click selects via our custom handler.
        cs.input_event_bindings.data_view['dblclick0'] = 'select-custom'

    gt_source = ('precomputed://gs://neuroglancer-public-data/'
                 'flyem_fib-25/ground_truth')
    with viewer.txn() as state:
        state.projection_orientation = [
            0.63240087, 0.01582051, 0.05692779, 0.77238464
        ]
        state.dimensions = dimensions
        state.position = [3000, 3000, 3000]
        state.layers['image'] = neuroglancer.ImageLayer(
            source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
        )
        # 'partners' intentionally shows the same segmentation as
        # 'ground_truth' but with an independent segment selection.
        state.layers['ground_truth'] = neuroglancer.SegmentationLayer(
            source=gt_source)
        state.layers['partners'] = neuroglancer.SegmentationLayer(
            source=gt_source)
        state.layers['synapses'] = neuroglancer.LocalAnnotationLayer(
            dimensions=dimensions, linked_segmentation_layer='ground_truth')
        state.layout = neuroglancer.row_layout([
            neuroglancer.LayerGroupViewer(
                layout='xy',
                layers=['image', 'ground_truth', 'partners', 'synapses'],
            ),
            neuroglancer.LayerGroupViewer(
                layout='3d',
                layers=['ground_truth', 'synapses'],
            ),
            neuroglancer.LayerGroupViewer(
                layout='3d',
                layers=['partners', 'synapses'],
            ),
        ])

    self.selected_segments = frozenset()
    self.viewer.shared_state.add_changed_callback(
        lambda: self.viewer.defer_callback(self.on_state_changed))
def __init__(self, raw, embedding, mst, classifier):
    """Interactive viewer wiring raw/embedding volumes to a seeded segmentation.

    *mst* is an iterable of weighted edges loaded into a networkx graph;
    foreground/background seeds are painted with the bound mouse actions and
    the segmentation is recomputed on demand ('u' key).
    """
    self.raw = raw
    self.embedding = embedding
    self.classifier = classifier
    self.mst = mst
    self.points = []
    self.mst_graph = nx.Graph()
    self.mst_graph.add_weighted_edges_from(mst)
    self.threshold = 0.5

    self.raw_dimensions = neuroglancer.CoordinateSpace(
        names=['z', 'y', 'x'], units='nm', scales=raw.voxel_size)
    # Channel axis first for the embedding/points space.
    self.dimensions = neuroglancer.CoordinateSpace(
        names=['c^', 'z', 'y', 'x'],
        units=[''] + 3 * ['nm'],
        scales=raw.voxel_size)

    volume_shape = raw.shape
    print(f"Creating segmentation layer with shape {volume_shape}")
    # Segmentation starts out empty; labels are filled in interactively.
    self.segmentation = np.zeros(volume_shape, dtype=np.uint32)
    self.segmentation_volume = neuroglancer.LocalVolume(
        data=self.segmentation, dimensions=self.raw_dimensions)

    self.viewer = neuroglancer.Viewer()
    self.viewer.actions.add('label_fg', self._label_fg)
    self.viewer.actions.add('label_bg', self._label_bg)
    self.viewer.actions.add('update_seg', self._update_segmentation)
    with self.viewer.config_state.txn() as cfg:
        cfg.input_event_bindings.data_view['shift+mousedown0'] = 'label_fg'
        cfg.input_event_bindings.data_view['shift+mousedown1'] = 'label_bg'
        cfg.input_event_bindings.data_view['keyu'] = 'update_seg'
    with self.viewer.txn() as state:
        add_layer(state, self.raw, 'raw')
        add_layer(state, self.embedding, 'embedding')
        state.layers['embedding'].visible = False
        state.layers['points'] = neuroglancer.LocalAnnotationLayer(
            self.dimensions)
        state.layers['segmentation'] = neuroglancer.SegmentationLayer(
            source=self.segmentation_volume)
def _append_synapse_annotation_layer(
        self, viewer_state: ng.viewer_state.ViewerState, name: str,
        synapses: Synapses):
    """Add a pre-synapse point layer plus pre→post line annotations.

    The T-bar points go into a separate '<name>_pre' layer; the layer named
    *name* holds one line per postsynaptic partner.
    """
    pre_synapses = synapses.pre_with_physical_coordinate
    self._append_point_annotation_layer(
        viewer_state, name + '_pre', pre_synapses)

    line_annotations = []
    post_synapses = synapses.post_with_physical_coordinate
    if post_synapses is not None:
        for post_idx in range(post_synapses.shape[0]):
            # Column 0 indexes the presynaptic partner row.
            pre_idx = post_synapses[post_idx, 0]
            # note that the synapse coordinate is already in xyz order
            # so we do not need to reverse it!
            line_annotations.append(
                ng.LineAnnotation(
                    id=str(post_idx),
                    pointA=pre_synapses[pre_idx, :],
                    pointB=post_synapses[post_idx, 1:],
                    props=['#0ff', 5]))

    viewer_state.layers.append(
        name=name,
        layer=ng.LocalAnnotationLayer(
            dimensions=ng.CoordinateSpace(
                names=['z', 'y', 'x'], units="nm", scales=(1, 1, 1)),
            annotation_properties=[
                ng.AnnotationPropertySpec(
                    id='color',
                    type='rgb',
                    default='red',
                ),
                ng.AnnotationPropertySpec(
                    id='size', type='float32', default=5),
            ],
            annotations=line_annotations,
            shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(prop_size());
}
''',
        ),
    )
def annotate_points(ngviewer, dimensions, pointscolor, points_df, layer_name,
                    layer_scale):
    """Annotate points from a dataframe (defunct do not use..).

    Parameters
    ----------
    ngviewer :   Neuroglancer viewer
    dimensions : dimensions and units of 'x', 'y', 'z'
    pointscolor : color property attached to each point annotation
    points_df : dataframe containing 'x', 'y', 'z', 'description' columns
    layer_name : name of the annotation layer to create
    layer_scale : scaling from voxel to native space in 'x', 'y', 'z'

    Returns
    -------
    bool : True once all annotations have been appended.
    """
    # BUG FIX: the original scaled the caller's dataframe in place, silently
    # corrupting it (and compounding the /1000 on repeated calls). Work on a
    # copy instead.
    points_df = points_df.copy()
    pointname = points_df['description']
    points_df.loc[:, ['x', 'y', 'z'
                      ]] = points_df.loc[:, ['x', 'y', 'z']].values / 1000
    with ngviewer.txn() as s:
        s.layers.append(name=layer_name,
                        layer=neuroglancer.LocalAnnotationLayer(
                            dimensions=dimensions,
                            ignore_null_segment_filter=False,
                            annotation_properties=[
                                neuroglancer.AnnotationPropertySpec(
                                    id='color',
                                    type='rgb',
                                    default='blue',
                                )
                            ],
                            shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(5.0);
}
''',
                        ))
        for index, indivpoints in points_df.iterrows():
            s.layers[layer_name].annotations.append(
                neuroglancer.PointAnnotation(
                    id=str(index),
                    point=[
                        indivpoints.x * layer_scale[0],
                        indivpoints.y * layer_scale[1],
                        indivpoints.z * layer_scale[2]
                    ],
                    props=[pointscolor],
                    description=pointname[index]))
    return True
def pointlayer(txn, name, x, y, z, color="yellow", size=5,
               shader=pointlayer_shader, voxel_size=default_voxel_size):
    """Add a point layer.

    :param txn: the neuroglancer viewer transaction context
    :param name: the displayable name of the point layer
    :param x: the x coordinate per point
    :param y: the y coordinate per point
    :param z: the z coordinate per point
    :param color: the color of the points in the layer, e.g. "red", "yellow"
    :param size: the size of the points
    :param voxel_size: the size of a voxel (x, y, z)
    """
    dims = neuroglancer.CoordinateSpace(
        names=["x", "y", "z"],
        units=["µm", "µm", "µm"],
        scales=voxel_size)
    # input points should be in zyx order
    # NOTE(review): ids here are ints (i + 1); neuroglancer annotation ids
    # are conventionally strings — confirm this is accepted by the API.
    point_annotations = [
        neuroglancer.PointAnnotation(id=i + 1, point=[zz, yy, xx])
        for i, (xx, yy, zz) in enumerate(zip(x, y, z))
    ]
    txn.layers[name] = neuroglancer.LocalAnnotationLayer(
        dimensions=dims,
        annotation_properties=[
            neuroglancer.AnnotationPropertySpec(
                id='color',
                type='rgb',
                default=color,
            ),
            neuroglancer.AnnotationPropertySpec(
                id='size',
                type='float32',
                default=float(size),
            )
        ],
        annotations=point_annotations,
        shader=shader)
def setup_viewer(viewer):
    """Configure *viewer* with a one-voxel segmentation and a linked, empty
    annotation layer 'a' selected for editing."""
    with viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(
            names=["x", "y", "z"], units="nm", scales=[1, 1, 1])
        # Single-voxel volume holding segment id 42 for the link target.
        seg_volume = neuroglancer.LocalVolume(
            data=np.array([[[42]]], dtype=np.uint32),
            dimensions=s.dimensions)
        s.layers.append(
            name="seg",
            layer=neuroglancer.SegmentationLayer(source=seg_volume))
        s.layers.append(
            name="a",
            layer=neuroglancer.LocalAnnotationLayer(
                dimensions=s.dimensions,
                linked_segmentation_layer={'segments': 'seg'},
                filter_by_segmentation=['segments'],
                ignore_null_segment_filter=False),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
        s.selected_layer.layer = 'a'
def _append_point_annotation_layer(
        self, viewer_state: ng.viewer_state.ViewerState, name: str,
        points: np.ndarray):
    """Add one point annotation per row of *points* as a local layer."""
    # we would like to show line first and then the presynapse point
    # so, we have distinct color to show T-bar
    annotations = [
        ng.PointAnnotation(
            id=str(sid),
            point=points[sid, :].tolist(),
            props=['#ff0', 8])
        for sid in range(points.shape[0])
    ]
    viewer_state.layers.append(
        name=name,
        layer=ng.LocalAnnotationLayer(
            dimensions=ng.CoordinateSpace(
                names=['z', 'y', 'x'], units="nm", scales=(1, 1, 1)),
            annotation_properties=[
                ng.AnnotationPropertySpec(
                    id='color',
                    type='rgb',
                    default='red',
                ),
                ng.AnnotationPropertySpec(
                    id='size', type='float32', default=5),
            ],
            annotations=annotations,
            shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(prop_size());
}
''',
        ),
    )
def test_annotate(webdriver):
    """End-to-end check that relationship-filtered annotations change color as
    the linked segment selections change."""
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(
            names=["x", "y"], units="nm", scales=[1, 1])
        s.position = [0, 0]
        # Two hidden single-voxel segmentation layers serving only as
        # link targets for the annotation relationships.
        for layer_name, seg_id, initial_segments in (('seg1', 42, [42]),
                                                     ('seg2', 43, [])):
            s.layers.append(
                name=layer_name,
                layer=neuroglancer.SegmentationLayer(
                    source=neuroglancer.LocalVolume(
                        dimensions=s.dimensions,
                        data=np.full(shape=(1, 1), dtype=np.uint32,
                                     fill_value=seg_id),
                    ),
                ),
                segments=initial_segments,
                visible=False,
            )
        s.layers.append(
            name="a",
            layer=neuroglancer.LocalAnnotationLayer(
                dimensions=s.dimensions,
                annotation_relationships=['a', 'b'],
                linked_segmentation_layer={'a': 'seg1', 'b': 'seg2'},
                filter_by_segmentation=['a', 'b'],
                ignore_null_segment_filter=False,
                annotation_properties=[
                    neuroglancer.AnnotationPropertySpec(
                        id='color',
                        type='rgb',
                        default='red',
                    )
                ],
                annotations=[
                    neuroglancer.PointAnnotation(
                        id='1',
                        point=[0, 0],
                        segments=[[42], []],
                        props=['#0f0'],
                    ),
                    neuroglancer.PointAnnotation(
                        id='2',
                        point=[0, 0],
                        segments=[[], [43]],
                        props=['#00f'],
                    ),
                    neuroglancer.PointAnnotation(
                        id='3',
                        point=[0, 0],
                        segments=[[], [44]],
                        props=['#0ff'],
                    ),
                ],
                shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(1000.0);
}
''',
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
        s.selected_layer.layer = 'a'

    def expect_color(seg1, seg2, color):
        # Select segments, then assert the whole 10x10 screenshot is *color*.
        with webdriver.viewer.txn() as s:
            s.layers['seg1'].segments = seg1
            s.layers['seg2'].segments = seg2
        screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
        np.testing.assert_array_equal(
            screenshot.image_pixels,
            np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)))

    expect_color(seg1=[42], seg2=[], color=[0, 255, 0, 255])
    expect_color(seg1=[], seg2=[43], color=[0, 0, 255, 255])
    expect_color(seg1=[], seg2=[44], color=[0, 255, 255, 255])
print('Setting visibility to true')
viewer.actions.add('toggle-visibility', toggle_visibility)
with viewer.config_state.txn() as s:
    s.input_event_bindings.viewer['keys'] = 'toggle-visibility'

with viewer.txn() as s:
    s.dimensions = neuroglancer.CoordinateSpace(
        names=["x", "y"], units="nm", scales=[1, 1])
    s.position = [150, 150]
    s.layers.append(
        name="a",
        layer=neuroglancer.LocalAnnotationLayer(
            dimensions=s.dimensions,
            # BUG FIX: the shader calls prop_color()/prop_size(), but no
            # annotation properties were declared, so the generated layer
            # shader could not compile and nothing rendered. Declare the
            # two properties with defaults so the shader is valid.
            annotation_properties=[
                neuroglancer.AnnotationPropertySpec(
                    id='color',
                    type='rgb',
                    default='red',
                ),
                neuroglancer.AnnotationPropertySpec(
                    id='size',
                    type='float32',
                    default=10,
                ),
            ],
            annotations=[
                neuroglancer.PointAnnotation(
                    id='1',
                    point=[150, 150],
                ),
            ],
            shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(prop_size());
}
''',
        ),
    )
    s.layout = 'xy'
    s.selected_layer.layer = 'a'
print(viewer)
def annotate_synapses(ngviewer, dimensions, x):
    """Annotate pre- and postsynapses of a neuron/neuronlist. (defunct do not use..).

    This function annotates synapses of a neuron/neuronlist, one annotation
    layer per polarity ('post_synapses', 'pre_synapses'), each linked to the
    'skeletons' segmentation layer.

    Parameters
    ----------
    ngviewer : Neuroglancer viewer
    dimensions : dimensions and units of 'x', 'y', 'z'
    x : CatmaidNeuron | CatmaidNeuronList or TreeNeuron | NeuronList

    Returns
    -------
    bool : True once both layers have been populated.

    Raises
    ------
    TypeError : if *x* is not a supported neuron/neuronlist type.
    """
    # Normalize the input to a neuron list.
    if isinstance(x, pymaid.core.CatmaidNeuron):
        neuronlist = pymaid.core.CatmaidNeuronList(x)
    elif isinstance(x, navis.core.TreeNeuron):
        neuronlist = navis.core.NeuronList(x)
    elif isinstance(x, (pymaid.core.CatmaidNeuronList, navis.core.NeuronList)):
        neuronlist = x
    else:
        raise TypeError(f'Expected neuron or neuronlist, got "{type(x)}"')

    skeldatasegidlist = [neuron.id for neuron in neuronlist]

    def _add_synapse_layer(layer_name, default_color, point_color,
                           connector_attr):
        # One annotation layer per synapse polarity; the post/pre halves of
        # the original were duplicated verbatim, so they share this helper.
        with ngviewer.txn() as s:
            s.layers.append(
                name=layer_name,
                layer=neuroglancer.LocalAnnotationLayer(
                    dimensions=dimensions,
                    annotation_relationships=[layer_name],
                    linked_segmentation_layer={layer_name: 'skeletons'},
                    filter_by_segmentation=[layer_name],
                    ignore_null_segment_filter=False,
                    annotation_properties=[
                        neuroglancer.AnnotationPropertySpec(
                            id='color',
                            type='rgb',
                            default=default_color,
                        )
                    ],
                    shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(5.0);
}
''',
                ))
        with ngviewer.txn() as s:
            for neuronidx in range(len(neuronlist)):
                connectors = getattr(neuronlist[neuronidx], connector_attr)
                connectors = connectors.reset_index()
                for index, conn in connectors.iterrows():
                    s.layers[layer_name].annotations.append(
                        neuroglancer.PointAnnotation(
                            id=str(index),
                            # Coordinates divided by 1000 to match the
                            # viewer's coordinate space.
                            point=[conn.x / 1000, conn.y / 1000,
                                   conn.z / 1000],
                            segments=[[skeldatasegidlist[neuronidx]]],
                            props=[point_color],
                        ))

    # postsynapses first..
    _add_synapse_layer('post_synapses', 'blue', '#0000ff', 'postsynapses')
    # presynapses next..
    _add_synapse_layer('pre_synapses', 'red', '#ff0000', 'presynapses')

    status = True
    return status
layer=neuroglancer.LocalAnnotationLayer( dimensions=s.dimensions, annotation_properties=[ neuroglancer.AnnotationPropertySpec( id='color', type='rgb', default='red', ), neuroglancer.AnnotationPropertySpec( id='size', type='float32', default=10, ) ], annotations=[ neuroglancer.PointAnnotation( id='1', point=[150, 150], props=['#0f0', 5], ), neuroglancer.PointAnnotation( id='2', point=[250, 100], props=['#ff0', 30], ), ], shader=''' void main() { setColor(prop_color()); setPointMarkerSize(prop_size()); } ''', ),
data=affine_transformed_arr, dimensions=dimensions, voxel_offset=(0, 0, 0)) #%% viewer = neuroglancer.Viewer() with viewer.txn() as s: s.layers.clear() s.layers.append(name='fixed', layer=fixed_layer) s.layers.append(name='moving', layer=moving_layer) print(s.dimensions) s.layers.append(name="com", layer=neuroglancer.LocalAnnotationLayer( dimensions=dimensions, annotations=[ neuroglancer.PointAnnotation( id='d6704f30d2f08f1795d73bf387da7d5eec9d813f', point=[100, 100, 200]), neuroglancer.PointAnnotation(id='234', point=[100, 100, 300]) ])) print(viewer) #%% viewer = neuroglancer.Viewer() with viewer.txn() as s: s.layers.clear() s.layers.append(name='fixed', layer=fixed_layer) s.layers.append(name='transformed', layer=transformed_layer) print(s.dimensions) s.layers.append(name="com", layer=neuroglancer.LocalAnnotationLayer(