# In another ipython window run:
import neuroglancer

neuroglancer.set_static_content_source(
    url="https://nglancer.pni.princeton.edu")
brainname = "20200701_12_55_28_20170207_db_bl6_crii_rpv_01"
port = 1350
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.layers["%s" % brainname] = neuroglancer.ImageLayer(
        source="precomputed://http://localhost:%s" % port)
print(viewer)
# This should add the above volume to the neuroglancer window

with viewer.txn() as s:
    s.layers["%s_atlas" % brainname] = neuroglancer.SegmentationLayer(
        source="precomputed://http://localhost:%s" % int(port + 1))
print(viewer)
# This should add the atlas volume to the neuroglancer window

###WINDOW 3###
# Take screenshots.
# NOTE: THIS DOESN'T WORK IN A SPYDER CONSOLE; ACTIVATE ENV AND RUN IN IPYTHON SHELL
import os

# NOTE(review): the path ends with a trailing space ("..._zoom ") — looks
# unintentional, but kept because downstream tooling may expect it; confirm.
svdst = "/home/wanglab/Desktop/%s/cortex_wo_overlay_zoom " % brainname
# One race-free call creates the brain directory AND the structure directory.
# (The original exists-check + os.mkdir pair raised if the parent chain was
# missing and raced against concurrent creation.)
os.makedirs(svdst, exist_ok=True)
ss = neuroglancer.ScreenshotSaver(viewer, svdst)
with viewer.config_state.txn() as s:
Esempio n. 2
0
from __future__ import print_function

import webbrowser

import neuroglancer

viewer = neuroglancer.Viewer()

with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
    )
    s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
    )
    s.layout = neuroglancer.row_layout([
        neuroglancer.column_layout([
            neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
            neuroglancer.LayerGroupViewer(layers=['image', 'ground_truth']),
        ]),
        neuroglancer.column_layout([
            neuroglancer.LayerGroupViewer(layers=['ground_truth']),
            neuroglancer.LayerGroupViewer(layers=['ground_truth']),
        ]),
    ])
print(viewer.state)
print(viewer)
webbrowser.open_new(viewer.get_viewer_url())
    def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
        """Build the interactive neuroglancer viewer for splitting one agglomeration.

        Args:
            graph: provides ``get_agglo_members(agglo_id)`` — the supervoxel
                members of the agglomerated segment.
            agglo_id: id of the agglomerated segment to proofread/split.
            image_url: source URL for the image layer.
            segmentation_url: source URL used by all segmentation layers.
            state_path: optional file path used to persist/restore the
                interactive state between sessions.
        """
        self.graph = graph
        self.agglo_id = agglo_id
        self.image_url = image_url
        self.segmentation_url = segmentation_url
        self.state = InteractiveState(state_path)
        # Split computation derived from the current state/graph/agglomeration.
        self.cached_split_result = CachedSplitResult(
            state=self.state, graph=self.graph, agglo_id=self.agglo_id)
        self.agglo_members = set(self.graph.get_agglo_members(agglo_id))

        # Resume a previous session if its state file exists; otherwise start
        # fresh from the full member set.
        if state_path is not None and os.path.exists(state_path):
            self.state.load()
        else:
            self.state.initialize(self.agglo_members)

        viewer = self.viewer = neuroglancer.Viewer()
        # Register the proofreading actions invoked by the key bindings below.
        viewer.actions.add('inclusive-seed', self._add_inclusive_seed)
        viewer.actions.add('exclusive-seed', self._add_exclusive_seed)
        viewer.actions.add('next-component', self._next_component)
        viewer.actions.add('prev-component', self._prev_component)
        viewer.actions.add('new-component', self._make_new_component)
        viewer.actions.add('exclude-component', self._exclude_component)
        viewer.actions.add('exclude-all-but-component', self._exclude_all_but_component)

        # [event, action-name] pairs; installed on the viewer, the slice views
        # and the perspective view in the config-state transaction below.
        key_bindings = [
            ['bracketleft', 'prev-component'],
            ['bracketright', 'next-component'],
            ['at:dblclick0', 'exclude-component'],
            ['at:shift+mousedown2', 'exclude-all-but-component'],
            ['at:control+mousedown0', 'inclusive-seed'],
            ['at:shift+mousedown0', 'exclusive-seed'],
            ['enter', 'new-component'],
        ]

        with viewer.txn() as s:
            s.layers.append(
                name='image',
                layer=neuroglancer.ImageLayer(source=self.image_url),
            )
            # Original segmentation restricted to the agglomeration's members.
            s.layers.append(
                name='original',
                layer=neuroglancer.SegmentationLayer(
                    source=self.segmentation_url,
                    segments=self.agglo_members,
                ),
            )
            # Hidden layer over the same source with no segment restriction.
            s.layers.append(
                name='unused',
                layer=neuroglancer.SegmentationLayer(source=self.segmentation_url,
                                                     ),
                visible=False,
            )
            # NOTE(review): presumably refreshed with the split result by
            # _update_state below — confirm against that method.
            s.layers.append(
                name='split-result',
                layer=neuroglancer.SegmentationLayer(
                    source=self.segmentation_url,
                    segments=self.agglo_members,
                ),
            )
            s.concurrent_downloads = 256
            self._update_state(s)

        with viewer.config_state.txn() as s:
            # Surface the available key bindings in the viewer status bar.
            s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
                                                               for key, command in key_bindings))
            for key, command in key_bindings:
                s.input_event_bindings.viewer[key] = command
                s.input_event_bindings.slice_view[key] = command
                s.input_event_bindings.perspective_view[key] = command
            self._update_config_state(s)

        # React to any shared-state change; defer_callback coalesces updates.
        viewer.shared_state.add_changed_callback(
            lambda: viewer.defer_callback(self._handle_state_changed))
viewer = neuroglancer.Viewer()

# 10 nm isotropic coordinate space shared by the volume and the skeletons.
dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=[10, 10, 10],
)

with viewer.txn() as s:
    # One segmentation layer backed by both the local volume and a skeleton
    # source; alphas are zeroed so only the skeletons are visible.
    seg_layer = neuroglancer.SegmentationLayer(
        source=[
            neuroglancer.LocalVolume(data=segmentation, dimensions=dimensions),
            SkeletonSource(dimensions),
        ],
        skeleton_shader='void main() { emitRGB(colormapJet(affinity)); }',
        selected_alpha=0,
        not_selected_alpha=0,
        segments=[395750],
    )
    s.layers.append(name='a', layer=seg_layer)

    # Can adjust the skeleton rendering options
    rendering = s.layers[0].skeleton_rendering
    rendering.mode2d = 'lines'
    rendering.line_width2d = 3
    rendering.mode3d = 'lines_and_points'
    rendering.line_width3d = 10

    # Can adjust visibility of layer side panel
    s.selected_layer.layer = 'a'
    s.selected_layer.visible = True
def test_annotate(webdriver):
    """Verify annotation rendering filtered by linked segmentation selections.

    Builds a 2-D viewer with two hidden single-voxel segmentation layers
    ('seg1' containing id 42, 'seg2' containing id 43) and a local annotation
    layer of three coincident colored points linked to those layers through
    relationships 'a' and 'b'.  With ``filter_by_segmentation`` enabled, each
    point is expected to render only while one of its linked segment ids is
    selected; the on-screen color is checked via screenshots.
    """
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"],
                                                    units="nm",
                                                    scales=[1, 1])
        s.position = [0, 0]
        # Single-voxel segmentation layer holding id 42 (relationship 'a').
        s.layers.append(
            name='seg1',
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(
                    dimensions=s.dimensions,
                    data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
                ), ),
            segments=[42],
            visible=False,
        )
        # Single-voxel segmentation layer holding id 43 (relationship 'b').
        s.layers.append(
            name='seg2',
            layer=neuroglancer.SegmentationLayer(
                source=neuroglancer.LocalVolume(
                    dimensions=s.dimensions,
                    data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=43),
                ), ),
            segments=[],
            visible=False,
        )
        # Annotation layer: three points at the same position, distinguished
        # only by their linked segment ids and their 'color' property.
        s.layers.append(
            name="a",
            layer=neuroglancer.LocalAnnotationLayer(
                dimensions=s.dimensions,
                annotation_relationships=['a', 'b'],
                linked_segmentation_layer={
                    'a': 'seg1',
                    'b': 'seg2'
                },
                filter_by_segmentation=['a', 'b'],
                ignore_null_segment_filter=False,
                annotation_properties=[
                    neuroglancer.AnnotationPropertySpec(
                        id='color',
                        type='rgb',
                        default='red',
                    )
                ],
                annotations=[
                    # Green point linked to segment 42 via relationship 'a'.
                    neuroglancer.PointAnnotation(
                        id='1',
                        point=[0, 0],
                        segments=[[42], []],
                        props=['#0f0'],
                    ),
                    # Blue point linked to segment 43 via relationship 'b'.
                    neuroglancer.PointAnnotation(
                        id='2',
                        point=[0, 0],
                        segments=[[], [43]],
                        props=['#00f'],
                    ),
                    # Cyan point linked to id 44, present in neither layer.
                    neuroglancer.PointAnnotation(
                        id='3',
                        point=[0, 0],
                        segments=[[], [44]],
                        props=['#0ff'],
                    ),
                ],
                shader='''
void main() {
  setColor(prop_color());
  setPointMarkerSize(1000.0);
}
''',
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
        s.selected_layer.layer = 'a'

    def expect_color(seg1, seg2, color):
        """Select the given segment ids and assert the whole 10x10 screenshot is `color`."""
        with webdriver.viewer.txn() as s:
            s.layers['seg1'].segments = seg1
            s.layers['seg2'].segments = seg2
        webdriver.sync()
        screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
        np.testing.assert_array_equal(
            screenshot.image_pixels,
            np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)))

    # Selecting 42 shows only the green point, 43 the blue, 44 the cyan.
    expect_color(seg1=[42], seg2=[], color=[0, 255, 0, 255])
    expect_color(seg1=[], seg2=[43], color=[0, 0, 255, 255])
    expect_color(seg1=[], seg2=[44], color=[0, 255, 255, 255])
def add_structure_to_neuroglancer( viewer, str_contour, structure, stack, first_sec, last_sec, color_radius=4, xy_ng_resolution_um=10, threshold=0.5, color=1, solid_volume=False, no_offset_big_volume=False, save_results=False, return_with_offsets=False, add_to_ng=True, human_annotation=False ):
    """
    Render a structure's contours as a binary volume and (optionally) add it to neuroglancer.

    Takes in the contours of a structure as well as the name, sections spanned by the
    structure, and a list of parameters that dictate how it is rendered.

    Parameters
    ----------
    viewer : neuroglancer.Viewer
        Viewer the segmentation layer is added to when ``add_to_ng`` is True.
    str_contour : dict
        Maps section index -> {structure: {threshold: [(x, y), ...]}} contours
        at the native 0.46 um/pixel resolution.
    structure : str
        Structure name; used as a contour key and in layer/file names.
    stack : str
        Stack name forwarded to ``get_ng_params`` / ``get_volume_fp``.
    first_sec, last_sec : int
        First/last section spanned by the structure.
    color_radius : float
        Radius painted around each contour point (rescaled with resolution).
    xy_ng_resolution_um : float
        Target X/Y voxel size in microns (Z is fixed at 20 um).
    threshold : float
        Probability-threshold key into ``str_contour``.
    color : int
        Label painted into the volume; also the displayed segment id.
    solid_volume : bool
        If True, fill each slice's contour interior via ``fill_in_structure``.
    no_offset_big_volume : bool
        If True, return a large origin-anchored sparse volume instead of a
        tight volume plus offsets.
    save_results : bool
        If True, save the volume (and offsets, when applicable) to disk.
    return_with_offsets : bool
        If True, return ``(volume, [x_off, y_off, z_off])``.
    add_to_ng : bool
        If True, add the volume to ``viewer`` as a segmentation layer.
    human_annotation : bool
        Forwarded to ``get_volume_fp`` when saving.

    Returns
    -------
    numpy.ndarray
        Binary structure volume (Z, Y, X), dtype uint8 — optionally with the
        XYZ offsets when ``return_with_offsets`` is True.
    """
    # Widen the painted radius when the resolution is finer than 10 um.
    color_radius = color_radius*(10.0/xy_ng_resolution_um)**0.5

    stack_parameters_ng = get_ng_params( stack )
    ng_section_min = stack_parameters_ng['prep2_section_min']
    ng_section_max = stack_parameters_ng['prep2_section_max']
    s3_offset_from_local_x = stack_parameters_ng['local_offset_x']
    s3_offset_from_local_y = stack_parameters_ng['local_offset_y']
    s3_offset_from_local_slices = stack_parameters_ng['local_offset_slices']

    # Max and Min X/Y start at sentinel values and are replaced below.
    max_x = 0
    max_y = 0
    min_x = 9999999
    min_y = 9999999
    # 'min_z' is the relative starting section (if the prep2 sections start at
    # slice 100, and the structure starts at slice 110, min_z is 10).
    # Z resolution is 20um for simple 1-1 correspondence with section thickness.
    max_z = (last_sec-ng_section_min)
    min_z = (first_sec-ng_section_min)
    # NOTE(review): comparing a *relative* max_z to the *absolute* section max
    # and clamping to the absolute section min looks inconsistent; preserved
    # unchanged to keep behavior — confirm against the original data layout.
    if max_z>ng_section_max:
        max_z = ng_section_min
    if min_z<0:
        min_z = 0
    # Scaling factor is (0.46/X): from native 0.46 um resolution to X microns.
    scale_xy = 0.46/xy_ng_resolution_um

    # str_contour_ng_resolution is the contour data rescaled to neuroglancer
    # resolution, re-indexed so that z starts at 0 (offset is reapplied later).
    str_contour_ng_resolution = {}
    for section in str_contour:
        section_contours = str_contour[ section ][structure][ threshold ]
        rescaled = []
        for curr_coors in section_contours:
            x = scale_xy*curr_coors[0]
            y = scale_xy*curr_coors[1]
            rescaled.append( [x, y] )
            # Track the bounding box of the whole structure.
            min_x = min( x, min_x)
            min_y = min( y, min_y)
            max_x = max( x, max_x)
            max_y = max( y, max_y)
        str_contour_ng_resolution[section-first_sec] = rescaled

    # Cast extremes to int as they size the 3D numpy matrix.
    max_x = int( np.ceil(max_x) )
    max_y = int( np.ceil(max_y) )
    min_x = int( np.floor(min_x) )
    min_y = int( np.floor(min_y) )

    # Tight bounding-box volume for the structure, (Z, Y, X) order.
    structure_volume = np.zeros( (max_z-min_z, max_y-min_y, max_x-min_x), dtype = np.uint8 )
    z_voxels, y_voxels, x_voxels =  np.shape(structure_volume)
    print(  np.shape(structure_volume) )

    # Paint every slice: each contour point plus a disk of radius color_radius.
    for slice_idx in range(z_voxels):
        # Human-annotated files sometimes lack contours for a slice.
        try:
            slice_contour = np.asarray( str_contour_ng_resolution[slice_idx] )
        except KeyError:
            continue

        for xy_pair in slice_contour:
            x_voxel = int(xy_pair[0])-min_x
            y_voxel = int(xy_pair[1])-min_y

            structure_volume[slice_idx,y_voxel,x_voxel] = color

            # Color all voxels within color_radius of this one, not just itself.
            lower_bnd_offset = int( np.floor(1-color_radius) )
            upper_bnd_offset = int( np.ceil(color_radius) )
            for x_coor_color_radius in range( lower_bnd_offset, upper_bnd_offset):
                for y_coor_color_radius in range( lower_bnd_offset, upper_bnd_offset):
                    x_displaced_voxel = x_voxel + x_coor_color_radius
                    y_displaced_voxel = y_voxel + y_coor_color_radius
                    distance = ( (y_voxel-y_displaced_voxel)**2 + (x_voxel-x_displaced_voxel)**2 )**0.5
                    # Paint only inside the radius AND inside the 3D matrix
                    # bounds (the bounds check makes an IndexError impossible,
                    # so the original bare try/except was removed).
                    if distance<color_radius and \
                    x_displaced_voxel<x_voxels and \
                    y_displaced_voxel<y_voxels and \
                    x_displaced_voxel>0 and \
                    y_displaced_voxel>0:
                        structure_volume[slice_idx,y_displaced_voxel,x_displaced_voxel] = color

        if solid_volume:
            structure_volume[slice_idx,:,:] = fill_in_structure( structure_volume[slice_idx,:,:], color )

    display_name = structure+'_'+str(threshold)+'_'+str(color)

    # Shift z by the S3/local slice offset when it is nonzero.
    z_offset = min_z
    if s3_offset_from_local_slices!=0:
        z_offset = min_z + s3_offset_from_local_slices

    # The cropping box on S3 can differ from the local one, so X/Y may need a
    # correction term (expressed in target-resolution voxels).  Always define
    # hc_*_offset: the save_results branch below reads them, and the original
    # raised NameError when both S3 offsets were zero.
    hc_x_offset = 0
    hc_y_offset = 0
    if s3_offset_from_local_x!=0 or s3_offset_from_local_y!=0:
        hc_x_offset = s3_offset_from_local_x*10/xy_ng_resolution_um
        hc_y_offset = s3_offset_from_local_y*10/xy_ng_resolution_um
    true_ng_x_offset = min_x+hc_x_offset
    true_ng_y_offset = min_y+hc_y_offset
    xyz_str_offsets = [true_ng_x_offset, true_ng_y_offset, z_offset]

    # Instead of a small volume plus an offset, optionally build a large,
    # origin-anchored sparse volume with the structure embedded at its far corner.
    if no_offset_big_volume:
        big_sparse_structure_volume = np.zeros((z_voxels+z_offset, y_voxels+true_ng_y_offset, x_voxels+true_ng_x_offset), dtype=np.uint8)

        try:
            big_sparse_structure_volume[-z_voxels:,-y_voxels:,-x_voxels:] = structure_volume
        # If part of the structure ends up being cut off due to cropping, clip
        # the copy to the overlapping extents.
        except Exception as e:
            str_new_voxels_zyx = np.shape(structure_volume)
            large_sparse_str_voxels_zyx = np.shape(big_sparse_structure_volume)
            low_end_z_len = np.min([large_sparse_str_voxels_zyx[0], str_new_voxels_zyx[0]])
            low_end_y_len = np.min([large_sparse_str_voxels_zyx[1], str_new_voxels_zyx[1]])
            low_end_x_len = np.min([large_sparse_str_voxels_zyx[2], str_new_voxels_zyx[2]])
            print(e) # Maybe can remove this whole block after new changes
            print('Cutting out some slices on the edge of the structure')
            print('New shape: ',low_end_z_len, low_end_y_len, low_end_x_len )
            big_sparse_structure_volume[-low_end_z_len:,-low_end_y_len:,-low_end_x_len:] = \
                structure_volume[-low_end_z_len:,-low_end_y_len:,-low_end_x_len:]

        structure_volume = big_sparse_structure_volume.copy()
        true_ng_x_offset = 0
        true_ng_y_offset = 0
        min_z = 0

    if add_to_ng:
        with viewer.txn() as s:
            s.layers[ display_name ] = neuroglancer.SegmentationLayer(
                source = neuroglancer.LocalVolume(
                    data=structure_volume, # Z,Y,X
                    voxel_size=[ xy_ng_resolution_um*1000, xy_ng_resolution_um*1000,20000], # X Y Z (nm)
                    voxel_offset = [ true_ng_x_offset, true_ng_y_offset, min_z] # X Y Z
                ),
                segments = [color]
        )

    if save_results:
        volumes_have_offset = not no_offset_big_volume

        fp_volume_root = get_volume_fp( stack, precomputed=False, human_annotated=human_annotation, \
                                  volume_type='structure', brain_crop='brainstem', xy_res=xy_ng_resolution_um, \
                                  z_res=20, offset=volumes_have_offset, color_scheme=1, \
                                  thickness_scheme=1, structure=structure )
        os.makedirs(fp_volume_root, exist_ok=True)
        # Save volume
        volume_fp = os.path.join( fp_volume_root, structure+'_volume.npy' )
        np.save( volume_fp, structure_volume)

        if volumes_have_offset:
            # Save offsets ("x y z"); the with-block closes the file, so the
            # original redundant close() was dropped.
            volume_offset_fp = os.path.join( fp_volume_root, structure+'_offset.txt' )
            with open( volume_offset_fp, 'w') as offset_file:
                insert_str =  str(min_x+hc_x_offset)+" "+str(min_y+hc_y_offset)+" "+str(min_z)
                offset_file.write(  insert_str )

    if return_with_offsets:
        return structure_volume, xyz_str_offsets
    return structure_volume
    # NOTE(review): this fragment is the tail of an entry point whose header
    # (and the definitions of `filename` / `synapse_filename`) is not visible
    # here — presumably they come from earlier sys.argv parsing; confirm.
    # Third CLI argument selects which neuron to display.
    neuron_id = int(sys.argv[3])

    # Skeleton and synapse tables, both serialized as JSON.
    df_skels = pd.read_json(filename)
    df_synapses = pd.read_json(synapse_filename)

    # Display colors for postsynaptic (input) and presynaptic (output) sites.
    input_site_color = '#72b9cb'
    output_site_color = '#c12430'

    viewer = neuroglancer.Viewer()

    with viewer.txn() as s:
        # FAFB v14 raw EM data and the FFN segmentation as base layers.
        s.layers['raw'] = neuroglancer.ImageLayer(
            source=
            'precomputed://gs://neuroglancer-fafb-data/fafb_v14/fafb_v14_orig',
        )
        s.layers['ffn1'] = neuroglancer.SegmentationLayer(
            source='precomputed://gs://fafb-ffn1-20190805/segmentation', )

        # Overlay the selected neuron plus its output and input synapses.
        add_neuron(s, df_skels, [neuron_id])
        add_synapses(s,
                     df_synapses,
                     pre_neuron_id=neuron_id,
                     color=output_site_color,
                     name='output')
        add_synapses(s,
                     df_synapses,
                     post_neuron_id=neuron_id,
                     color=input_site_color,
                     name='input')

    # Print the shareable viewer URL.
    print(viewer.__str__())