Example #1
    def add_stack(self):
        # The three supported stacks share one URL pattern, so a single
        # membership test replaces the repeated if/elif branches.
        if self.stack in ('MD585', 'MD589', 'MD594'):
            with self.viewer.txn() as s:
                s.layers[self.stack + '_image'] = neuroglancer.ImageLayer(
                    source='precomputed://https://mousebrainatlas-datajoint-jp2k.s3.amazonaws.com/precomputed/'
                    + self.stack + '_fullres')
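Since the method only varies the stack name inside the source URL, a stand-alone sketch of the same pattern (assuming the same S3 bucket layout) might look like:

import neuroglancer

stack = 'MD589'  # any of the supported stacks
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.layers[stack + '_image'] = neuroglancer.ImageLayer(
        source='precomputed://https://mousebrainatlas-datajoint-jp2k.s3.amazonaws.com/precomputed/'
        + stack + '_fullres')
print(viewer)  # prints the URL of the local viewer instance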
Example #2
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        viewer.actions.add('start-fill', self._start_fill_action)
        viewer.actions.add('stop-fill', self._stop_fill_action)
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'
            s.input_event_bindings.data_view['keyt'] = 'stop-fill'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            self.flood_fill_event = None
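The two bound callbacks are not part of this snippet; a plausible sketch of methods on the same class, using the ActionState payload that neuroglancer passes to bound actions (the flood-fill internals are assumed, not shown here):

    def _start_fill_action(self, action_state):
        # mouse_voxel_coordinates is None when the cursor is not over
        # the data view.
        pos = action_state.mouse_voxel_coordinates
        if pos is None:
            return
        self._start_flood_fill(pos)

    def _stop_fill_action(self, action_state):
        self._stop_flood_fill()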
Example #3
def test_title(webdriver):
    if webdriver.browser == 'firefox':
        pytest.skip('test can hang under firefox')
    a = np.array([[[255]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(data=a,
                                                                          dimensions=s.dimensions),
                                          ),
        )

    webdriver.sync()

    assert webdriver.driver.title == 'neuroglancer'

    with webdriver.viewer.txn() as s:
        s.title = 'the title'

    webdriver.sync()

    assert webdriver.driver.title == 'the title - neuroglancer'

    with webdriver.viewer.txn() as s:
        s.title = None

    webdriver.sync()

    assert webdriver.driver.title == 'neuroglancer'
Example #4
def layer(txn,
          name,
          img,
          shader,
          multiplier,
          offx=0,
          offy=0,
          offz=0,
          voxel_size=default_voxel_size):
    """Add an image layer to Neuroglancer

    :param txn: The transaction context of the viewer

    :param name: The name of the layer as displayed in Neuroglancer

    :param img: The image to display

    :param shader: the shader to use when displaying, e.g. gray_shader

    :param multiplier: the multiplier to apply to the normalized data value.
    This can be used to brighten or dim the image.
    """
    if isinstance(img, str):
        frac = multiplier
        source = img
    else:
        frac = multiplier / np.percentile(img, 99.9)
        if img.dtype.kind in ("i", "u"):
            frac = frac * np.iinfo(img.dtype).max
        source = neuroglancer.LocalVolume(img,
                                          voxel_offset=(offx, offy, offz),
                                          voxel_size=voxel_size)
    txn.layers[name] = neuroglancer.ImageLayer(source=source,
                                               shader=shader % frac)
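A hedged usage sketch for layer() above, assuming a running `viewer` and a `gray_shader` template containing a single numeric placeholder (as the `shader % frac` call implies):

import numpy as np

img = np.random.randint(0, 4096, size=(64, 64, 64), dtype=np.uint16)
with viewer.txn() as txn:
    layer(txn, 'raw', img, gray_shader, multiplier=1.0)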
Example #5
    def refresh(self):
        with self.viewer.txn() as s:
            s.voxel_size = self.voxel_size * 1000
            s.layers["image"] = neuroglancer.ImageLayer(
                source=self.url, shader=cubehelix_shader % self.brightness)
            s.layers["points"] = neuroglancer.PointAnnotationLayer(
                points=self.points)
Example #6
def glance_precomputed(viewer,
                       image=None,
                       host='localhost',
                       labels=None,
                       port=41000):
    '''Supply paths to precomputed volumes served from a local HTTP server.'''
    if image:
        image_path = 'precomputed://http://localhost:{}/'.format(
            port) + os.path.relpath(image)
        print('Image Source:', image_path)
    else:
        return None
    if labels:
        labels_path = 'precomputed://http://localhost:{}/'.format(
            port) + os.path.relpath(labels)
        print('Labels Source:', labels_path)
    with viewer.txn() as s:
        if image is not None:
            s.layers['image'] = neuroglancer.ImageLayer(source=image_path)
        if labels is not None:
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=labels_path)
    return viewer.get_viewer_url()
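The `precomputed://http://localhost:<port>/` sources assume a static file server rooted where the relative paths resolve; one way to provide it (an assumption, any CORS-enabled static server would do):

import subprocess

# Serve the current directory so os.path.relpath(image) resolves under
# http://localhost:41000/.
server = subprocess.Popen(['python', '-m', 'http.server', '41000'])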
Example #7
def show_grad_as_color(path, name, bounds=None, resolution=None):
    global viewer
    global res

    if resolution is None:
        r = res
    else:
        r = resolution
    print('Loading: ' + path)
    print('as: ' + name)
    with h5py.File(path, 'r') as hf:
        hf_keys = hf.keys()
        print(list(hf_keys))
        for key in list(hf_keys):

            if bounds is not None:
                data = np.array(hf[key][bounds[0][0]:bounds[0][1],
                                        bounds[1][0]:bounds[1][1],
                                        bounds[2][0]:bounds[2][1]])
            else:
                data = np.array(hf[key])

            data = grad_to_RGB(data)

            with viewer.txn() as s:
                # Use the resolved resolution r (the original passed the
                # global res here, making the resolution argument dead).
                s.layers[name] = neuroglancer.ImageLayer(
                    source=neuroglancer.LocalVolume(data, voxel_size=r),
                    shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""",
                )
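grad_to_RGB is not included in this snippet; given that the shader reads three channels with getDataValue(0..2), a plausible sketch (an assumption, with numpy imported as np) is:

def grad_to_RGB(grad):
    # Map a gradient volume with a 3-component channel axis to
    # normalized [0, 1] values, one per color channel.
    rgb = np.abs(grad).astype(np.float32)
    rgb /= max(float(rgb.max()), 1e-6)
    return rgb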
Example #8
    def _start_flood_fill(self, pos):
        self._stop_flood_fill()
        inf_results = zarr.zeros(self.gt_vol.bounds.to_list()[3:],
                                 chunks=(64, 64, 64),
                                 dtype=np.uint8)
        inf_volume = neuroglancer.LocalVolume(data=inf_results,
                                              dimensions=self.dimensions)

        with self.viewer.txn() as s:
            s.layers['points'] = neuroglancer.LocalAnnotationLayer(
                self.dimensions)
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
        self.flood_fill_event = threading.Event()
        t = threading.Thread(target=self._do_flood_fill,
                             kwargs=dict(
                                 initial_pos=pos,
                                 inf_results=inf_results,
                                 inf_volume=inf_volume,
                                 event=self.flood_fill_event,
                             ))
        t.daemon = True
        t.start()
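The worker target `_do_flood_fill` is elided above; a minimal sketch of its shape (the fill computation itself is assumed):

    def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):
        while not event.is_set():
            # ... compute the next chunk of the fill and write it into
            # inf_results here ...
            inf_volume.invalidate()  # tell the viewer the data changed
            event.wait(0.1)          # placeholder pacing for this sketch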
Example #9
    def add_volume(self, volume, layer_name=None, clear_layer=False):
        if self.viewer is None:
            self.viewer = neuroglancer.Viewer()

        if layer_name is None:
            layer_name = f'{self.layer_type}_{self.scales}'

        source = neuroglancer.LocalVolume(
            data=volume,
            dimensions=neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                                    units='nm',
                                                    scales=self.scales),
            voxel_offset=self.offset)

        if self.layer_type == 'segmentation':
            layer = neuroglancer.SegmentationLayer(source=source)
        else:
            layer = neuroglancer.ImageLayer(source=source)

        with self.viewer.txn() as s:
            if clear_layer:
                s.layers.clear()
            s.layers[layer_name] = layer

        print(f'A new layer named {layer_name} was added to:')
        print(self.viewer)
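Hypothetical usage of add_volume() above; `NgWrapper` and its `layer_type`/`scales`/`offset` attributes stand in for the class defining the method, which is not shown in the original:

import numpy as np

# NgWrapper is a hypothetical stand-in for the enclosing class.
ng = NgWrapper(layer_type='segmentation', scales=[8, 8, 8], offset=[0, 0, 0])
labels = np.zeros((64, 64, 64), dtype=np.uint64)
ng.add_volume(labels, layer_name='labels', clear_layer=True)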
Example #10
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inference', self._do_inference)
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=self.gt_vol.resolution,
        )
        self.inf_results = zarr.zeros(self.gt_vol.bounds.to_list()[3:],
                                      chunks=(64, 64, 64),
                                      dtype=np.uint8)
        self.inf_volume = neuroglancer.LocalVolume(data=self.inf_results,
                                                   dimensions=self.dimensions)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=self.inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
Example #11
    def __init__(self, synapse_path, top_method='min', num_top_partners=10):
        with open(synapse_path, 'r') as f:
            synapse_data = json.load(f)['data']
        self.synapses_by_id, self.synapse_partner_counts = get_synapses_by_id(
            synapse_data)
        self.top_method = top_method
        self.num_top_partners = num_top_partners

        dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )

        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('select-custom', self._handle_select)
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['dblclick0'] = 'select-custom'
        with viewer.txn() as s:
            s.projection_orientation = [
                0.63240087, 0.01582051, 0.05692779, 0.77238464
            ]
            s.dimensions = dimensions
            s.position = [3000, 3000, 3000]
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['partners'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['synapses'] = neuroglancer.LocalAnnotationLayer(
                dimensions=dimensions,
                linked_segmentation_layer='ground_truth')
            s.layout = neuroglancer.row_layout([
                neuroglancer.LayerGroupViewer(
                    layout='xy',
                    layers=['image', 'ground_truth', 'partners', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['ground_truth', 'synapses'],
                ),
                neuroglancer.LayerGroupViewer(
                    layout='3d',
                    layers=['partners', 'synapses'],
                ),
            ])

        self.selected_segments = frozenset()
        self.viewer.shared_state.add_changed_callback(
            lambda: self.viewer.defer_callback(self.on_state_changed))
Example #12
    def _neuroglancer_layer(
            self) -> Tuple[neuroglancer.ImageLayer, Dict[str, Any]]:
        # Generates an Image layer. May not be correct if this crop contains a segmentation

        layer = neuroglancer.ImageLayer(source=self._neuroglancer_source())
        kwargs = {
            "visible": False,
            "blend": "additive",
        }
        return layer, kwargs
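A hedged sketch of how the returned (layer, kwargs) pair might be consumed; `crop`, the layer name, and the attribute forwarding on the managed layer are assumptions:

layer, kwargs = crop._neuroglancer_layer()
with viewer.txn() as s:
    s.layers['crop'] = layer
    for key, value in kwargs.items():
        setattr(s.layers['crop'], key, value)  # e.g. visible, blend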
Example #13
def handle_emdata(ngviewer, layer_kws):
    """Add the electron microscopy layer as a neuroglancer layer."""
    # This function adds EM layer to a neuroglancer instance.
    # The EM layers are usually corresponding to spaces like 'FAFB', etc
    ngspace = _get_ngspace(layer_kws)

    for layername in ngspace['layers']:
        if ngspace['layers'][layername]['type'] == 'image':
            with ngviewer.txn() as s:
                s.layers[layername] = neuroglancer.ImageLayer(
                    source=ngspace['layers'][layername]['source'], )

    return ngviewer
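The ngspace mapping consulted above is assumed to look roughly like this (illustrative names and source, not from the original):

ngspace = {
    'layers': {
        'fafb_em': {
            'type': 'image',
            'source': 'precomputed://gs://example-bucket/fafb_em',
        },
    },
}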
Example #14
def visualize_trees(graphs: Dict[str, nx.DiGraph]):

    viewer = neuroglancer.Viewer()
    with viewer.txn() as s:
        s.layers["blegh"] = neuroglancer.ImageLayer(
            source=neuroglancer.LocalVolume(data=np.zeros([1, 1, 1]).transpose(
                [2, 1, 0]),
                                            voxel_size=[1, 1, 1]))
        node_id = itertools.count(start=1)
        for name, graph in graphs.items():
            add_trees(s, graph, node_id, name=name, visible=True)
    print(viewer)
    input("Hit ENTER to quit!")
Example #15
    def __init__(self, state_path, bodies, labels, segmentation_url, image_url,
                 num_to_prefetch):
        self.state = State(state_path)
        self.num_to_prefetch = num_to_prefetch
        self.viewer = neuroglancer.Viewer()
        self.bodies = bodies
        self.state.load()
        self.total_voxels = sum(x.num_voxels for x in bodies)
        self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])

        with self.viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
            s.layers['segmentation'] = neuroglancer.SegmentationLayer(
                source=segmentation_url)
            s.navigation.zoom_factor = 66
            s.perspective_zoom = 1280
            s.show_slices = False
            s.concurrent_downloads = 256
            s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
            s.layout = '3d'

        key_bindings = [
            ['bracketleft', 'prev-index'],
            ['bracketright', 'next-index'],
            ['home', 'first-index'],
            ['end', 'last-index'],
            ['control+keys', 'save'],
        ]
        label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
        for label, label_key in zip(labels, label_keys):
            key_bindings.append([label_key, 'label-%s' % label])

            def label_func(s, label=label):
                self.set_label(s, label)

            self.viewer.actions.add('label-%s' % label, label_func)
        self.viewer.actions.add('prev-index', self._prev_index)
        self.viewer.actions.add('next-index', self._next_index)
        self.viewer.actions.add('first-index', self._first_index)
        self.viewer.actions.add('last-index', self._last_index)
        self.viewer.actions.add('save', self.save)

        with self.viewer.config_state.txn() as s:
            for key, command in key_bindings:
                s.input_event_bindings.viewer[key] = command
            s.status_messages['help'] = (
                'KEYS: ' + ' | '.join('%s=%s' % (key, command)
                                      for key, command in key_bindings))

        self.index = -1
        self.set_index(self._find_one_after_last_labeled_index())
Example #16
def handle_synapticclefts(ngviewer, layer_kws):
    """Add the synapse cleft predictions for the em dataset as a neuroglancer layer."""
    # This function adds synapse cleft predictions to a neuroglancer instance.

    ngspace = _get_ngspace(layer_kws)

    for layername in ngspace['layers']:
        if ngspace['layers'][layername]['type'] == 'synapticcleft':
            with ngviewer.txn() as s:
                s.layers[layername] = neuroglancer.ImageLayer(
                    source=ngspace['layers'][layername]['source'],
                    shader=
                    'void main() {emitRGBA(vec4(0.0,0.0,1.0,toNormalized(getDataValue())));}',
                    opacity=0.73)

    return ngviewer
Example #17
def display_split_result(graph, agglo_id, cur_eqs, supervoxel_map, split_seeds,
                         image_url, segmentation_url):

    agglo_members = set(graph.get_agglo_members(agglo_id))
    state = neuroglancer.ViewerState()
    state.layers.append(name='image',
                        layer=neuroglancer.ImageLayer(source=image_url))
    state.layers.append(
        name='original',
        layer=neuroglancer.SegmentationLayer(
            source=segmentation_url,
            segments=agglo_members,
        ),
        visible=False,
    )
    state.layers.append(
        name='isolated-supervoxels',
        layer=neuroglancer.SegmentationLayer(
            source=segmentation_url,
            segments=set(x for x, seeds in six.viewitems(supervoxel_map)
                         if len(seeds) > 1),
        ),
        visible=False,
    )
    state.layers.append(name='split',
                        layer=neuroglancer.SegmentationLayer(
                            source=segmentation_url,
                            equivalences=cur_eqs,
                            segments=set(cur_eqs[x] for x in agglo_members),
                        ))
    for label, component in six.viewitems(split_seeds):
        state.layers.append(
            name='seed%d' % label,
            layer=neuroglancer.PointAnnotationLayer(
                points=[seed['position'] for seed in component], ),
        )

    state.show_slices = False
    state.layout = '3d'
    all_seed_points = [
        seed['position'] for component in six.viewvalues(split_seeds)
        for seed in component
    ]
    state.voxel_coordinates = np.mean(all_seed_points, axis=0)
    state.perspective_zoom = 140
    return state
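display_split_result() returns a ViewerState rather than mutating a live viewer; given suitable arguments, a hedged way to inspect the result is to serialize it to a URL:

state = display_split_result(graph, agglo_id, cur_eqs, supervoxel_map,
                             split_seeds, image_url, segmentation_url)
print(neuroglancer.to_url(state))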
Example #18
def test_slider(webdriver):

    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
        s.position = [0.5, 0.5]
        s.layers.append(
            name='image',
            layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
                dimensions=s.dimensions,
                data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
            ),
                                          ),
            visible=True,
            shader='''
#uicontrol float color slider(min=0, max=10)

void main() {
  emitGrayscale(color);
}
''',
            shader_controls={
                'color': 1,
            },
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False

    control = webdriver.viewer.state.layers['image'].shader_controls['color']
    assert control == 1

    def expect_color(color):
        webdriver.sync()
        screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
        np.testing.assert_array_equal(screenshot.image_pixels,
                                      np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)))

    expect_color([255, 255, 255, 255])
    with webdriver.viewer.txn() as s:
        s.layers['image'].shader_controls = {
            'color': 0,
        }
    expect_color([0, 0, 0, 255])
Example #19
def dataset_view(datasetname):
    dataset = DataSet.query.filter(DataSet.name == datasetname).first_or_404()
    state = neuroglancer.ViewerState()
    state.layers['img'] = neuroglancer.ImageLayer(source='precomputed://' +
                                                  dataset.image_source)
    if dataset.pychunkedgraph_viewer_source is not None:
        state.layers['seg'] = neuroglancer.SegmentationLayer(
            source='graphene://' + dataset.pychunkedgraph_viewer_source)
    else:
        state.layers['seg'] = neuroglancer.SegmentationLayer(
            source='precomputed://' + dataset.flat_segmentation_source)
    state.layers['ann'] = neuroglancer.AnnotationLayer()
    state.layout = "xy-3d"
    ng_url = neuroglancer.to_url(state,
                                 prefix=current_app.config['NEUROGLANCER_URL'])
    return render_template('dataset.html',
                           dataset=dataset,
                           ng_url=ng_url,
                           version=__version__)
Example #20
def test_screenshot_basic(webdriver):
    a = np.array([[[255]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.ImageLayer(
                source=neuroglancer.LocalVolume(data=a, dimensions=s.dimensions),
                shader='void main () { emitRGB(vec3(1.0, 0.0, 0.0)); }',
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
    screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
    np.testing.assert_array_equal(screenshot.image_pixels,
                                  np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
Example #21
def add_dask_layer(state):
    """Adds a lazily-computed data source backed by dask."""
    # https://docs.dask.org/en/latest/array-creation.html#using-dask-delayed
    import dask
    import dask.array

    def make_array(k):
        print('Computing k=%d' % (k, ))
        return np.full(shape=(256, 256), fill_value=k, dtype=np.uint8)

    lazy_make_array = dask.delayed(make_array, pure=True)
    lazy_chunks = [lazy_make_array(k) for k in range(255)]
    sample = lazy_chunks[0].compute()  # load the first chunk (assume rest are same shape/dtype)
    arrays = [
        dask.array.from_delayed(lazy_chunk, dtype=sample.dtype, shape=sample.shape)
        for lazy_chunk in lazy_chunks
    ]
    x = dask.array.concatenate(arrays)
    state.layers['dask'] = neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(x))
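A minimal driver for add_dask_layer() above; chunks are only computed when the viewer actually requests the corresponding data:

import neuroglancer

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    add_dask_layer(s)
print(viewer)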
Example #22
def test_display_dimensions(webdriver, display_dimensions, layout, key,
                            expected_position):
    a = np.zeros((10, 10, 10), dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["z", "y", "x"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
                data=a, dimensions=s.dimensions), ),
        )
        s.display_dimensions = display_dimensions
        s.layout = layout
    webdriver.sync()
    webdriver.action_chain().move_to_element_with_offset(
        webdriver.root_element, 100, 100).click().send_keys(key).perform()
    webdriver.sync()
    assert np.floor(
        webdriver.viewer.state.position).tolist() == expected_position
Example #23
def layer(txn,
          name,
          img,
          shader=None,
          multiplier=1.0,
          dimensions=None,
          offx=0,
          offy=0,
          offz=0,
          voxel_size=default_voxel_size):
    """Add an image layer to Neuroglancer

    :param txn: The transaction context of the viewer.
    :param name: The name of the layer as displayed in Neuroglancer.
    :param img: The image to display in TCZYX order.
    :param shader: the shader to use when displaying, e.g. gray_shader
    :param multiplier: the multiplier to apply to the normalized data value.
    This can be used to brighten or dim the image.
    """

    if isinstance(img, str):
        source = img

    else:
        if dimensions is None:
            dim_names = ["xyzct"[d] for d in range(img.ndim)]
            dim_units = ["µm"] * img.ndim
            dim_scales = [1.0] * img.ndim

            dimensions = neuroglancer.CoordinateSpace(names=dim_names,
                                                      units=dim_units,
                                                      scales=dim_scales)

        source = neuroglancer.LocalVolume(data=reverse_dimensions(img),
                                          dimensions=dimensions,
                                          voxel_offset=(offx, offy, offz))

    shader = shader or gray_shader

    txn.layers[name] = neuroglancer.ImageLayer(source=source,
                                               shader=shader % multiplier)
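reverse_dimensions is not included in this snippet; a plausible sketch (an assumption) that flips the documented TCZYX storage order into the XYZCT order implied by dim_names:

def reverse_dimensions(img):
    # Reverse the axis order, e.g. TCZYX -> XYZCT.
    return img.transpose(tuple(reversed(range(img.ndim))))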
Example #24
def add_cdf_test_layer(state, dtype, min_value=None, max_value=None):
    dimensions = neuroglancer.CoordinateSpace(names=['x'],
                                              units='',
                                              scales=[1])
    state.dimensions = dimensions
    if min_value is None or max_value is None:
        info = np.iinfo(dtype)
        if min_value is None:
            min_value = info.min
        if max_value is None:
            max_value = info.max
    data = np.linspace(start=min_value,
                       stop=max_value,
                       endpoint=True,
                       dtype=dtype,
                       num=256)
    state.layers[np.dtype(dtype).name] = neuroglancer.ImageLayer(
        source=neuroglancer.LocalVolume(
            data=data,
            dimensions=dimensions,
        ))
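Hedged usage: build a stand-alone state containing the CDF test layer and print a shareable URL for it.

import neuroglancer
import numpy as np

state = neuroglancer.ViewerState()
add_cdf_test_layer(state, np.uint8)
print(neuroglancer.to_url(state))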
Example #25
def main():
    global output_file_name
    global synapses
    global viewer
    args = parse_args()
    neuroglancer.set_static_content_source(url=args.static_content_source)
    neuroglancer.set_server_bind_address(bind_port=int(args.port))
    image_url = args.image_url
    segmentation_url = args.segmentation_url
    output_file_name = args.output

    with open(args.synapses) as f:
        synapse_dict = json.load(f)

    for n1, n2, x, y, z in zip(synapse_dict["neuron_1"],
                               synapse_dict["neuron_2"],
                               synapse_dict["synapse_center"]["x"],
                               synapse_dict["synapse_center"]["y"],
                               synapse_dict["synapse_center"]["z"]):
        synapses.append(Synapse(n1, n2, x, y, z))

    viewer = neuroglancer.Viewer()
    with viewer.txn() as s:
        s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
        s.layers['segmentation'] = neuroglancer.SegmentationLayer(
            source=segmentation_url)
    viewer.actions.add("yes", lambda _: yes())
    viewer.actions.add("no", lambda _: no())
    viewer.actions.add("skip", lambda _: skip())
    viewer.actions.add("back", lambda _: back())
    viewer.actions.add("revert", lambda _: set_viewer_state())
    with viewer.config_state.txn() as s:
        s.input_event_bindings.viewer['shift+keyy'] = 'yes'
        s.input_event_bindings.viewer["shift+keyn"] = "no"
        s.input_event_bindings.viewer["shift+keys"] = "skip"
        s.input_event_bindings.viewer["shift+keyr"] = "revert"
        s.input_event_bindings.viewer["shift+keyb"] = "back"
    set_viewer_state()
    webbrowser.open_new(viewer.get_viewer_url())
    while True:
        time.sleep(1)
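The Synapse record appended above is assumed to be a simple container along these lines (hypothetical, not part of the original):

import collections

Synapse = collections.namedtuple('Synapse', ['neuron_1', 'neuron_2', 'x', 'y', 'z'])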
Example #26
def test_invlerp(webdriver):

    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y"], units="nm", scales=[1, 1])
        s.position = [0.5, 0.5]
        s.layers.append(
            name='image',
            layer=neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(
                dimensions=s.dimensions,
                data=np.full(shape=(1, 1), dtype=np.uint32, fill_value=42),
            ),
                                          ),
            visible=True,
            shader_controls={
                'normalized': {
                    'range': [0, 42],
                },
            },
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False

    control = webdriver.viewer.state.layers['image'].shader_controls['normalized']
    assert isinstance(control, neuroglancer.InvlerpParameters)
    np.testing.assert_equal(control.range, [0, 42])

    def expect_color(color):
        webdriver.sync()
        screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
        np.testing.assert_array_equal(screenshot.image_pixels,
                                      np.tile(np.array(color, dtype=np.uint8), (10, 10, 1)))

    expect_color([255, 255, 255, 255])
    with webdriver.viewer.txn() as s:
        s.layers['image'].shader_controls = {
            'normalized': neuroglancer.InvlerpParameters(range=[42, 100]),
        }
    expect_color([0, 0, 0, 255])
Example #27
def test_context_lost(webdriver):
    a = np.array([[[255]]], dtype=np.uint8)
    with webdriver.viewer.txn() as s:
        s.dimensions = neuroglancer.CoordinateSpace(names=["x", "y", "z"],
                                                    units="nm",
                                                    scales=[1, 1, 1])
        s.layers.append(
            name="a",
            layer=neuroglancer.ImageLayer(
                source=neuroglancer.LocalVolume(data=a,
                                                dimensions=s.dimensions),
                shader='void main () { emitRGB(vec3(1.0, 0.0, 0.0)); }',
            ),
        )
        s.layout = 'xy'
        s.cross_section_scale = 1e-6
        s.show_axis_lines = False
    screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
    webdriver.driver.execute_script('''
window.webglLoseContext = viewer.gl.getExtension('WEBGL_lose_context');
window.webglLoseContext.loseContext();
''')
    time.sleep(3)  # Wait a few seconds for log messages to be written
    browser_log = webdriver.get_log_messages()
    assert 'Lost WebGL context' in browser_log
    webdriver.driver.execute_script('''
window.webglLoseContext.restoreContext();
''')
    time.sleep(3)  # Wait a few seconds for log messages to be written
    browser_log = webdriver.get_log_messages()
    assert 'WebGL context restored' in browser_log
    screenshot = webdriver.viewer.screenshot(size=[10, 10]).screenshot
    np.testing.assert_array_equal(
        screenshot.image_pixels,
        np.tile(np.array([255, 0, 0, 255], dtype=np.uint8), (10, 10, 1)))
Example #28
import argparse
import webbrowser
from pathlib import Path

import neuroglancer

viewer = neuroglancer.Viewer()

print(viewer)

parser = argparse.ArgumentParser()
parser.add_argument("nifti_path", type=Path)
p = parser.parse_args()

with viewer.txn() as s:
    # The NIfTI file named on the command line is assumed to be served
    # by a local static server on port 9000.
    s.layers['image'] = neuroglancer.ImageLayer(
        source='nifti://http://127.0.0.1:9000/' + p.nifti_path.name)

webbrowser.open_new(viewer.get_viewer_url())
print(neuroglancer.to_url(viewer.state))
print(viewer.state)
print(viewer)

neuroglancer.stop()
Example #29
    def __init__(self, graph, agglo_id, image_url, segmentation_url, state_path):
        self.graph = graph
        self.agglo_id = agglo_id
        self.image_url = image_url
        self.segmentation_url = segmentation_url
        self.state = InteractiveState(state_path)
        self.cached_split_result = CachedSplitResult(
            state=self.state, graph=self.graph, agglo_id=self.agglo_id)
        self.agglo_members = set(self.graph.get_agglo_members(agglo_id))

        if state_path is not None and os.path.exists(state_path):
            self.state.load()
        else:
            self.state.initialize(self.agglo_members)

        viewer = self.viewer = neuroglancer.Viewer()
        viewer.actions.add('inclusive-seed', self._add_inclusive_seed)
        viewer.actions.add('exclusive-seed', self._add_exclusive_seed)
        viewer.actions.add('next-component', self._next_component)
        viewer.actions.add('prev-component', self._prev_component)
        viewer.actions.add('new-component', self._make_new_component)
        viewer.actions.add('exclude-component', self._exclude_component)
        viewer.actions.add('exclude-all-but-component', self._exclude_all_but_component)

        key_bindings = [
            ['bracketleft', 'prev-component'],
            ['bracketright', 'next-component'],
            ['at:dblclick0', 'exclude-component'],
            ['at:shift+mousedown2', 'exclude-all-but-component'],
            ['at:control+mousedown0', 'inclusive-seed'],
            ['at:shift+mousedown0', 'exclusive-seed'],
            ['enter', 'new-component'],
        ]

        with viewer.txn() as s:
            s.layers.append(
                name='image',
                layer=neuroglancer.ImageLayer(source=self.image_url),
            )
            s.layers.append(
                name='original',
                layer=neuroglancer.SegmentationLayer(
                    source=self.segmentation_url,
                    segments=self.agglo_members,
                ),
            )
            s.layers.append(
                name='unused',
                layer=neuroglancer.SegmentationLayer(source=self.segmentation_url,
                                                     ),
                visible=False,
            )
            s.layers.append(
                name='split-result',
                layer=neuroglancer.SegmentationLayer(
                    source=self.segmentation_url,
                    segments=self.agglo_members,
                ),
            )
            s.concurrent_downloads = 256
            self._update_state(s)

        with viewer.config_state.txn() as s:
            s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
                                                               for key, command in key_bindings))
            for key, command in key_bindings:
                s.input_event_bindings.viewer[key] = command
                s.input_event_bindings.slice_view[key] = command
                s.input_event_bindings.perspective_view[key] = command
            self._update_config_state(s)

        viewer.shared_state.add_changed_callback(
            lambda: viewer.defer_callback(self._handle_state_changed))
Example #30
    ap = argparse.ArgumentParser()
    neuroglancer.cli.add_server_arguments(ap)
    args = ap.parse_args()
    neuroglancer.cli.handle_server_arguments(args)
    viewer = neuroglancer.Viewer()

    with viewer.txn() as s:
        s.layers['image'] = neuroglancer.ImageLayer(
            source=
            'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            panels=[
                neuroglancer.LayerSidePanelState(
                    side='left',
                    col=0,
                    row=0,
                    tab='render',
                    tabs=['source', 'rendering'],
                ),
                neuroglancer.LayerSidePanelState(
                    side='left',
                    col=0,
                    row=1,
                    tab='render',
                    tabs=['annotations'],
                ),
            ],
        )
        s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
            source=
            'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
        )
    print(viewer.state)
    print(viewer)